From 2e28cfd3b8efdcccd08670e2d8a5483d16fe4e27 Mon Sep 17 00:00:00 2001 From: awssdkgo Date: Wed, 9 Oct 2019 18:29:57 +0000 Subject: [PATCH] Release v1.25.9 --- CHANGELOG.md | 10 + aws/version.go | 2 +- models/apis/elasticache/2015-02-02/api-2.json | 46 +++- .../apis/elasticache/2015-02-02/docs-2.json | 38 ++- models/apis/kafka/2018-11-14/docs-2.json | 4 +- .../apis/mediaconvert/2017-08-29/api-2.json | 96 ++++++-- .../apis/mediaconvert/2017-08-29/docs-2.json | 59 +++-- service/elasticache/api.go | 188 ++++++++++++-- service/kafka/api.go | 19 +- service/mediaconvert/api.go | 233 +++++++++++++++--- 10 files changed, 594 insertions(+), 101 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d1008b32c..2f0f0e6cd8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.25.9 (2019-10-09) +=== + +### Service Client Updates +* `service/elasticache`: Updates service API and documentation + * Amazon ElastiCache now allows you to apply available service updates on demand to your Memcached and Redis Cache Clusters. Features included: (1) Access to the list of applicable service updates and their priorities. (2) Service update monitoring and regular status updates. (3) Recommended apply-by-dates for scheduling the service updates. (4) Ability to stop and later re-apply updates. For more information, see https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Self-Service-Updates.html and https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Self-Service-Updates.html +* `service/kafka`: Updates service documentation +* `service/mediaconvert`: Updates service API and documentation + * AWS Elemental MediaConvert SDK has added support for Dolby Atmos encoding, up to 36 outputs, accelerated transcoding with frame capture and preferred acceleration feature. 
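As a quick illustration of the ElastiCache item above, update actions can now be listed and applied per cache cluster as well as per replication group. A minimal sketch with the updated Go client; the region, cluster ID, and service update name are placeholders, not values from this release:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	// Region and cluster ID are placeholders.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := elasticache.New(sess)

	clusters := []*string{aws.String("my-memcached-cluster")}

	// List the service updates that apply to the given cache clusters.
	out, err := svc.DescribeUpdateActions(&elasticache.DescribeUpdateActionsInput{
		CacheClusterIds: clusters,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ua := range out.UpdateActions {
		fmt.Println(aws.StringValue(ua.ServiceUpdateName), aws.StringValue(ua.UpdateActionStatus))
	}

	// Apply one of the reported service updates to those clusters on demand.
	_, err = svc.BatchApplyUpdateAction(&elasticache.BatchApplyUpdateActionInput{
		ServiceUpdateName: aws.String("elc-sample-update"), // placeholder update name
		CacheClusterIds:   clusters,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

BatchStopUpdateAction accepts the same CacheClusterIds parameter for stopping an update that was applied this way.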
+ Release v1.25.8 (2019-10-08) === diff --git a/aws/version.go b/aws/version.go index 9ac3b30086..198836fcc9 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.8" +const SDKVersion = "1.25.9" diff --git a/models/apis/elasticache/2015-02-02/api-2.json b/models/apis/elasticache/2015-02-02/api-2.json index c9515a4ddc..6299555de6 100644 --- a/models/apis/elasticache/2015-02-02/api-2.json +++ b/models/apis/elasticache/2015-02-02/api-2.json @@ -988,23 +988,19 @@ "AwsQueryErrorMessage":{"type":"string"}, "BatchApplyUpdateActionMessage":{ "type":"structure", - "required":[ - "ReplicationGroupIds", - "ServiceUpdateName" - ], + "required":["ServiceUpdateName"], "members":{ "ReplicationGroupIds":{"shape":"ReplicationGroupIdList"}, + "CacheClusterIds":{"shape":"CacheClusterIdList"}, "ServiceUpdateName":{"shape":"String"} } }, "BatchStopUpdateActionMessage":{ "type":"structure", - "required":[ - "ReplicationGroupIds", - "ServiceUpdateName" - ], + "required":["ServiceUpdateName"], "members":{ "ReplicationGroupIds":{"shape":"ReplicationGroupIdList"}, + "CacheClusterIds":{"shape":"CacheClusterIdList"}, "ServiceUpdateName":{"shape":"String"} } }, @@ -1052,6 +1048,11 @@ }, "exception":true }, + "CacheClusterIdList":{ + "type":"list", + "member":{"shape":"String"}, + "max":20 + }, "CacheClusterList":{ "type":"list", "member":{ @@ -1162,6 +1163,26 @@ "locationName":"CacheNodeTypeSpecificValue" } }, + "CacheNodeUpdateStatus":{ + "type":"structure", + "members":{ + "CacheNodeId":{"shape":"String"}, + "NodeUpdateStatus":{"shape":"NodeUpdateStatus"}, + "NodeDeletionDate":{"shape":"TStamp"}, + "NodeUpdateStartDate":{"shape":"TStamp"}, + "NodeUpdateEndDate":{"shape":"TStamp"}, + "NodeUpdateInitiatedBy":{"shape":"NodeUpdateInitiatedBy"}, + "NodeUpdateInitiatedDate":{"shape":"TStamp"}, + "NodeUpdateStatusModifiedDate":{"shape":"TStamp"} + } + }, + "CacheNodeUpdateStatusList":{ + "type":"list", + "member":{ + "shape":"CacheNodeUpdateStatus", + "locationName":"CacheNodeUpdateStatus" + } + }, "CacheParameterGroup":{ "type":"structure", "members":{ @@ -1834,6 +1855,8 @@ "members":{ "ServiceUpdateName":{"shape":"String"}, "ReplicationGroupIds":{"shape":"ReplicationGroupIdList"}, + "CacheClusterIds":{"shape":"CacheClusterIdList"}, + "Engine":{"shape":"String"}, "ServiceUpdateStatus":{"shape":"ServiceUpdateStatusList"}, "ServiceUpdateTimeRange":{"shape":"TimeRangeFilter"}, "UpdateActionStatus":{"shape":"UpdateActionStatusList"}, @@ -2450,6 +2473,7 @@ "type":"structure", "members":{ "ReplicationGroupId":{"shape":"String"}, + "CacheClusterId":{"shape":"String"}, "ServiceUpdateName":{"shape":"String"}, "UpdateActionStatus":{"shape":"UpdateActionStatus"} } @@ -3080,6 +3104,7 @@ "type":"structure", "members":{ "ReplicationGroupId":{"shape":"String"}, + "CacheClusterId":{"shape":"String"}, "ServiceUpdateName":{"shape":"String"}, "ErrorType":{"shape":"String"}, "ErrorMessage":{"shape":"String"} @@ -3096,6 +3121,7 @@ "type":"structure", "members":{ "ReplicationGroupId":{"shape":"String"}, + "CacheClusterId":{"shape":"String"}, "ServiceUpdateName":{"shape":"String"}, "ServiceUpdateReleaseDate":{"shape":"TStamp"}, "ServiceUpdateSeverity":{"shape":"ServiceUpdateSeverity"}, @@ -3108,7 +3134,9 @@ "UpdateActionStatusModifiedDate":{"shape":"TStamp"}, "SlaMet":{"shape":"SlaMet"}, "NodeGroupUpdateStatus":{"shape":"NodeGroupUpdateStatusList"}, - "EstimatedUpdateTime":{"shape":"String"} + 
"CacheNodeUpdateStatus":{"shape":"CacheNodeUpdateStatusList"}, + "EstimatedUpdateTime":{"shape":"String"}, + "Engine":{"shape":"String"} } }, "UpdateActionList":{ diff --git a/models/apis/elasticache/2015-02-02/docs-2.json b/models/apis/elasticache/2015-02-02/docs-2.json index 2a82d1d233..13b6080648 100644 --- a/models/apis/elasticache/2015-02-02/docs-2.json +++ b/models/apis/elasticache/2015-02-02/docs-2.json @@ -198,6 +198,14 @@ "refs": { } }, + "CacheClusterIdList": { + "base": null, + "refs": { + "BatchApplyUpdateActionMessage$CacheClusterIds": "
The cache cluster IDs
", + "BatchStopUpdateActionMessage$CacheClusterIds": "
The cache cluster IDs
", + "DescribeUpdateActionsMessage$CacheClusterIds": "
The cache cluster IDs
" + } + }, "CacheClusterList": { "base": null, "refs": { @@ -277,6 +285,18 @@ "CacheNodeTypeSpecificParameter$CacheNodeTypeSpecificValues": "
A list of cache node types and their corresponding values for this parameter.
" } }, + "CacheNodeUpdateStatus": { + "base": "
The status of the service update on the cache node
", + "refs": { + "CacheNodeUpdateStatusList$member": null + } + }, + "CacheNodeUpdateStatusList": { + "base": null, + "refs": { + "UpdateAction$CacheNodeUpdateStatus": "
The status of the service update on the cache node
" + } + }, "CacheParameterGroup": { "base": "
Represents the output of a CreateCacheParameterGroup operation.
", "refs": { @@ -1017,12 +1037,14 @@ "NodeUpdateInitiatedBy": { "base": null, "refs": { + "CacheNodeUpdateStatus$NodeUpdateInitiatedBy": "
Reflects whether the update was initiated by the customer or automatically applied
", "NodeGroupMemberUpdateStatus$NodeUpdateInitiatedBy": "
Reflects whether the update was initiated by the customer or automatically applied
" } }, "NodeUpdateStatus": { "base": null, "refs": { + "CacheNodeUpdateStatus$NodeUpdateStatus": "
The update status of the node
", "NodeGroupMemberUpdateStatus$NodeUpdateStatus": "
The update status of the node
" } }, @@ -1437,6 +1459,7 @@ "CacheCluster$CacheSubnetGroupName": "
The name of the cache subnet group associated with the cluster.
", "CacheCluster$ReplicationGroupId": "
The replication group to which this cluster belongs. If this field is empty, the cluster is not associated with any replication group.
", "CacheCluster$SnapshotWindow": "
The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your cluster.
Example: 05:00-09:00
", + "CacheClusterIdList$member": null, "CacheClusterMessage$Marker": "
Provides an identifier to allow retrieval of paginated results.
", "CacheEngineVersion$Engine": "
The name of the cache engine.
", "CacheEngineVersion$EngineVersion": "
The version number of the cache engine.
", @@ -1458,6 +1481,7 @@ "CacheNodeTypeSpecificParameter$MinimumEngineVersion": "
The earliest cache engine version to which the parameter can apply.
", "CacheNodeTypeSpecificValue$CacheNodeType": "
The cache node type for which this value applies.
", "CacheNodeTypeSpecificValue$Value": "
The value for the cache node type.
", + "CacheNodeUpdateStatus$CacheNodeId": "
The node ID of the cache cluster
", "CacheParameterGroup$CacheParameterGroupName": "
The name of the cache parameter group.
", "CacheParameterGroup$CacheParameterGroupFamily": "
The name of the cache parameter group family that this cache parameter group is compatible with.
Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 |
", "CacheParameterGroup$Description": "
The description for this cache parameter group.
", @@ -1572,6 +1596,7 @@ "DescribeSnapshotsMessage$SnapshotSource": "
If set to system, the output shows snapshots that were automatically created by ElastiCache. If set to user the output shows snapshots that were manually created. If omitted, the output shows both automatically and manually created snapshots.
", "DescribeSnapshotsMessage$Marker": "
An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
", "DescribeUpdateActionsMessage$ServiceUpdateName": "
The unique ID of the service update
", + "DescribeUpdateActionsMessage$Engine": "
The ElastiCache engine to which the update applies. Either Redis or Memcached
", "DescribeUpdateActionsMessage$Marker": "
An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
", "EC2SecurityGroup$Status": "
The status of the Amazon EC2 security group.
", "EC2SecurityGroup$EC2SecurityGroupName": "
The name of the Amazon EC2 security group.
", @@ -1643,6 +1668,7 @@ "PendingModifiedValues$CacheNodeType": "
The cache node type that this cluster or replication group is scaled to.
", "PreferredAvailabilityZoneList$member": null, "ProcessedUpdateAction$ReplicationGroupId": "
The ID of the replication group
", + "ProcessedUpdateAction$CacheClusterId": "
The ID of the cache cluster
", "ProcessedUpdateAction$ServiceUpdateName": "
The unique ID of the service update
", "PurchaseReservedCacheNodesOfferingMessage$ReservedCacheNodesOfferingId": "
The ID of the reserved cache node offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
", "PurchaseReservedCacheNodesOfferingMessage$ReservedCacheNodeId": "
A customer-specified identifier to track this reservation.
The Reserved Cache Node ID is a unique customer-specified identifier to track this reservation. If this parameter is not specified, ElastiCache automatically generates an identifier for the reservation.
Example: myreservationID
", @@ -1682,8 +1708,8 @@ "SecurityGroupMembership$Status": "
The status of the cache security group membership. The status changes whenever a cache security group is modified, or when the cache security groups assigned to a cluster are modified.
", "ServiceUpdate$ServiceUpdateName": "
The unique ID of the service update
", "ServiceUpdate$ServiceUpdateDescription": "
Provides details of the service update
", - "ServiceUpdate$Engine": "
The Redis engine to which the service update applies
", - "ServiceUpdate$EngineVersion": "
The Redis engine version to which the service update applies
", + "ServiceUpdate$Engine": "
The ElastiCache engine to which the update applies. Either Redis or Memcached
", + "ServiceUpdate$EngineVersion": "
The ElastiCache engine version to which the update applies. Either Redis or Memcached engine version
", "ServiceUpdate$EstimatedUpdateTime": "
The estimated length of time the service update will take
", "ServiceUpdatesMessage$Marker": "
An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
", "Snapshot$SnapshotName": "
The name of a snapshot. For an automatic snapshot, the name is system-generated. For a manual snapshot, this is the user-provided name.
", @@ -1710,13 +1736,16 @@ "Tag$Value": "
The tag's value. May be null.
", "TestFailoverMessage$ReplicationGroupId": "
The name of the replication group (console: cluster) whose automatic failover is being tested by this operation.
", "UnprocessedUpdateAction$ReplicationGroupId": "
The replication group ID
", + "UnprocessedUpdateAction$CacheClusterId": "
The ID of the cache cluster
", "UnprocessedUpdateAction$ServiceUpdateName": "
The unique ID of the service update
", "UnprocessedUpdateAction$ErrorType": "
The error type for requests that are not processed
", "UnprocessedUpdateAction$ErrorMessage": "
The error message that describes the reason the request was not processed
", "UpdateAction$ReplicationGroupId": "
The ID of the replication group
", + "UpdateAction$CacheClusterId": "
The ID of the cache cluster
", "UpdateAction$ServiceUpdateName": "
The unique ID of the service update
", "UpdateAction$NodesUpdated": "
The progress of the service update on the replication group
", "UpdateAction$EstimatedUpdateTime": "
The estimated length of time for the update to complete
", + "UpdateAction$Engine": "
The ElastiCache engine to which the update applies. Either Redis or Memcached
", "UpdateActionsMessage$Marker": "
An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
" } }, @@ -1749,6 +1778,11 @@ "refs": { "CacheCluster$CacheClusterCreateTime": "
The date and time when the cluster was created.
", "CacheNode$CacheNodeCreateTime": "
The date and time when the cache node was created.
", + "CacheNodeUpdateStatus$NodeDeletionDate": "
The deletion date of the node
", + "CacheNodeUpdateStatus$NodeUpdateStartDate": "
The start date of the update for a node
", + "CacheNodeUpdateStatus$NodeUpdateEndDate": "
The end date of the update for a node
", + "CacheNodeUpdateStatus$NodeUpdateInitiatedDate": "
The date when the update is triggered
", + "CacheNodeUpdateStatus$NodeUpdateStatusModifiedDate": "
The date when the NodeUpdateStatus was last modified
", "DescribeEventsMessage$StartTime": "
The beginning of the time interval to retrieve events for, specified in ISO 8601 format.
Example: 2017-03-30T07:03:49.555Z
", "DescribeEventsMessage$EndTime": "
The end of the time interval for which to retrieve events, specified in ISO 8601 format.
Example: 2017-03-30T07:03:49.555Z
", "Event$Date": "
The date and time when the event occurred.
", diff --git a/models/apis/kafka/2018-11-14/docs-2.json b/models/apis/kafka/2018-11-14/docs-2.json index 2d66fde7b8..7a54a147ec 100644 --- a/models/apis/kafka/2018-11-14/docs-2.json +++ b/models/apis/kafka/2018-11-14/docs-2.json @@ -27,9 +27,9 @@ "refs" : { } }, "BrokerAZDistribution" : { - "base" : "\n
The distribution of broker nodes across Availability Zones. By default, broker nodes are distributed among three Availability Zones. Currently, the only supported value is DEFAULT. You can either specify this value explicitly or leave it out.
\n ", + "base" : "\n
The distribution of broker nodes across Availability Zones. This is an optional parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. You can also explicitly set this parameter to the value DEFAULT. No other values are currently allowed.
\n
Amazon MSK distributes the broker nodes evenly across the Availability Zones that correspond to the subnets you provide when you create the cluster.
\n ", "refs" : { - "BrokerNodeGroupInfo$BrokerAZDistribution" : "\n
The distribution of broker nodes across Availability Zones.
\n " + "BrokerNodeGroupInfo$BrokerAZDistribution" : "\n
The distribution of broker nodes across Availability Zones. This is an optional parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. You can also explicitly set this parameter to the value DEFAULT. No other values are currently allowed.
\n
Amazon MSK distributes the broker nodes evenly across the Availability Zones that correspond to the subnets you provide when you create the cluster.
\n " } }, "BrokerEBSVolumeInfo" : { diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index cafae5534b..64792eeab4 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -1044,7 +1044,8 @@ "type": "string", "enum": [ "DISABLED", - "ENABLED" + "ENABLED", + "PREFERRED" ] }, "AccelerationSettings": { @@ -1059,6 +1060,15 @@ "Mode" ] }, + "AccelerationStatus": { + "type": "string", + "enum": [ + "NOT_APPLICABLE", + "IN_PROGRESS", + "ACCELERATED", + "NOT_ACCELERATED" + ] + }, "AfdSignaling": { "type": "string", "enum": [ @@ -1319,7 +1329,7 @@ "locationName": "defaultSelection" }, "ExternalAudioFileInput": { - "shape": "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", + "shape": "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE", "locationName": "externalAudioFileInput" }, "LanguageCode": { @@ -1380,7 +1390,7 @@ "type": "structure", "members": { "AvailBlankingImage": { - "shape": "__stringMin14PatternS3BmpBMPPngPNG", + "shape": "__stringMin14PatternHttpHttpsS3BmpBMPPngPNG", "locationName": "availBlankingImage" } } @@ -1403,7 +1413,8 @@ "enum": [ "QUEUE", "PRESET", - "JOB_TEMPLATE" + "JOB_TEMPLATE", + "JOB" ] }, "BurninDestinationSettings": { @@ -1822,6 +1833,10 @@ "shape": "__doubleMin0Max2147483647", "locationName": "minFinalSegmentLength" }, + "MpdProfile": { + "shape": "CmafMpdProfile", + "locationName": "mpdProfile" + }, "SegmentControl": { "shape": "CmafSegmentControl", "locationName": "segmentControl" @@ -1872,6 +1887,13 @@ "INTEGER" ] }, + "CmafMpdProfile": { + "type": "string", + "enum": [ + "MAIN_PROFILE", + "ON_DEMAND_PROFILE" + ] + }, "CmafSegmentControl": { "type": "string", "enum": [ @@ -2070,6 +2092,10 @@ "shape": "StatusUpdateInterval", "locationName": "statusUpdateInterval" }, + "Tags": { + "shape": "__mapOf__string", + "locationName": "tags" + }, "UserMetadata": { "shape": "__mapOf__string", "locationName": "userMetadata" @@ -2266,6 +2292,10 @@ "shape": "__integerMin0Max2147483647", "locationName": "minBufferTime" }, + "MpdProfile": { + "shape": "DashIsoMpdProfile", + "locationName": "mpdProfile" + }, "SegmentControl": { "shape": "DashIsoSegmentControl", "locationName": "segmentControl" @@ -2287,6 +2317,13 @@ "NONE" ] }, + "DashIsoMpdProfile": { + "type": "string", + "enum": [ + "MAIN_PROFILE", + "ON_DEMAND_PROFILE" + ] + }, "DashIsoPlaybackDeviceCompatibility": { "type": "string", "enum": [ @@ -3144,7 +3181,7 @@ "locationName": "convert608To708" }, "SourceFile": { - "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", + "shape": "__stringMin14PatternHttpHttpsS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI", "locationName": "sourceFile" }, "TimeDelta": { @@ -4683,7 +4720,7 @@ "locationName": "height" }, "ImageInserterInput": { - "shape": "__stringMin14PatternS3BmpBMPPngPNGTgaTGA", + "shape": "__stringMin14PatternHttpHttpsS3BmpBMPPngPNGTgaTGA", "locationName": "imageInserterInput" }, "ImageX": { @@ -4732,6 +4769,10 @@ 
"shape": "AccelerationSettings", "locationName": "accelerationSettings" }, + "AccelerationStatus": { + "shape": "AccelerationStatus", + "locationName": "accelerationStatus" + }, "Arn": { "shape": "__string", "locationName": "arn" @@ -4768,6 +4809,10 @@ "shape": "__string", "locationName": "jobTemplate" }, + "Messages": { + "shape": "JobMessages", + "locationName": "messages" + }, "OutputGroupDetails": { "shape": "__listOfOutputGroupDetail", "locationName": "outputGroupDetails" @@ -4818,6 +4863,19 @@ "Settings" ] }, + "JobMessages": { + "type": "structure", + "members": { + "Info": { + "shape": "__listOf__string", + "locationName": "info" + }, + "Warning": { + "shape": "__listOf__string", + "locationName": "warning" + } + } + }, "JobPhase": { "type": "string", "enum": [ @@ -5712,7 +5770,7 @@ "locationName": "framerate" }, "Input": { - "shape": "__stringMin14Max1285PatternS3Mov09Png", + "shape": "__stringMin14Max1285PatternHttpHttpsS3Mov09Png", "locationName": "input" }, "InsertionMode": { @@ -8132,26 +8190,26 @@ "max": 11, "pattern": "^((([0-1]\\d)|(2[0-3]))(:[0-5]\\d){2}([:;][0-5]\\d))$" }, - "__stringMin14Max1285PatternS3Mov09Png": { + "__stringMin14Max1285PatternHttpHttpsS3Mov09Png": { "type": "string", "min": 14, "max": 1285, - "pattern": "^(s3:\\/\\/)(.*)(\\.mov|[0-9]+\\.png)$" + "pattern": "^(http|https|s3)://(.*)(\\.mov|[0-9]+\\.png)$" }, - "__stringMin14PatternS3BmpBMPPngPNG": { + "__stringMin14PatternHttpHttpsS3BmpBMPPngPNG": { "type": "string", "min": 14, - "pattern": "^(s3:\\/\\/)(.*?)\\.(bmp|BMP|png|PNG)$" + "pattern": "^(http|https|s3)://(.*?)\\.(bmp|BMP|png|PNG)$" }, - "__stringMin14PatternS3BmpBMPPngPNGTgaTGA": { + "__stringMin14PatternHttpHttpsS3BmpBMPPngPNGTgaTGA": { "type": "string", "min": 14, - "pattern": "^(s3:\\/\\/)(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)$" + "pattern": "^(http|https|s3)://(.*?)\\.(bmp|BMP|png|PNG|tga|TGA)$" }, - "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { + "__stringMin14PatternHttpHttpsS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { "type": "string", "min": 14, - "pattern": "^(s3:\\/\\/)(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|xml|XML|smi|SMI)$" + "pattern": "^(http|https|s3)://(.*?)\\.(scc|SCC|ttml|TTML|dfxp|DFXP|stl|STL|srt|SRT|xml|XML|smi|SMI)$" }, "__stringMin16Max24PatternAZaZ0922AZaZ0916": { "type": "string", @@ -8240,6 +8298,10 @@ "type": "string", "pattern": "^(\\d+(\\/\\d+)*)$" }, + "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + "type": "string", + "pattern": "^(http|https|s3)://([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" + }, 
"__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { "type": "string", "pattern": "^(http|https|s3)://([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))$" @@ -8260,10 +8322,6 @@ "type": "string", "pattern": "^s3:\\/\\/.*\\/(ASSETMAP.xml)?$" }, - "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { - "type": "string", - "pattern": "^(s3:\\/\\/)([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" - }, "__stringPatternSNManifestConfirmConditionNotificationNS": { "type": "string", "pattern": "^\\s*<(.|\\n)*ManifestConfirmConditionNotification(.|\\n)*>\\s*$" diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index beb311f1ce..c9b3741fde 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -114,9 +114,9 @@ } }, "AccelerationMode": { - "base": "Enable Acceleration (AccelerationMode) on any job that you want processed with accelerated transcoding.", + "base": "Specify whether the service runs your job with accelerated transcoding. Choose DISABLED if you don't want accelerated transcoding. Choose ENABLED if you want your job to run with accelerated transcoding and to fail if your input files or your job settings aren't compatible with accelerated transcoding. Choose PREFERRED if you want your job to run with accelerated transcoding if the job is compatible with the feature and to run at standard speed if it's not.", "refs": { - "AccelerationSettings$Mode": "Acceleration configuration for the job." + "AccelerationSettings$Mode": "Specify the conditions when the service will run your job with accelerated transcoding." } }, "AccelerationSettings": { @@ -129,6 +129,12 @@ "UpdateJobTemplateRequest$AccelerationSettings": "Accelerated transcoding can significantly speed up jobs with long, visually complex content. Outputs that use this feature incur pro-tier pricing. 
For information about feature limitations, see the AWS Elemental MediaConvert User Guide." } }, + "AccelerationStatus": { + "base": "Describes whether the current job is running with accelerated transcoding. For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode) set to ENABLED or PREFERRED, AccelerationStatus is one of the other states. AccelerationStatus is IN_PROGRESS initially, while the service determines whether the input files and job settings are compatible with accelerated transcoding. If they are, AcclerationStatus is ACCELERATED. If your input files and job settings aren't compatible with accelerated transcoding, the service either fails your job or runs it without accelerated transcoding, depending on how you set Acceleration (AccelerationMode). When the service runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED.", + "refs": { + "Job$AccelerationStatus": "Describes whether the current job is running with accelerated transcoding. For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode) set to ENABLED or PREFERRED, AccelerationStatus is one of the other states. AccelerationStatus is IN_PROGRESS initially, while the service determines whether the input files and job settings are compatible with accelerated transcoding. If they are, AcclerationStatus is ACCELERATED. If your input files and job settings aren't compatible with accelerated transcoding, the service either fails your job or runs it without accelerated transcoding, depending on how you set Acceleration (AccelerationMode). When the service runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED." + } + }, "AfdSignaling": { "base": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", "refs": { @@ -433,6 +439,12 @@ "CmafGroupSettings$ManifestDurationFormat": "Indicates whether the output manifest should use floating point values for segment duration." } }, + "CmafMpdProfile": { + "base": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "refs": { + "CmafGroupSettings$MpdProfile": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." 
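The acceleration behavior described above (AccelerationMode PREFERRED plus the read-only AccelerationStatus on Job) and the new job messages surface in the generated Go client as Job.AccelerationStatus and Job.Messages. A rough sketch of checking these fields on a submitted job; the endpoint and job ID are placeholders, and the account-specific endpoint would normally be discovered with DescribeEndpoints:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	// MediaConvert calls go to an account-specific endpoint; the URL here is a
	// placeholder that would normally come from the DescribeEndpoints operation.
	svc := mediaconvert.New(sess, aws.NewConfig().WithEndpoint("https://abcd1234.mediaconvert.us-west-2.amazonaws.com"))

	out, err := svc.GetJob(&mediaconvert.GetJobInput{
		Id: aws.String("1234567890123-abc123"), // placeholder job ID
	})
	if err != nil {
		log.Fatal(err)
	}

	job := out.Job
	// With Acceleration (AccelerationMode) set to ENABLED or PREFERRED, AccelerationStatus
	// reports whether the job is actually being transcoded with acceleration.
	fmt.Println("acceleration status:", aws.StringValue(job.AccelerationStatus))

	// Messages carries informational and warning notes for an already submitted job.
	if job.Messages != nil {
		for _, w := range job.Messages.Warning {
			fmt.Println("warning:", aws.StringValue(w))
		}
	}
}
```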
+ } + }, "CmafSegmentControl": { "base": "When set to SINGLE_FILE, a single output file is generated, which is internally segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, separate segment files will be created.", "refs": { @@ -570,6 +582,12 @@ "DashIsoGroupSettings$HbbtvCompliance": "Supports HbbTV specification as indicated" } }, + "DashIsoMpdProfile": { + "base": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "refs": { + "DashIsoGroupSettings$MpdProfile": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + } + }, "DashIsoPlaybackDeviceCompatibility": { "base": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", "refs": { @@ -585,7 +603,7 @@ "DashIsoWriteSegmentTimelineInRepresentation": { "base": "When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", "refs": { - "DashIsoGroupSettings$WriteSegmentTimelineInRepresentation": "When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." + "DashIsoGroupSettings$WriteSegmentTimelineInRepresentation": "If you get an HTTP error in the 400 range when you play back your DASH output, enable this setting and run your transcoding job again. When you enable this setting, the service writes precise segment durations in the DASH manifest. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When you don't enable this setting, the service writes approximate segment durations in your DASH manifest." 
} }, "DecryptionMode": { @@ -1343,9 +1361,9 @@ } }, "H265WriteMp4PackagingType": { - "base": "Use this setting only for outputs encoded with H.265 that are in CMAF or DASH output groups. If you include writeMp4PackagingType in your JSON job specification for other outputs, your video might not work properly with downstream systems and video players. If the location of parameter set NAL units don't matter in your workflow, ignore this setting. The service defaults to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. This makes your output compliant with this specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples.", + "base": "If the location of parameter set NAL units doesn't matter in your workflow, ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. For file MP4 outputs, choosing HVC1 can create video that doesn't work properly with some downstream systems and video players. Choose HVC1 to mark your output as HVC1. This makes your output compliant with the following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. The service defaults to marking your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples.", "refs": { - "H265Settings$WriteMp4PackagingType": "Use this setting only for outputs encoded with H.265 that are in CMAF or DASH output groups. If you include writeMp4PackagingType in your JSON job specification for other outputs, your video might not work properly with downstream systems and video players. If the location of parameter set NAL units don't matter in your workflow, ignore this setting. The service defaults to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. This makes your output compliant with this specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples." + "H265Settings$WriteMp4PackagingType": "If the location of parameter set NAL units doesn't matter in your workflow, ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. For file MP4 outputs, choosing HVC1 can create video that doesn't work properly with some downstream systems and video players. Choose HVC1 to mark your output as HVC1. This makes your output compliant with the following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. The service defaults to marking your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples." 
} }, "Hdr10Metadata": { @@ -1603,6 +1621,12 @@ "__listOfJob$member": null } }, + "JobMessages": { + "base": "Provides messages from the service about jobs that you have already successfully submitted.", + "refs": { + "Job$Messages": "Provides messages from the service about jobs that you have already successfully submitted." + } + }, "JobPhase": { "base": "A job's phase can be PROBING, TRANSCODING OR UPLOADING", "refs": { @@ -3373,6 +3397,8 @@ "__listOf__string": { "base": null, "refs": { + "JobMessages$Info": "List of messages that are informational only and don't indicate a problem with your job.", + "JobMessages$Warning": "List of messages that warn about conditions that might cause your job not to run or to fail.", "UntagResourceRequest$TagKeys": "The keys of the tags that you want to remove from the resource." } }, @@ -3425,6 +3451,7 @@ "__mapOf__string": { "base": null, "refs": { + "CreateJobRequest$Tags": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.", "CreateJobRequest$UserMetadata": "User-defined metadata that you want to associate with an MediaConvert job. You specify metadata in key/value pairs.", "CreateJobTemplateRequest$Tags": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.", "CreatePresetRequest$Tags": "The tags that you want to add to the resource. You can tag resources with a key-value pair or with only a key.", @@ -3549,25 +3576,25 @@ "MotionImageInserter$StartTime": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html Find job-wide and input timecode configuration settings in your JSON job settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource." } }, - "__stringMin14Max1285PatternS3Mov09Png": { + "__stringMin14Max1285PatternHttpHttpsS3Mov09Png": { "base": null, "refs": { "MotionImageInserter$Input": "Specify the .mov file or series of .png files that you want to overlay on your video. For .png files, provide the file name of the first file in the series. Make sure that the names of the .png files end with sequential numbers that specify the order that they are played in. For example, overlay_000.png, overlay_001.png, overlay_002.png, and so on. The sequence must start at zero, and each image file name must have the same number of digits. Pad your initial file names with enough zeros to complete the sequence. For example, if the first image is overlay_0.png, there can be only 10 images in the sequence, with the last image being overlay_9.png. But if the first image is overlay_00.png, there can be 100 images in the sequence." } }, - "__stringMin14PatternS3BmpBMPPngPNG": { + "__stringMin14PatternHttpHttpsS3BmpBMPPngPNG": { "base": null, "refs": { "AvailBlanking$AvailBlankingImage": "Blanking image to be used. Leave empty for solid black. Only bmp and png images are supported." 
} }, - "__stringMin14PatternS3BmpBMPPngPNGTgaTGA": { + "__stringMin14PatternHttpHttpsS3BmpBMPPngPNGTgaTGA": { "base": null, "refs": { - "InsertableImage$ImageInserterInput": "Specify the Amazon S3 location of the image that you want to overlay on the video. Use a PNG or TGA file." + "InsertableImage$ImageInserterInput": "Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want to overlay on the video. Use a PNG or TGA file." } }, - "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { + "__stringMin14PatternHttpHttpsS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMI": { "base": null, "refs": { "FileSourceSettings$SourceFile": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'." @@ -3695,6 +3722,12 @@ "StaticKeyProvider$KeyFormatVersions": "Relates to DRM implementation. Either a single positive integer version value or a slash delimited list of version values (1/2/3)." } }, + "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { + "base": null, + "refs": { + "AudioSelector$ExternalAudioFileInput": "Specifies audio data from an external file source." + } + }, "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { "base": null, "refs": { @@ -3730,12 +3763,6 @@ "__listOf__stringPatternS3ASSETMAPXml$member": null } }, - "__stringPatternS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { - "base": null, - "refs": { - "AudioSelector$ExternalAudioFileInput": "Specifies audio data from an external file source." - } - }, "__stringPatternSNManifestConfirmConditionNotificationNS": { "base": null, "refs": { diff --git a/service/elasticache/api.go b/service/elasticache/api.go index f3d4246019..6df29f9186 100644 --- a/service/elasticache/api.go +++ b/service/elasticache/api.go @@ -5586,10 +5586,11 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { type BatchApplyUpdateActionInput struct { _ struct{} `type:"structure"` + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + // The replication group IDs - // - // ReplicationGroupIds is a required field - ReplicationGroupIds []*string `type:"list" required:"true"` + ReplicationGroupIds []*string `type:"list"` // The unique ID of the service update // @@ -5610,9 +5611,6 @@ func (s BatchApplyUpdateActionInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *BatchApplyUpdateActionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "BatchApplyUpdateActionInput"} - if s.ReplicationGroupIds == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupIds")) - } if s.ServiceUpdateName == nil { invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) } @@ -5623,6 +5621,12 @@ func (s *BatchApplyUpdateActionInput) Validate() error { return nil } +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *BatchApplyUpdateActionInput) SetCacheClusterIds(v []*string) *BatchApplyUpdateActionInput { + s.CacheClusterIds = v + return s +} + // SetReplicationGroupIds sets the ReplicationGroupIds field's value. func (s *BatchApplyUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchApplyUpdateActionInput { s.ReplicationGroupIds = v @@ -5670,10 +5674,11 @@ func (s *BatchApplyUpdateActionOutput) SetUnprocessedUpdateActions(v []*Unproces type BatchStopUpdateActionInput struct { _ struct{} `type:"structure"` + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + // The replication group IDs - // - // ReplicationGroupIds is a required field - ReplicationGroupIds []*string `type:"list" required:"true"` + ReplicationGroupIds []*string `type:"list"` // The unique ID of the service update // @@ -5694,9 +5699,6 @@ func (s BatchStopUpdateActionInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *BatchStopUpdateActionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "BatchStopUpdateActionInput"} - if s.ReplicationGroupIds == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupIds")) - } if s.ServiceUpdateName == nil { invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) } @@ -5707,6 +5709,12 @@ func (s *BatchStopUpdateActionInput) Validate() error { return nil } +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *BatchStopUpdateActionInput) SetCacheClusterIds(v []*string) *BatchStopUpdateActionInput { + s.CacheClusterIds = v + return s +} + // SetReplicationGroupIds sets the ReplicationGroupIds field's value. 
func (s *BatchStopUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchStopUpdateActionInput { s.ReplicationGroupIds = v @@ -6407,6 +6415,94 @@ func (s *CacheNodeTypeSpecificValue) SetValue(v string) *CacheNodeTypeSpecificVa return s } +// The status of the service update on the cache node +type CacheNodeUpdateStatus struct { + _ struct{} `type:"structure"` + + // The node ID of the cache cluster + CacheNodeId *string `type:"string"` + + // The deletion date of the node + NodeDeletionDate *time.Time `type:"timestamp"` + + // The end date of the update for a node + NodeUpdateEndDate *time.Time `type:"timestamp"` + + // Reflects whether the update was initiated by the customer or automatically + // applied + NodeUpdateInitiatedBy *string `type:"string" enum:"NodeUpdateInitiatedBy"` + + // The date when the update is triggered + NodeUpdateInitiatedDate *time.Time `type:"timestamp"` + + // The start date of the update for a node + NodeUpdateStartDate *time.Time `type:"timestamp"` + + // The update status of the node + NodeUpdateStatus *string `type:"string" enum:"NodeUpdateStatus"` + + // The date when the NodeUpdateStatus was last modified> + NodeUpdateStatusModifiedDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CacheNodeUpdateStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeUpdateStatus) GoString() string { + return s.String() +} + +// SetCacheNodeId sets the CacheNodeId field's value. +func (s *CacheNodeUpdateStatus) SetCacheNodeId(v string) *CacheNodeUpdateStatus { + s.CacheNodeId = &v + return s +} + +// SetNodeDeletionDate sets the NodeDeletionDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeDeletionDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeDeletionDate = &v + return s +} + +// SetNodeUpdateEndDate sets the NodeUpdateEndDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateEndDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateEndDate = &v + return s +} + +// SetNodeUpdateInitiatedBy sets the NodeUpdateInitiatedBy field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedBy(v string) *CacheNodeUpdateStatus { + s.NodeUpdateInitiatedBy = &v + return s +} + +// SetNodeUpdateInitiatedDate sets the NodeUpdateInitiatedDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateInitiatedDate = &v + return s +} + +// SetNodeUpdateStartDate sets the NodeUpdateStartDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStartDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateStartDate = &v + return s +} + +// SetNodeUpdateStatus sets the NodeUpdateStatus field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStatus(v string) *CacheNodeUpdateStatus { + s.NodeUpdateStatus = &v + return s +} + +// SetNodeUpdateStatusModifiedDate sets the NodeUpdateStatusModifiedDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStatusModifiedDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateStatusModifiedDate = &v + return s +} + // Represents the output of a CreateCacheParameterGroup operation. 
type CacheParameterGroup struct { _ struct{} `type:"structure"` @@ -10213,6 +10309,12 @@ func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshots type DescribeUpdateActionsInput struct { _ struct{} `type:"structure"` + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The Elasticache engine to which the update applies. Either Redis or Memcached + Engine *string `type:"string"` + // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. @@ -10251,6 +10353,18 @@ func (s DescribeUpdateActionsInput) GoString() string { return s.String() } +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *DescribeUpdateActionsInput) SetCacheClusterIds(v []*string) *DescribeUpdateActionsInput { + s.CacheClusterIds = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *DescribeUpdateActionsInput) SetEngine(v string) *DescribeUpdateActionsInput { + s.Engine = &v + return s +} + // SetMarker sets the Marker field's value. func (s *DescribeUpdateActionsInput) SetMarker(v string) *DescribeUpdateActionsInput { s.Marker = &v @@ -12385,6 +12499,9 @@ func (s *PendingModifiedValues) SetNumCacheNodes(v int64) *PendingModifiedValues type ProcessedUpdateAction struct { _ struct{} `type:"structure"` + // The ID of the cache cluster + CacheClusterId *string `type:"string"` + // The ID of the replication group ReplicationGroupId *string `type:"string"` @@ -12405,6 +12522,12 @@ func (s ProcessedUpdateAction) GoString() string { return s.String() } +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *ProcessedUpdateAction) SetCacheClusterId(v string) *ProcessedUpdateAction { + s.CacheClusterId = &v + return s +} + // SetReplicationGroupId sets the ReplicationGroupId field's value. func (s *ProcessedUpdateAction) SetReplicationGroupId(v string) *ProcessedUpdateAction { s.ReplicationGroupId = &v @@ -13526,10 +13649,11 @@ type ServiceUpdate struct { // recommended apply-by date has expired. AutoUpdateAfterRecommendedApplyByDate *bool `type:"boolean"` - // The Redis engine to which the service update applies + // The Elasticache engine to which the update applies. Either Redis or Memcached Engine *string `type:"string"` - // The Redis engine version to which the service update applies + // The Elasticache engine version to which the update applies. Either Redis + // or Memcached engine version EngineVersion *string `type:"string"` // The estimated length of time the service update will take @@ -14209,6 +14333,9 @@ func (s *TimeRangeFilter) SetStartTime(v time.Time) *TimeRangeFilter { type UnprocessedUpdateAction struct { _ struct{} `type:"structure"` + // The ID of the cache cluster + CacheClusterId *string `type:"string"` + // The error message that describes the reason the request was not processed ErrorMessage *string `type:"string"` @@ -14232,6 +14359,12 @@ func (s UnprocessedUpdateAction) GoString() string { return s.String() } +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *UnprocessedUpdateAction) SetCacheClusterId(v string) *UnprocessedUpdateAction { + s.CacheClusterId = &v + return s +} + // SetErrorMessage sets the ErrorMessage field's value. 
func (s *UnprocessedUpdateAction) SetErrorMessage(v string) *UnprocessedUpdateAction { s.ErrorMessage = &v @@ -14260,6 +14393,15 @@ func (s *UnprocessedUpdateAction) SetServiceUpdateName(v string) *UnprocessedUpd type UpdateAction struct { _ struct{} `type:"structure"` + // The ID of the cache cluster + CacheClusterId *string `type:"string"` + + // The status of the service update on the cache node + CacheNodeUpdateStatus []*CacheNodeUpdateStatus `locationNameList:"CacheNodeUpdateStatus" type:"list"` + + // The Elasticache engine to which the update applies. Either Redis or Memcached + Engine *string `type:"string"` + // The estimated length of time for the update to complete EstimatedUpdateTime *string `type:"string"` @@ -14318,6 +14460,24 @@ func (s UpdateAction) GoString() string { return s.String() } +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *UpdateAction) SetCacheClusterId(v string) *UpdateAction { + s.CacheClusterId = &v + return s +} + +// SetCacheNodeUpdateStatus sets the CacheNodeUpdateStatus field's value. +func (s *UpdateAction) SetCacheNodeUpdateStatus(v []*CacheNodeUpdateStatus) *UpdateAction { + s.CacheNodeUpdateStatus = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *UpdateAction) SetEngine(v string) *UpdateAction { + s.Engine = &v + return s +} + // SetEstimatedUpdateTime sets the EstimatedUpdateTime field's value. func (s *UpdateAction) SetEstimatedUpdateTime(v string) *UpdateAction { s.EstimatedUpdateTime = &v diff --git a/service/kafka/api.go b/service/kafka/api.go index bef73a71c7..ce7262a6ac 100644 --- a/service/kafka/api.go +++ b/service/kafka/api.go @@ -1990,7 +1990,13 @@ func (s *BrokerEBSVolumeInfo) SetVolumeSizeGB(v int64) *BrokerEBSVolumeInfo { type BrokerNodeGroupInfo struct { _ struct{} `type:"structure"` - // The distribution of broker nodes across Availability Zones. + // The distribution of broker nodes across Availability Zones. This is an optional + // parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. + // You can also explicitly set this parameter to the value DEFAULT. No other + // values are currently allowed. + // + // Amazon MSK distributes the broker nodes evenly across the Availability Zones + // that correspond to the subnets you provide when you create the cluster. BrokerAZDistribution *string `locationName:"brokerAZDistribution" type:"string" enum:"BrokerAZDistribution"` // The list of subnets to connect to in the client virtual private cloud (VPC). @@ -4771,10 +4777,13 @@ func (s *ZookeeperNodeInfo) SetZookeeperVersion(v string) *ZookeeperNodeInfo { return s } -// The distribution of broker nodes across Availability Zones. By default, broker -// nodes are distributed among three Availability Zones. Currently, the only -// supported value is DEFAULT. You can either specify this value explicitly -// or leave it out. +// The distribution of broker nodes across Availability Zones. This is an optional +// parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. +// You can also explicitly set this parameter to the value DEFAULT. No other +// values are currently allowed. +// +// Amazon MSK distributes the broker nodes evenly across the Availability Zones +// that correspond to the subnets you provide when you create the cluster. 
const ( // BrokerAZDistributionDefault is a BrokerAZDistribution enum value BrokerAZDistributionDefault = "DEFAULT" diff --git a/service/mediaconvert/api.go b/service/mediaconvert/api.go index d35e2e70b0..ca6b366b24 100644 --- a/service/mediaconvert/api.go +++ b/service/mediaconvert/api.go @@ -2781,7 +2781,8 @@ func (s *Ac3Settings) SetSampleRate(v int64) *Ac3Settings { type AccelerationSettings struct { _ struct{} `type:"structure"` - // Acceleration configuration for the job. + // Specify the conditions when the service will run your job with accelerated + // transcoding. // // Mode is a required field Mode *string `locationName:"mode" type:"string" required:"true" enum:"AccelerationMode"` @@ -4608,6 +4609,14 @@ type CmafGroupSettings struct { // to 1, your final segment is 3.5 seconds. MinFinalSegmentLength *float64 `locationName:"minFinalSegmentLength" type:"double"` + // Specify whether your DASH profile is on-demand or main. When you choose Main + // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 + // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), + // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. + // When you choose On-demand, you must also set the output group setting Segment + // control (SegmentControl) to Single file (SINGLE_FILE). + MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"CmafMpdProfile"` + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -4731,6 +4740,12 @@ func (s *CmafGroupSettings) SetMinFinalSegmentLength(v float64) *CmafGroupSettin return s } +// SetMpdProfile sets the MpdProfile field's value. +func (s *CmafGroupSettings) SetMpdProfile(v string) *CmafGroupSettings { + s.MpdProfile = &v + return s +} + // SetSegmentControl sets the SegmentControl field's value. func (s *CmafGroupSettings) SetSegmentControl(v string) *CmafGroupSettings { s.SegmentControl = &v @@ -5027,6 +5042,10 @@ type CreateJobInput struct { // your job to the time it completes the transcode or encounters an error. StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"` + // The tags that you want to add to the resource. You can tag resources with + // a key-value pair or with only a key. + Tags map[string]*string `locationName:"tags" type:"map"` + // User-defined metadata that you want to associate with an MediaConvert job. // You specify metadata in key/value pairs. UserMetadata map[string]*string `locationName:"userMetadata" type:"map"` @@ -5131,6 +5150,12 @@ func (s *CreateJobInput) SetStatusUpdateInterval(v string) *CreateJobInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateJobInput) SetTags(v map[string]*string) *CreateJobInput { + s.Tags = v + return s +} + // SetUserMetadata sets the UserMetadata field's value. func (s *CreateJobInput) SetUserMetadata(v map[string]*string) *CreateJobInput { s.UserMetadata = v @@ -5651,6 +5676,14 @@ type DashIsoGroupSettings struct { // playout. MinBufferTime *int64 `locationName:"minBufferTime" type:"integer"` + // Specify whether your DASH profile is on-demand or main. When you choose Main + // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 + // in your .mpd DASH manifest. 
When you choose On-demand (ON_DEMAND_PROFILE), + // the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. + // When you choose On-demand, you must also set the output group setting Segment + // control (SegmentControl) to Single file (SINGLE_FILE). + MpdProfile *string `locationName:"mpdProfile" type:"string" enum:"DashIsoMpdProfile"` + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -5663,12 +5696,13 @@ type DashIsoGroupSettings struct { // files as in other output types. SegmentLength *int64 `locationName:"segmentLength" min:"1" type:"integer"` - // When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), - // your DASH manifest shows precise segment durations. The segment duration - // information appears inside the SegmentTimeline element, inside SegmentTemplate - // at the Representation level. When this feature isn't enabled, the segment - // durations in your DASH manifest are approximate. The segment duration information - // appears in the duration attribute of the SegmentTemplate element. + // If you get an HTTP error in the 400 range when you play back your DASH output, + // enable this setting and run your transcoding job again. When you enable this + // setting, the service writes precise segment durations in the DASH manifest. + // The segment duration information appears inside the SegmentTimeline element, + // inside SegmentTemplate at the Representation level. When you don't enable + // this setting, the service writes approximate segment durations in your DASH + // manifest. WriteSegmentTimelineInRepresentation *string `locationName:"writeSegmentTimelineInRepresentation" type:"string" enum:"DashIsoWriteSegmentTimelineInRepresentation"` } @@ -5740,6 +5774,12 @@ func (s *DashIsoGroupSettings) SetMinBufferTime(v int64) *DashIsoGroupSettings { return s } +// SetMpdProfile sets the MpdProfile field's value. +func (s *DashIsoGroupSettings) SetMpdProfile(v string) *DashIsoGroupSettings { + s.MpdProfile = &v + return s +} + // SetSegmentControl sets the SegmentControl field's value. func (s *DashIsoGroupSettings) SetSegmentControl(v string) *DashIsoGroupSettings { s.SegmentControl = &v @@ -8623,17 +8663,16 @@ type H265Settings struct { // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. UnregisteredSeiTimecode *string `locationName:"unregisteredSeiTimecode" type:"string" enum:"H265UnregisteredSeiTimecode"` - // Use this setting only for outputs encoded with H.265 that are in CMAF or - // DASH output groups. If you include writeMp4PackagingType in your JSON job - // specification for other outputs, your video might not work properly with - // downstream systems and video players. If the location of parameter set NAL - // units don't matter in your workflow, ignore this setting. The service defaults - // to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. - // This makes your output compliant with this specification: ISO IECJTC1 SC29 - // N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service - // stores parameter set NAL units in the sample headers but not in the samples - // directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, - // the service writes parameter set NAL units directly into the samples. 
+ // If the location of parameter set NAL units doesn't matter in your workflow, + // ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. + // For file MP4 outputs, choosing HVC1 can create video that doesn't work properly + // with some downstream systems and video players. Choose HVC1 to mark your + // output as HVC1. This makes your output compliant with the following specification: + // ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these + // outputs, the service stores parameter set NAL units in the sample headers + // but not in the samples directly. The service defaults to marking your output + // as HEV1. For these outputs, the service writes parameter set NAL units directly + // into the samples. WriteMp4PackagingType *string `locationName:"writeMp4PackagingType" type:"string" enum:"H265WriteMp4PackagingType"` } @@ -10486,8 +10525,8 @@ type InsertableImage struct { // blank. Height *int64 `locationName:"height" type:"integer"` - // Specify the Amazon S3 location of the image that you want to overlay on the - // video. Use a PNG or TGA file. + // Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want + // to overlay on the video. Use a PNG or TGA file. ImageInserterInput *string `locationName:"imageInserterInput" min:"14" type:"string"` // Specify the distance, in pixels, between the inserted image and the left @@ -10617,6 +10656,19 @@ type Job struct { // complex content. AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"` + // Describes whether the current job is running with accelerated transcoding. + // For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus + // is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode) + // set to ENABLED or PREFERRED, AccelerationStatus is one of the other states. + // AccelerationStatus is IN_PROGRESS initially, while the service determines + // whether the input files and job settings are compatible with accelerated + // transcoding. If they are, AcclerationStatus is ACCELERATED. If your input + // files and job settings aren't compatible with accelerated transcoding, the + // service either fails your job or runs it without accelerated transcoding, + // depending on how you set Acceleration (AccelerationMode). When the service + // runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED. + AccelerationStatus *string `locationName:"accelerationStatus" type:"string" enum:"AccelerationStatus"` + // An identifier for this resource that is unique within all of AWS. Arn *string `locationName:"arn" type:"string"` @@ -10657,6 +10709,10 @@ type Job struct { // template. JobTemplate *string `locationName:"jobTemplate" type:"string"` + // Provides messages from the service about jobs that you have already successfully + // submitted. + Messages *JobMessages `locationName:"messages" type:"structure"` + // List of output group details OutputGroupDetails []*OutputGroupDetail `locationName:"outputGroupDetails" type:"list"` @@ -10723,6 +10779,12 @@ func (s *Job) SetAccelerationSettings(v *AccelerationSettings) *Job { return s } +// SetAccelerationStatus sets the AccelerationStatus field's value. +func (s *Job) SetAccelerationStatus(v string) *Job { + s.AccelerationStatus = &v + return s +} + // SetArn sets the Arn field's value. 
func (s *Job) SetArn(v string) *Job { s.Arn = &v @@ -10777,6 +10839,12 @@ func (s *Job) SetJobTemplate(v string) *Job { return s } +// SetMessages sets the Messages field's value. +func (s *Job) SetMessages(v *JobMessages) *Job { + s.Messages = v + return s +} + // SetOutputGroupDetails sets the OutputGroupDetails field's value. func (s *Job) SetOutputGroupDetails(v []*OutputGroupDetail) *Job { s.OutputGroupDetails = v @@ -10843,6 +10911,42 @@ func (s *Job) SetUserMetadata(v map[string]*string) *Job { return s } +// Provides messages from the service about jobs that you have already successfully +// submitted. +type JobMessages struct { + _ struct{} `type:"structure"` + + // List of messages that are informational only and don't indicate a problem + // with your job. + Info []*string `locationName:"info" type:"list"` + + // List of messages that warn about conditions that might cause your job not + // to run or to fail. + Warning []*string `locationName:"warning" type:"list"` +} + +// String returns the string representation +func (s JobMessages) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JobMessages) GoString() string { + return s.String() +} + +// SetInfo sets the Info field's value. +func (s *JobMessages) SetInfo(v []*string) *JobMessages { + s.Info = v + return s +} + +// SetWarning sets the Warning field's value. +func (s *JobMessages) SetWarning(v []*string) *JobMessages { + s.Warning = v + return s +} + // JobSettings contains all the transcode settings for a job. type JobSettings struct { _ struct{} `type:"structure"` @@ -16980,14 +17084,47 @@ const ( Ac3MetadataControlUseConfigured = "USE_CONFIGURED" ) -// Enable Acceleration (AccelerationMode) on any job that you want processed -// with accelerated transcoding. +// Specify whether the service runs your job with accelerated transcoding. Choose +// DISABLED if you don't want accelerated transcoding. Choose ENABLED if you +// want your job to run with accelerated transcoding and to fail if your input +// files or your job settings aren't compatible with accelerated transcoding. +// Choose PREFERRED if you want your job to run with accelerated transcoding +// if the job is compatible with the feature and to run at standard speed if +// it's not. const ( // AccelerationModeDisabled is a AccelerationMode enum value AccelerationModeDisabled = "DISABLED" // AccelerationModeEnabled is a AccelerationMode enum value AccelerationModeEnabled = "ENABLED" + + // AccelerationModePreferred is a AccelerationMode enum value + AccelerationModePreferred = "PREFERRED" +) + +// Describes whether the current job is running with accelerated transcoding. +// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus +// is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode) +// set to ENABLED or PREFERRED, AccelerationStatus is one of the other states. +// AccelerationStatus is IN_PROGRESS initially, while the service determines +// whether the input files and job settings are compatible with accelerated +// transcoding. If they are, AcclerationStatus is ACCELERATED. If your input +// files and job settings aren't compatible with accelerated transcoding, the +// service either fails your job or runs it without accelerated transcoding, +// depending on how you set Acceleration (AccelerationMode). When the service +// runs your job without accelerated transcoding, AccelerationStatus is NOT_ACCELERATED. 
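A minimal sketch of how the MediaConvert additions above fit together: AccelerationModePreferred at submit time, the Tags map on CreateJobInput, and the new Job.AccelerationStatus and Job.Messages fields when polling. The role ARN, tag values, and empty job settings are placeholders, and the account-specific endpoint that MediaConvert normally requires (from DescribeEndpoints) is omitted to keep the sketch short. This is an illustration, not part of the generated client code.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// In practice, configure the client with your account-specific endpoint.
	svc := mediaconvert.New(session.Must(session.NewSession()))

	created, err := svc.CreateJob(&mediaconvert.CreateJobInput{
		Role:     aws.String("arn:aws:iam::111122223333:role/MediaConvertRole"), // placeholder
		Settings: &mediaconvert.JobSettings{},                                   // inputs and output groups elided
		// PREFERRED runs the job accelerated when the inputs and settings are
		// compatible, and at standard speed otherwise.
		AccelerationSettings: &mediaconvert.AccelerationSettings{
			Mode: aws.String(mediaconvert.AccelerationModePreferred),
		},
		// New in this release: tag the job resource itself.
		Tags: map[string]*string{"team": aws.String("vod")},
	})
	if err != nil {
		panic(err)
	}

	// Later, inspect the new per-job status and message fields.
	got, err := svc.GetJob(&mediaconvert.GetJobInput{Id: created.Job.Id})
	if err != nil {
		panic(err)
	}
	fmt.Println("acceleration status:", aws.StringValue(got.Job.AccelerationStatus))
	if got.Job.Messages != nil {
		for _, w := range got.Job.Messages.Warning {
			fmt.Println("warning:", aws.StringValue(w))
		}
	}
}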
+const ( + // AccelerationStatusNotApplicable is a AccelerationStatus enum value + AccelerationStatusNotApplicable = "NOT_APPLICABLE" + + // AccelerationStatusInProgress is a AccelerationStatus enum value + AccelerationStatusInProgress = "IN_PROGRESS" + + // AccelerationStatusAccelerated is a AccelerationStatus enum value + AccelerationStatusAccelerated = "ACCELERATED" + + // AccelerationStatusNotAccelerated is a AccelerationStatus enum value + AccelerationStatusNotAccelerated = "NOT_ACCELERATED" ) // This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert @@ -17184,6 +17321,9 @@ const ( // BillingTagsSourceJobTemplate is a BillingTagsSource enum value BillingTagsSourceJobTemplate = "JOB_TEMPLATE" + + // BillingTagsSourceJob is a BillingTagsSource enum value + BillingTagsSourceJob = "JOB" ) // If no explicit x_position or y_position is provided, setting alignment to @@ -17442,6 +17582,20 @@ const ( CmafManifestDurationFormatInteger = "INTEGER" ) +// Specify whether your DASH profile is on-demand or main. When you choose Main +// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 +// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), +// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. +// When you choose On-demand, you must also set the output group setting Segment +// control (SegmentControl) to Single file (SINGLE_FILE). +const ( + // CmafMpdProfileMainProfile is a CmafMpdProfile enum value + CmafMpdProfileMainProfile = "MAIN_PROFILE" + + // CmafMpdProfileOnDemandProfile is a CmafMpdProfile enum value + CmafMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" +) + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -17605,6 +17759,20 @@ const ( DashIsoHbbtvComplianceNone = "NONE" ) +// Specify whether your DASH profile is on-demand or main. When you choose Main +// profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 +// in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), +// the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. +// When you choose On-demand, you must also set the output group setting Segment +// control (SegmentControl) to Single file (SINGLE_FILE). +const ( + // DashIsoMpdProfileMainProfile is a DashIsoMpdProfile enum value + DashIsoMpdProfileMainProfile = "MAIN_PROFILE" + + // DashIsoMpdProfileOnDemandProfile is a DashIsoMpdProfile enum value + DashIsoMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" +) + // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback @@ -18896,17 +19064,16 @@ const ( H265UnregisteredSeiTimecodeEnabled = "ENABLED" ) -// Use this setting only for outputs encoded with H.265 that are in CMAF or -// DASH output groups. If you include writeMp4PackagingType in your JSON job -// specification for other outputs, your video might not work properly with -// downstream systems and video players. If the location of parameter set NAL -// units don't matter in your workflow, ignore this setting. The service defaults -// to marking your output as HEV1. Choose HVC1 to mark your output as HVC1. 
-// This makes your output compliant with this specification: ISO IECJTC1 SC29 -// N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service -// stores parameter set NAL units in the sample headers but not in the samples -// directly. Keep the default HEV1 to mark your output as HEV1. For these outputs, -// the service writes parameter set NAL units directly into the samples. +// If the location of parameter set NAL units doesn't matter in your workflow, +// ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. +// For file MP4 outputs, choosing HVC1 can create video that doesn't work properly +// with some downstream systems and video players. Choose HVC1 to mark your +// output as HVC1. This makes your output compliant with the following specification: +// ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these +// outputs, the service stores parameter set NAL units in the sample headers +// but not in the samples directly. The service defaults to marking your output +// as HEV1. For these outputs, the service writes parameter set NAL units directly +// into the samples. const ( // H265WriteMp4PackagingTypeHvc1 is a H265WriteMp4PackagingType enum value H265WriteMp4PackagingTypeHvc1 = "HVC1"
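A short sketch of how the DASH profile and H.265 packaging settings documented above might appear in job settings. MpdProfile is the field added in this release; WriteMp4PackagingType is the existing field whose documentation changed. The helper names, the segment and fragment lengths, and the use of the single-file segment-control constant are illustrative assumptions, not requirements of the API beyond the documented rule that ON_DEMAND_PROFILE needs single-file segment control. This is an illustration, not part of the generated client code.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// dashOnDemandGroup builds DASH output-group settings that signal the
// on-demand profile in the .mpd manifest; ON_DEMAND_PROFILE requires
// single-file segment control.
func dashOnDemandGroup() *mediaconvert.DashIsoGroupSettings {
	return &mediaconvert.DashIsoGroupSettings{
		MpdProfile:     aws.String(mediaconvert.DashIsoMpdProfileOnDemandProfile),
		SegmentControl: aws.String(mediaconvert.DashIsoSegmentControlSingleFile),
		SegmentLength:  aws.Int64(30), // illustrative values
		FragmentLength: aws.Int64(2),
	}
}

// hvc1Codec builds H.265 codec settings that mark the output as HVC1, so
// parameter set NAL units go into the sample headers rather than the samples;
// the service default remains HEV1.
func hvc1Codec() *mediaconvert.H265Settings {
	return &mediaconvert.H265Settings{
		WriteMp4PackagingType: aws.String(mediaconvert.H265WriteMp4PackagingTypeHvc1),
	}
}

func main() {
	_ = dashOnDemandGroup()
	_ = hvc1Codec()
}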