From 48554beaab4bb87cdedb96ff6a2fb9f669747001 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 14 Mar 2024 22:05:09 +0000 Subject: [PATCH] chore(schema): update (#3566) Co-authored-by: github-actions --- samtranslator/schema/schema.json | 4847 +++++++++++++++++++--- schema_source/cloudformation-docs.json | 304 +- schema_source/cloudformation.schema.json | 4847 +++++++++++++++++++--- 3 files changed, 8567 insertions(+), 1431 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 2432b810c..1c3787c9f 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -2055,6 +2055,11 @@ "title": "Password", "type": "string" }, + "ReplicationUser": { + "markdownDescription": "Defines if this user is intended for CRDR replication purposes.", + "title": "ReplicationUser", + "type": "boolean" + }, "Username": { "markdownDescription": "The username of the broker user. For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibts using guest as a valid usename. This value must be 2-100 characters long.\n\n> Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other AWS services, including CloudWatch Logs . 
Broker usernames are not intended to be used for private or sensitive data.", "title": "Username", @@ -2365,7 +2370,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", + "markdownDescription": "The environment variables for the Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", "title": "EnvironmentVariables", "type": "array" }, @@ -2474,7 +2479,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "Environment variables for the auto created branch.", + "markdownDescription": "The environment variables for the autocreated branch.", "title": "EnvironmentVariables", "type": "array" }, @@ -2551,12 +2556,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "", + "markdownDescription": "The environment variable name.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The environment variable value.", "title": "Value", "type": "string" } @@ -2815,6 +2820,16 @@ "title": "AutoSubDomainIAMRole", "type": "string" }, + "Certificate": { + "$ref": "#/definitions/AWS::Amplify::Domain.Certificate", + "markdownDescription": "Describes the SSL/TLS certificate for the domain association. 
This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", + "title": "Certificate" + }, + "CertificateSettings": { + "$ref": "#/definitions/AWS::Amplify::Domain.CertificateSettings", + "markdownDescription": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", + "title": "CertificateSettings" + }, "DomainName": { "markdownDescription": "The domain name for the domain association.", "title": "DomainName", @@ -2832,6 +2847,11 @@ "markdownDescription": "The setting for the subdomain.", "title": "SubDomainSettings", "type": "array" + }, + "UpdateStatus": { + "markdownDescription": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. 
For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.", + "title": "UpdateStatus", + "type": "string" } }, "required": [ @@ -2862,6 +2882,43 @@ ], "type": "object" }, + "AWS::Amplify::Domain.Certificate": { + "additionalProperties": false, + "properties": { + "CertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for a custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CertificateArn", + "type": "string" + }, + "CertificateType": { + "markdownDescription": "The type of SSL/TLS certificate that you want to use.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). 
For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CertificateVerificationDNSRecord": { + "markdownDescription": "The DNS record for certificate verification.", + "title": "CertificateVerificationDNSRecord", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Amplify::Domain.CertificateSettings": { + "additionalProperties": false, + "properties": { + "CertificateType": { + "markdownDescription": "The certificate type.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CustomCertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for the custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CustomCertificateArn", + "type": "string" + } + }, + "type": "object" + }, "AWS::Amplify::Domain.SubDomainSetting": { "additionalProperties": false, "properties": { @@ -3024,14 +3081,6 @@ "type": "array" } }, - "required": [ - "BindingProperties", - "ComponentType", - "Name", - "Overrides", - "Properties", - "Variants" - ], "type": "object" }, "Type": { @@ -3050,8 +3099,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3169,6 +3217,11 @@ "title": "Predicates", "type": "array" }, + 
"SlotName": { + "markdownDescription": "The name of a component slot.", + "title": "SlotName", + "type": "string" + }, "UserAttribute": { "markdownDescription": "An authenticated user attribute.", "title": "UserAttribute", @@ -3219,6 +3272,11 @@ }, "title": "Properties", "type": "object" + }, + "SourceId": { + "markdownDescription": "The unique ID of the child component in its original source system, such as Figma.", + "title": "SourceId", + "type": "string" } }, "required": [ @@ -3312,6 +3370,11 @@ "title": "Action", "type": "string" }, + "BindingEvent": { + "markdownDescription": "Binds an event to an action on a component. When you specify a `bindingEvent` , the event is called when the action is performed.", + "title": "BindingEvent", + "type": "string" + }, "Parameters": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Component.ActionParameters", "markdownDescription": "Describes information about the action.", @@ -3518,6 +3581,11 @@ "title": "Operand", "type": "string" }, + "OperandType": { + "markdownDescription": "The type of value to use when performing the evaluation.", + "title": "OperandType", + "type": "string" + }, "Operator": { "markdownDescription": "The operator to use to perform the evaluation.", "title": "Operator", @@ -3668,15 +3736,6 @@ "type": "object" } }, - "required": [ - "DataType", - "Fields", - "FormActionType", - "Name", - "SchemaVersion", - "SectionalElements", - "Style" - ], "type": "object" }, "Type": { @@ -3695,8 +3754,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3989,9 +4047,49 @@ ], "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue": { + "additionalProperties": false, + "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties", + "markdownDescription": "Describes the properties to customize with data at runtime.", + "title": "BindingProperties" + }, + "Type": { + 
"markdownDescription": "The property type.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties": { + "additionalProperties": false, + "properties": { + "Model": { + "markdownDescription": "An Amplify DataStore model.", + "title": "Model", + "type": "string" + } + }, + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormInputValueProperty": { "additionalProperties": false, "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties", + "markdownDescription": "The information to bind fields to data at runtime.", + "title": "BindingProperties" + }, + "Concat": { + "items": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValueProperty" + }, + "markdownDescription": "A list of form properties to concatenate to create the value to assign to this field property.", + "title": "Concat", + "type": "array" + }, "Value": { "markdownDescription": "The value to assign to the input field.", "title": "Value", @@ -4000,6 +4098,25 @@ }, "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties": { + "additionalProperties": false, + "properties": { + "Field": { + "markdownDescription": "The data field to bind the property to.", + "title": "Field", + "type": "string" + }, + "Property": { + "markdownDescription": "The form property to bind to the data field.", + "title": "Property", + "type": "string" + } + }, + "required": [ + "Property" + ], + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormStyle": { "additionalProperties": false, "properties": { @@ -4098,6 +4215,17 @@ "AWS::AmplifyUIBuilder::Form.ValueMappings": { "additionalProperties": false, "properties": { + "BindingProperties": { + "additionalProperties": false, + "markdownDescription": "The information to bind fields to data at runtime.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": 
"#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue" + } + }, + "title": "BindingProperties", + "type": "object" + }, "Values": { "items": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.ValueMapping" @@ -4190,10 +4318,6 @@ "type": "array" } }, - "required": [ - "Name", - "Values" - ], "type": "object" }, "Type": { @@ -4212,8 +4336,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -9124,7 +9247,7 @@ }, "Monitors": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Monitors" + "$ref": "#/definitions/AWS::AppConfig::Environment.Monitor" }, "markdownDescription": "Amazon CloudWatch alarms to monitor during the deployment process.", "title": "Monitors", @@ -9137,7 +9260,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Tags" + "$ref": "#/definitions/Tag" }, "markdownDescription": "Metadata to assign to the environment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define.", "title": "Tags", @@ -9171,152 +9294,147 @@ ], "type": "object" }, - "AWS::AppConfig::Environment.Monitors": { + "AWS::AppConfig::Environment.Monitor": { "additionalProperties": false, "properties": { "AlarmArn": { + "markdownDescription": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", + "title": "AlarmArn", "type": "string" }, "AlarmRoleArn": { + "markdownDescription": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` .", + "title": "AlarmRoleArn", "type": "string" } }, - "type": "object" - }, - "AWS::AppConfig::Environment.Tags": { - "additionalProperties": false, - "properties": { - "Key": { - "type": "string" - }, - "Value": { - "type": "string" - } - }, - "type": "object" - }, - "AWS::AppConfig::Extension": { - "additionalProperties": false, - "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - 
"Delete", - "Retain", - "Snapshot" - ], - "type": "string" - }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { - "Actions": { - "markdownDescription": "The actions defined in the extension.", - "title": "Actions", - "type": "object" - }, - "Description": { - "markdownDescription": "Information about the extension.", - "title": "Description", - "type": "string" - }, - "LatestVersionNumber": { - "markdownDescription": "You can omit this field when you create an extension. When you create a new version, specify the most recent current version number. For example, you create version 3, enter 2 for this field.", - "title": "LatestVersionNumber", - "type": "number" - }, - "Name": { - "markdownDescription": "A name for the extension. Each extension name in your account must be unique. Extension versions use the same name.", - "title": "Name", - "type": "string" - }, - "Parameters": { - "additionalProperties": false, - "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "$ref": "#/definitions/AWS::AppConfig::Extension.Parameter" - } - }, - "title": "Parameters", - "type": "object" - }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "Adds one or more tags for the specified extension. Tags are metadata that help you categorize resources in different ways, for example, by purpose, owner, or environment. 
Each tag consists of a key and an optional value, both of which you define.", - "title": "Tags", - "type": "array" - } - }, - "required": [ - "Actions", - "Name" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::AppConfig::Extension" - ], - "type": "string" - }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - } - }, - "required": [ - "Type", - "Properties" - ], - "type": "object" - }, - "AWS::AppConfig::Extension.Parameter": { - "additionalProperties": false, - "properties": { - "Description": { - "markdownDescription": "Information about the parameter.", - "title": "Description", - "type": "string" - }, - "Required": { - "markdownDescription": "A parameter value must be specified in the extension association.", - "title": "Required", - "type": "boolean" - } - }, "required": [ - "Required" + "AlarmArn" ], "type": "object" }, - "AWS::AppConfig::ExtensionAssociation": { + "AWS::AppConfig::Extension": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Actions": { + "markdownDescription": "The actions defined in the extension.", + "title": "Actions", + "type": "object" + }, + "Description": { + "markdownDescription": "Information about the extension.", + "title": "Description", + "type": "string" + }, + "LatestVersionNumber": { + "markdownDescription": "You can omit this field when you create an extension. When you create a new version, specify the most recent current version number. 
For example, you create version 3, enter 2 for this field.", + "title": "LatestVersionNumber", + "type": "number" + }, + "Name": { + "markdownDescription": "A name for the extension. Each extension name in your account must be unique. Extension versions use the same name.", + "title": "Name", + "type": "string" + }, + "Parameters": { + "additionalProperties": false, + "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::AppConfig::Extension.Parameter" + } + }, + "title": "Parameters", + "type": "object" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Adds one or more tags for the specified extension. Tags are metadata that help you categorize resources in different ways, for example, by purpose, owner, or environment. 
Each tag consists of a key and an optional value, both of which you define.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Actions", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::AppConfig::Extension" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::AppConfig::Extension.Parameter": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "Information about the parameter.", + "title": "Description", + "type": "string" + }, + "Required": { + "markdownDescription": "A parameter value must be specified in the extension association.", + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "Required" + ], + "type": "object" + }, + "AWS::AppConfig::ExtensionAssociation": { "additionalProperties": false, "properties": { "Condition": { @@ -18540,6 +18658,11 @@ "title": "AtRestEncryptionEnabled", "type": "boolean" }, + "HealthMetricsConfig": { + "markdownDescription": "Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:\n\n- *NetworkBandwidthOutAllowanceExceeded* : The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.\n- *EngineCPUUtilization* : The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.\n\nMetrics will be recorded by API ID. You can set the value to `ENABLED` or `DISABLED` .", + "title": "HealthMetricsConfig", + "type": "string" + }, "TransitEncryptionEnabled": { "markdownDescription": "Transit encryption flag when connecting to cache. 
You cannot update this setting after creation.", "title": "TransitEncryptionEnabled", @@ -18742,6 +18865,11 @@ "markdownDescription": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", "title": "LambdaConfig" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` .", + "title": "MetricsConfig", + "type": "string" + }, "Name": { "markdownDescription": "Friendly name for you to identify your AppSync data source after creation.", "title": "Name", @@ -19434,6 +19562,16 @@ "title": "AuthenticationType", "type": "string" }, + "EnhancedMetricsConfig": { + "$ref": "#/definitions/AWS::AppSync::GraphQLApi.EnhancedMetricsConfig", + "markdownDescription": "Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.\n\nEnhanced metrics can be configured at the resolver, data source, and operation levels. For more information, see [Monitoring and logging](https://docs.aws.amazon.com//appsync/latest/devguide/monitoring.html#cw-metrics) in the *AWS AppSync User Guide* .", + "title": "EnhancedMetricsConfig" + }, + "EnvironmentVariables": { + "markdownDescription": "A map containing the list of resources with their properties and environment variables. 
For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .\n\n*Pattern* : `^[A-Za-z]+\\\\w*$\\\\`\n\n*Minimum* : 2\n\n*Maximum* : 64", + "title": "EnvironmentVariables", + "type": "object" + }, "IntrospectionConfig": { "markdownDescription": "Sets the value of the GraphQL API to enable ( `ENABLED` ) or disable ( `DISABLED` ) introspection. If no value is provided, the introspection configuration will be set to `ENABLED` by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled.\n\nFor more information about introspection, see [GraphQL introspection](https://docs.aws.amazon.com/https://graphql.org/learn/introspection/) .", "title": "IntrospectionConfig", @@ -19580,6 +19718,32 @@ }, "type": "object" }, + "AWS::AppSync::GraphQLApi.EnhancedMetricsConfig": { + "additionalProperties": false, + "properties": { + "DataSourceLevelMetricsBehavior": { + "markdownDescription": "Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:\n\n- *Requests* : The number of invocations that occured during a request.\n- *Latency* : The time to complete a data source invocation.\n- *Errors* : The number of errors that occurred during a data source invocation.\n\nThese metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. 
`dataSourceLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_DATA_SOURCE_METRICS` : Records and emits metric data for all data sources in the request.\n- `PER_DATA_SOURCE_METRICS` : Records and emits metric data for data sources that have the `MetricsConfig` value set to `ENABLED` .", + "title": "DataSourceLevelMetricsBehavior", + "type": "string" + }, + "OperationLevelMetricsConfig": { + "markdownDescription": "Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:\n\n- *Requests* : The number of times a specified GraphQL operation was called.\n- *GraphQL errors* : The number of GraphQL errors that occurred during a specified GraphQL operation.\n\nMetrics will be recorded by API ID and operation name. You can set the value to `ENABLED` or `DISABLED` .", + "title": "OperationLevelMetricsConfig", + "type": "string" + }, + "ResolverLevelMetricsBehavior": { + "markdownDescription": "Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:\n\n- *GraphQL errors* : The number of GraphQL errors that occurred.\n- *Requests* : The number of invocations that occurred during a request.\n- *Latency* : The time to complete a resolver invocation.\n- *Cache hits* : The number of cache hits during a request.\n- *Cache misses* : The number of cache misses during a request.\n\nThese metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. 
`resolverLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_RESOLVER_METRICS` : Records and emits metric data for all resolvers in the request.\n- `PER_RESOLVER_METRICS` : Records and emits metric data for resolvers that have the `MetricsConfig` value set to `ENABLED` .", + "title": "ResolverLevelMetricsBehavior", + "type": "string" + } + }, + "required": [ + "DataSourceLevelMetricsBehavior", + "OperationLevelMetricsConfig", + "ResolverLevelMetricsBehavior" + ], + "type": "object" + }, "AWS::AppSync::GraphQLApi.LambdaAuthorizerConfig": { "additionalProperties": false, "properties": { @@ -19826,6 +19990,11 @@ "title": "MaxBatchSize", "type": "number" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced resolver metrics for specified resolvers. Note that `MetricsConfig` won't be used unless the `resolverLevelMetricsBehavior` value is set to `PER_RESOLVER_METRICS` . If the `resolverLevelMetricsBehavior` is set to `FULL_REQUEST_RESOLVER_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.", + "title": "MetricsConfig", + "type": "string" + }, "PipelineConfig": { "$ref": "#/definitions/AWS::AppSync::Resolver.PipelineConfig", "markdownDescription": "Functions linked with the pipeline resolver.", @@ -20121,7 +20290,7 @@ "type": "number" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. 
Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. 
Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. 
Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. 
Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -20307,7 +20476,7 @@ "type": "string" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. 
Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. 
Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. 
More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. 
Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -22452,6 +22621,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::AutoScaling::AutoScalingGroup.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU for an instance type, in GiB.\n\nDefault: No minimum or maximum limits", @@ -24752,9 +24926,6 @@ "title": "MappingTemplate", "type": "string" }, - "ModifiedAt": { - "type": "string" - }, "Name": { "markdownDescription": "Returns the descriptive name for the transformer.", "title": "Name", @@ -25068,6 +25239,11 @@ "markdownDescription": "Specifies the number of days after creation that a recovery point is moved to cold storage.", "title": "MoveToColdStorageAfterDays", "type": "number" + }, + "OptInToArchiveForSupportedResources": { + "markdownDescription": "", + "title": "OptInToArchiveForSupportedResources", + "type": "boolean" } }, "type": "object" @@ -26500,6 +26676,11 @@ "markdownDescription": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", "title": "ContainerProperties" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "An object that contains the properties for the Amazon ECS resources of a job.When `ecsProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `eksProperties` , or `nodeProperties` .", + "title": "EcsProperties" + }, "EksProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksProperties", "markdownDescription": "An object with properties that are specific to Amazon EKS-based jobs. 
When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", @@ -26516,13 +26697,7 @@ "title": "NodeProperties" }, "Parameters": { - "additionalProperties": true, "markdownDescription": "Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a `SubmitJob` request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see [Job definition parameters](https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) in the *AWS Batch User Guide* .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Parameters", "type": "object" }, @@ -26550,18 +26725,12 @@ "type": "number" }, "Tags": { - "additionalProperties": true, "markdownDescription": "The tags that are applied to the job definition.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Tags", "type": "object" }, "Timeout": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.JobTimeout", + "$ref": "#/definitions/AWS::Batch::JobDefinition.Timeout", "markdownDescription": "The timeout time for jobs that are submitted with this job definition. After the amount of time you specify passes, AWS Batch terminates your jobs if they aren't finished.", "title": "Timeout" }, @@ -26597,6 +26766,22 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.AuthorizationConfig": { + "additionalProperties": false, + "properties": { + "AccessPointId": { + "markdownDescription": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. 
If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", + "title": "AccessPointId", + "type": "string" + }, + "Iam": { + "markdownDescription": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified.", + "title": "Iam", + "type": "string" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.ContainerProperties": { "additionalProperties": false, "properties": { @@ -26663,7 +26848,7 @@ }, "MountPoints": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoint" + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" }, "markdownDescription": "The mount points for data volumes in your container. 
This parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .", "title": "MountPoints", @@ -26684,6 +26869,11 @@ "title": "ReadonlyRootFilesystem", "type": "boolean" }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, "ResourceRequirements": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" @@ -26725,7 +26915,7 @@ }, "Volumes": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Volume" + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" }, "markdownDescription": "A list of data volumes used in a job.", "title": "Volumes", @@ -26761,34 +26951,111 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSAuthorizationConfig": { + "AWS::Batch::JobDefinition.EcsProperties": { "additionalProperties": false, "properties": { - "AccessPointId": { + "TaskProperties": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsTaskProperties" + }, + "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element.", + "title": "TaskProperties", + "type": "array" + } + }, + "required": [ + "TaskProperties" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.EcsTaskProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerProperties" + }, + "markdownDescription": "This object is a list of containers.", + "title": "Containers", + 
"type": "array" + }, + "EphemeralStorage": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EphemeralStorage", + "markdownDescription": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", + "title": "EphemeralStorage" + }, + "ExecutionRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", + "title": "ExecutionRoleArn", "type": "string" }, - "Iam": { + "IpcMode": { + "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` .\n\nIf `host` is specified, all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified `task` share the same IPC resources.\n\nIf `none` is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.\n\nIf no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the Docker run reference.", + "title": "IpcMode", + "type": "string" + }, + "NetworkConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.NetworkConfiguration", + "markdownDescription": "The network configuration for jobs that are running on Fargate resources. 
Jobs that are running on Amazon EC2 resources must not specify this parameter.", + "title": "NetworkConfiguration" + }, + "PidMode": { + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the Docker run reference.", + "title": "PidMode", + "type": "string" + }, + "PlatformVersion": { + "markdownDescription": "The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. 
For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "PlatformVersion", + "type": "string" + }, + "RuntimePlatform": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RuntimePlatform", + "markdownDescription": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.", + "title": "RuntimePlatform" + }, + "TaskRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This is object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .", + "title": "TaskRoleArn", "type": "string" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" + }, + "markdownDescription": "A list of volumes that are associated with the job.", + "title": "Volumes", + "type": "array" } }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSVolumeConfiguration": { + "AWS::Batch::JobDefinition.EfsVolumeConfiguration": { "additionalProperties": false, "properties": { "AuthorizationConfig": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSAuthorizationConfig" + "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig", + "markdownDescription": "The authorization configuration details for the Amazon EFS file system.", + "title": "AuthorizationConfig" }, "FileSystemId": { + "markdownDescription": "The Amazon EFS file system ID to use.", + "title": "FileSystemId", "type": "string" }, "RootDirectory": { + "markdownDescription": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. 
The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", + "title": "RootDirectory", "type": "string" }, "TransitEncryption": { + "markdownDescription": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryption", "type": "string" }, "TransitEncryptionPort": { + "markdownDescription": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryptionPort", "type": "number" } }, @@ -26886,24 +27153,12 @@ "additionalProperties": false, "properties": { "Limits": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to reserve for the container. The values vary based on the `name` that's specified. Resources can be requested using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. 
`memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> To maximize your resource utilization, provide your jobs with as much memory as possible for the specific instance type that you are using. To learn how, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that's reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both places, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that's reserved for the container. Values must be a whole integer. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Limits", "type": "object" }, "Requests": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to request for the container. The values vary based on the `name` that's specified. Resources can be requested by using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. 
If `memory` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that are reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that are reserved for the container. Values must be a whole integer. `nvidia.com/gpu` can be specified in `limits` , `requests` , or both. If `nvidia.com/gpu` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Requests", "type": "object" } @@ -26989,56 +27244,11 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EksMetadata": { - "additionalProperties": false, - "properties": { - "Labels": { - "additionalProperties": true, - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.EksPodProperties": { - "additionalProperties": false, - "properties": { - "Containers": { - "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" - }, - "type": "array" - }, - "DnsPolicy": { - "type": "string" - }, - "HostNetwork": { - "type": "boolean" - }, - "Metadata": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksMetadata" - }, - "ServiceAccountName": { - "type": "string" - }, - "Volumes": { - "items": { - "$ref": 
"#/definitions/AWS::Batch::JobDefinition.EksVolume" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.EksProperties": { "additionalProperties": false, "properties": { "PodProperties": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksPodProperties", + "$ref": "#/definitions/AWS::Batch::JobDefinition.PodProperties", "markdownDescription": "The properties for the Kubernetes pod resources of a job.", "title": "PodProperties" } @@ -27163,24 +27373,6 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.Host": { - "additionalProperties": false, - "properties": { - "SourcePath": { - "type": "string" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.JobTimeout": { - "additionalProperties": false, - "properties": { - "AttemptDurationSeconds": { - "type": "number" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.LinuxParameters": { "additionalProperties": false, "properties": { @@ -27232,13 +27424,7 @@ "type": "string" }, "Options": { - "additionalProperties": true, "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Options", "type": "object" }, @@ -27256,16 +27442,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.MountPoint": { + "AWS::Batch::JobDefinition.Metadata": { + "additionalProperties": false, + "properties": { + "Labels": { + "markdownDescription": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. 
Each resource can have multiple labels, but each key must be unique for a given object.", + "title": "Labels", + "type": "object" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.MountPoints": { "additionalProperties": false, "properties": { "ContainerPath": { + "markdownDescription": "The path on the container where the host volume is mounted.", + "title": "ContainerPath", "type": "string" }, "ReadOnly": { + "markdownDescription": "If this value is `true` , the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is `false` .", + "title": "ReadOnly", "type": "boolean" }, "SourceVolume": { + "markdownDescription": "The name of the volume to mount.", + "title": "SourceVolume", "type": "string" } }, @@ -27319,6 +27522,19 @@ "markdownDescription": "The container details for the node range.", "title": "Container" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "title": "EcsProperties" + }, + "InstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", + "title": "InstanceTypes", + "type": "array" + }, "TargetNodes": { "markdownDescription": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). 
You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties.", "title": "TargetNodes", @@ -27330,6 +27546,75 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.PodProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.", + "title": "Containers", + "type": "array" + }, + "DnsPolicy": { + "markdownDescription": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", + "title": "DnsPolicy", + "type": "string" + }, + "HostNetwork": { + "markdownDescription": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. 
For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "title": "HostNetwork", + "type": "boolean" + }, + "InitContainers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", + "title": "InitContainers", + "type": "array" + }, + "Metadata": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata", + "markdownDescription": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", + "title": "Metadata" + }, + "ServiceAccountName": { + "markdownDescription": "The name of the service account that's used to run the pod. 
For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "title": "ServiceAccountName", + "type": "string" + }, + "ShareProcessNamespace": { + "markdownDescription": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", + "title": "ShareProcessNamespace", + "type": "boolean" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" + }, + "markdownDescription": "Specifies the volumes for a job definition that uses Amazon EKS resources.", + "title": "Volumes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.RepositoryCredentials": { + "additionalProperties": false, + "properties": { + "CredentialsParameter": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret containing the private repository credentials.", + "title": "CredentialsParameter", + "type": "string" + } + }, + "required": [ + "CredentialsParameter" + ], + "type": "object" + }, "AWS::Batch::JobDefinition.ResourceRequirement": { "additionalProperties": false, "properties": { @@ -27401,6 +27686,147 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.TaskContainerDependency": { + "additionalProperties": false, + "properties": { + "Condition": { + "markdownDescription": "The dependency condition of the container. 
The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a zero status. This condition can't be set on an essential container.", + "title": "Condition", + "type": "string" + }, + "ContainerName": { + "markdownDescription": "A unique identifier for the container.", + "title": "ContainerName", + "type": "string" + } + }, + "required": [ + "Condition", + "ContainerName" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.TaskContainerProperties": { + "additionalProperties": false, + "properties": { + "Command": { + "items": { + "type": "string" + }, + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . 
For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .", + "title": "Command", + "type": "array" + }, + "DependsOn": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerDependency" + }, + "markdownDescription": "A list of containers that this container depends on.", + "title": "DependsOn", + "type": "array" + }, + "Environment": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Environment" + }, + "markdownDescription": "The environment variables to pass to a container. This parameter maps to Env inthe [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with `AWS_BATCH` . This naming convention is reserved for variables that AWS Batch sets.", + "title": "Environment", + "type": "array" + }, + "Essential": { + "markdownDescription": "If the essential parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. 
For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "Essential", + "type": "boolean" + }, + "Image": { + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of the [*docker run*](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "title": "Image", + "type": "string" + }, + "LinuxParameters": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LinuxParameters", + "markdownDescription": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. 
For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .", + "title": "LinuxParameters" + }, + "LogConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LogConfiguration", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the *Docker documentation* .\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "LogConfiguration" + }, + "MountPoints": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" + }, + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the [--volume](https://docs.aws.amazon.com/) option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "title": "MountPoints", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a container. The name can be used as a unique identifier to target your `dependsOn` and `Overrides` objects.", + "title": "Name", + "type": "string" + }, + "Privileged": { + "markdownDescription": "When this parameter is `true` , the container is given elevated privileges on the host container instance (similar to the `root` user). 
This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on Fargate.", + "title": "Privileged", + "type": "boolean" + }, + "ReadonlyRootFilesystem": { + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "title": "ReadonlyRootFilesystem", + "type": "boolean" + }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, + "ResourceRequirements": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" + }, + "markdownDescription": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", + "title": "ResourceRequirements", + "type": "array" + }, + "Secrets": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Secret" + }, + "markdownDescription": "The secrets to pass to the container. 
For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the Amazon Elastic Container Service Developer Guide.", + "title": "Secrets", + "type": "array" + }, + "Ulimits": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Ulimit" + }, + "markdownDescription": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", + "title": "Ulimits", + "type": "array" + }, + "User": { + "markdownDescription": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . 
We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gi`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "title": "User", + "type": "string" + } + }, + "required": [ + "Image" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.Timeout": { + "additionalProperties": false, + "properties": { + "AttemptDurationSeconds": { + "markdownDescription": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.", + "title": "AttemptDurationSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.Tmpfs": { "additionalProperties": false, "properties": { @@ -27455,16 +27881,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.Volume": { + "AWS::Batch::JobDefinition.Volumes": { "additionalProperties": false, "properties": { "EfsVolumeConfiguration": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSVolumeConfiguration" + "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration", + "markdownDescription": "This is used when you're using an Amazon Elastic File System file system for job storage. 
For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", + "title": "EfsVolumeConfiguration" }, "Host": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Host" + "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost", + "markdownDescription": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", + "title": "Host" }, "Name": { + "markdownDescription": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` .", + "title": "Name", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.VolumesHost": { + "additionalProperties": false, + "properties": { + "SourcePath": { + "markdownDescription": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. 
Don't provide this for these jobs.", + "title": "SourcePath", "type": "string" } }, @@ -29510,6 +29953,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AutoScalingSpecifications": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSpecification", + "markdownDescription": "The optional auto scaling capacity settings for a table in provisioned capacity mode.", + "title": "AutoScalingSpecifications" + }, "BillingMode": { "$ref": "#/definitions/AWS::Cassandra::Table.BillingMode", "markdownDescription": "The billing mode for the table, which determines how you'll be charged for reads and writes:\n\n- *On-demand mode* (default) - You pay based on the actual reads and writes your application performs.\n- *Provisioned mode* - Lets you specify the number of reads and writes per second that you need for your application.\n\nIf you don't specify a value for this property, then the table will use on-demand mode.", @@ -29564,6 +30012,14 @@ "title": "RegularColumns", "type": "array" }, + "ReplicaSpecifications": { + "items": { + "$ref": "#/definitions/AWS::Cassandra::Table.ReplicaSpecification" + }, + "markdownDescription": "The AWS Region specific settings of a multi-Region table.\n\nFor a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters.\n\n- `region` : The Region where these settings are applied. (Required)\n- `readCapacityUnits` : The provisioned read capacity units. (Optional)\n- `readCapacityAutoScaling` : The read capacity auto scaling settings for the table. (Optional)", + "title": "ReplicaSpecifications", + "type": "array" + }, "TableName": { "markdownDescription": "The name of the table to be created. The table name is case sensitive. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the table name. 
For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you can't perform updates that require replacing this resource. You can perform updates that require no interruption or some interruption. If you must replace the resource, specify a new name. \n\n*Length constraints:* Minimum length of 3. Maximum length of 255.\n\n*Pattern:* `^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$`", "title": "TableName", @@ -29605,6 +30061,48 @@ ], "type": "object" }, + "AWS::Cassandra::Table.AutoScalingSetting": { + "additionalProperties": false, + "properties": { + "AutoScalingDisabled": { + "markdownDescription": "This optional parameter enables auto scaling for the table if set to `false` .", + "title": "AutoScalingDisabled", + "type": "boolean" + }, + "MaximumUnits": { + "markdownDescription": "Manage costs by specifying the maximum amount of throughput to provision. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MaximumUnits", + "type": "number" + }, + "MinimumUnits": { + "markdownDescription": "The minimum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MinimumUnits", + "type": "number" + }, + "ScalingPolicy": { + "$ref": "#/definitions/AWS::Cassandra::Table.ScalingPolicy", + "markdownDescription": "Amazon Keyspaces supports the `target tracking` auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. 
You define the target value as a percentage between 20 and 90.", + "title": "ScalingPolicy" + } + }, + "type": "object" + }, + "AWS::Cassandra::Table.AutoScalingSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's read capacity.", + "title": "ReadCapacityAutoScaling" + }, + "WriteCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's write capacity.", + "title": "WriteCapacityAutoScaling" + } + }, + "type": "object" + }, "AWS::Cassandra::Table.BillingMode": { "additionalProperties": false, "properties": { @@ -29702,6 +30200,70 @@ ], "type": "object" }, + "AWS::Cassandra::Table.ReplicaSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The read capacity auto scaling settings for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityAutoScaling" + }, + "ReadCapacityUnits": { + "markdownDescription": "The provisioned read capacity units for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityUnits", + "type": "number" + }, + "Region": { + "markdownDescription": "The AWS Region.", + "title": "Region", + "type": "string" + } + }, + "required": [ + "Region" + ], + "type": "object" + }, + "AWS::Cassandra::Table.ScalingPolicy": { + "additionalProperties": false, + "properties": { + "TargetTrackingScalingPolicyConfiguration": { + "$ref": "#/definitions/AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration", + "markdownDescription": "The auto scaling policy that scales a table based on the ratio of consumed to provisioned capacity.", + "title": "TargetTrackingScalingPolicyConfiguration" + } + }, 
+ "type": "object" + }, + "AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration": { + "additionalProperties": false, + "properties": { + "DisableScaleIn": { + "markdownDescription": "Specifies if `scale-in` is enabled.\n\nWhen auto scaling automatically decreases capacity for a table, the table *scales in* . When scaling policies are set, they can't scale in the table lower than its minimum capacity.", + "title": "DisableScaleIn", + "type": "boolean" + }, + "ScaleInCooldown": { + "markdownDescription": "Specifies a `scale-in` cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleInCooldown", + "type": "number" + }, + "ScaleOutCooldown": { + "markdownDescription": "Specifies a scale out cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleOutCooldown", + "type": "number" + }, + "TargetValue": { + "markdownDescription": "Specifies the target value for the target tracking auto scaling policy.\n\nAmazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define `targetValue` as a percentage. 
An `integer` between 20 and 90.", + "title": "TargetValue", + "type": "number" + } + }, + "required": [ + "TargetValue" + ], + "type": "object" + }, "AWS::CertificateManager::Account": { "additionalProperties": false, "properties": { @@ -33316,7 +33878,7 @@ "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and .15.", + "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution.", "title": "Weight", "type": "number" } @@ -36274,7 +36836,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. 
You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- 
`AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN 
must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen 
`resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals 
`AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. 
To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` 
\ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` 
or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` 
equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen 
`resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` 
equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in 
the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36597,7 +37159,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- 
`AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN 
must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen 
`resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals 
`AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. 
To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` 
\ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` 
or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` 
equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen 
`resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` 
equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in 
the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -37976,6 +38538,92 @@ ], "type": "object" }, + "AWS::CodeBuild::Fleet": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "BaseCapacity": { + "markdownDescription": "The initial number of machines allocated to the compute \ufb02eet, which de\ufb01nes the number of builds that can run in parallel.", + "title": "BaseCapacity", + "type": "number" + }, + "ComputeType": { + "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "title": "ComputeType", + "type": "string" + }, + "EnvironmentType": { + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "title": "EnvironmentType", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the compute fleet.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeBuild::Fleet" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::CodeBuild::Project": { "additionalProperties": false, "properties": { @@ -38310,6 +38958,9 @@ "title": "EnvironmentVariables", "type": "array" }, + "Fleet": { + "$ref": "#/definitions/AWS::CodeBuild::Project.ProjectFleet" + }, "Image": { "markdownDescription": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:\n\n- For an image tag: `/:` . 
For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be `aws/codebuild/standard:4.0` .\n- For an image digest: `/@` . For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use `/@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf` .\n\nFor more information, see [Docker images provided by CodeBuild](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-available.html) in the *AWS CodeBuild user guide* .", "title": "Image", @@ -38337,7 +38988,9 @@ } }, "required": [ - "Image" + "ComputeType", + "Image", + "Type" ], "type": "object" }, @@ -40558,6 +41211,11 @@ "title": "DisableInboundStageTransitions", "type": "array" }, + "ExecutionMode": { + "markdownDescription": "The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.", + "title": "ExecutionMode", + "type": "string" + }, "Name": { "markdownDescription": "The name of the pipeline.", "title": "Name", @@ -40816,9 +41474,39 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitBranchFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitConfiguration": { "additionalProperties": false, "properties": { + "PullRequest": { + "items": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPullRequestFilter" + }, + "markdownDescription": "The field where the 
repository event that will start the pipeline is specified as pull requests.", + "title": "PullRequest", + "type": "array" + }, "Push": { "items": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPushFilter" @@ -40838,9 +41526,65 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::CodePipeline::Pipeline.GitPullRequestFilter": { + "additionalProperties": false, + "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + "markdownDescription": "The field that specifies to filter on branches for the pull request trigger configuration.", + "title": "Branches" + }, + "Events": { + "items": { + "type": "string" + }, + "markdownDescription": "The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.", + "title": "Events", + "type": "array" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the pull request trigger configuration.", + "title": "FilePaths" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitPushFilter": { "additionalProperties": false, "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + 
"markdownDescription": "The field that specifies to filter on branches for the push trigger configuration.", + "title": "Branches" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the push trigger configuration.", + "title": "FilePaths" + }, "Tags": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitTagFilterCriteria", "markdownDescription": "The field that contains the details for the Git tags trigger configuration.", @@ -41847,6 +42591,10 @@ "type": "boolean" } }, + "required": [ + "ClientId", + "ProviderName" + ], "type": "object" }, "AWS::Cognito::IdentityPool.CognitoStreams": { @@ -42013,12 +42761,24 @@ "type": "string" }, "RoleMappings": { + "additionalProperties": false, "markdownDescription": "How users for a specific identity provider are mapped to roles. This is a string to the `RoleMapping` object map. The string identifies the identity provider. For example: `graph.facebook.com` or `cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id` .\n\nIf the `IdentityProvider` field isn't provided in this object, the string is used as the identity provider name.\n\nFor more information, see the [RoleMapping property](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-identitypoolroleattachment-rolemapping.html) .", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::Cognito::IdentityPoolRoleAttachment.RoleMapping" + } + }, "title": "RoleMappings", "type": "object" }, "Roles": { + "additionalProperties": true, "markdownDescription": "The map of the roles associated with this pool. For a given role, the key is either \"authenticated\" or \"unauthenticated\". 
The value is the role ARN.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, "title": "Roles", "type": "object" } @@ -48371,6 +49131,22 @@ "title": "AssignContactCategoryActions", "type": "array" }, + "CreateCaseActions": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.CreateCaseAction" + }, + "markdownDescription": "", + "title": "CreateCaseActions", + "type": "array" + }, + "EndAssociatedTasksActions": { + "items": { + "type": "object" + }, + "markdownDescription": "", + "title": "EndAssociatedTasksActions", + "type": "array" + }, "EventBridgeActions": { "items": { "$ref": "#/definitions/AWS::Connect::Rule.EventBridgeAction" @@ -48394,8 +49170,39 @@ "markdownDescription": "Information about the task action. This field is required if `TriggerEventSource` is one of the following values: `OnZendeskTicketCreate` | `OnZendeskTicketStatusUpdate` | `OnSalesforceCaseCreate`", "title": "TaskActions", "type": "array" + }, + "UpdateCaseActions": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.UpdateCaseAction" + }, + "markdownDescription": "", + "title": "UpdateCaseActions", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Connect::Rule.CreateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + }, + "TemplateId": { + "markdownDescription": "", + "title": "TemplateId", + "type": "string" } }, + "required": [ + "Fields", + "TemplateId" + ], "type": "object" }, "AWS::Connect::Rule.EventBridgeAction": { @@ -48412,6 +49219,52 @@ ], "type": "object" }, + "AWS::Connect::Rule.Field": { + "additionalProperties": false, + "properties": { + "Id": { + "markdownDescription": "", + "title": "Id", + "type": "string" + }, + "Value": { + "$ref": "#/definitions/AWS::Connect::Rule.FieldValue", + "markdownDescription": "", + "title": "Value" + } + }, + "required": [ + 
"Id", + "Value" + ], + "type": "object" + }, + "AWS::Connect::Rule.FieldValue": { + "additionalProperties": false, + "properties": { + "BooleanValue": { + "markdownDescription": "", + "title": "BooleanValue", + "type": "boolean" + }, + "DoubleValue": { + "markdownDescription": "", + "title": "DoubleValue", + "type": "number" + }, + "EmptyValue": { + "markdownDescription": "", + "title": "EmptyValue", + "type": "object" + }, + "StringValue": { + "markdownDescription": "", + "title": "StringValue", + "type": "string" + } + }, + "type": "object" + }, "AWS::Connect::Rule.NotificationRecipientType": { "additionalProperties": false, "properties": { @@ -48549,6 +49402,23 @@ ], "type": "object" }, + "AWS::Connect::Rule.UpdateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + } + }, + "required": [ + "Fields" + ], + "type": "object" + }, "AWS::Connect::SecurityKey": { "additionalProperties": false, "properties": { @@ -49764,7 +50634,7 @@ ], "type": "object" }, - "AWS::ControlTower::EnabledControl": { + "AWS::ControlTower::EnabledBaseline": { "additionalProperties": false, "properties": { "Condition": { @@ -49799,34 +50669,48 @@ "Properties": { "additionalProperties": false, "properties": { - "ControlIdentifier": { - "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. 
For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", - "title": "ControlIdentifier", + "BaselineIdentifier": { + "markdownDescription": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", + "title": "BaselineIdentifier", + "type": "string" + }, + "BaselineVersion": { + "markdownDescription": "The enabled version of the `Baseline` .", + "title": "BaselineVersion", "type": "string" }, "Parameters": { "items": { - "$ref": "#/definitions/AWS::ControlTower::EnabledControl.EnabledControlParameter" + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline.Parameter" }, - "markdownDescription": "Array of `EnabledControlParameter` objects.", + "markdownDescription": "Parameters that are applied when enabling this `Baseline` . These parameters configure the behavior of the baseline.", "title": "Parameters", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags associated with input to `EnableBaseline` .", + "title": "Tags", + "type": "array" + }, "TargetIdentifier": { - "markdownDescription": "The ARN of the organizational unit. 
For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "markdownDescription": "The target on which to enable the `Baseline` .", "title": "TargetIdentifier", "type": "string" } }, "required": [ - "ControlIdentifier", + "BaselineIdentifier", + "BaselineVersion", "TargetIdentifier" ], "type": "object" }, "Type": { "enum": [ - "AWS::ControlTower::EnabledControl" + "AWS::ControlTower::EnabledBaseline" ], "type": "string" }, @@ -49845,27 +50729,132 @@ ], "type": "object" }, - "AWS::ControlTower::EnabledControl.EnabledControlParameter": { + "AWS::ControlTower::EnabledBaseline.Parameter": { "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key of a key/value pair. It is of type `string` .", + "markdownDescription": "A string denoting the parameter key.", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The value of a key/value pair. It can be of type `array` , `string` , `number` , `object` , or `boolean` . 
[Note: The *Type* field that follows may show a single type such as Number, which is only one possible type.]", + "markdownDescription": "A low-level `Document` object of any type (for example, a Java Object).", "title": "Value", "type": "object" } }, - "required": [ - "Key", - "Value" - ], "type": "object" }, - "AWS::ControlTower::LandingZone": { + "AWS::ControlTower::EnabledControl": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ControlIdentifier": { + "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "title": "ControlIdentifier", + "type": "string" + }, + "Parameters": { + "items": { + "$ref": "#/definitions/AWS::ControlTower::EnabledControl.EnabledControlParameter" + }, + "markdownDescription": "Array of `EnabledControlParameter` objects.", + "title": "Parameters", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags to be applied to the enabled control.", + "title": "Tags", + "type": "array" + }, + "TargetIdentifier": { + "markdownDescription": "The ARN of the organizational unit. 
For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "title": "TargetIdentifier", + "type": "string" + } + }, + "required": [ + "ControlIdentifier", + "TargetIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::ControlTower::EnabledControl" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::ControlTower::EnabledControl.EnabledControlParameter": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key of a key/value pair. It is of type `string` .", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of a key/value pair. It can be of type `array` , `string` , `number` , `object` , or `boolean` . [Note: The *Type* field that follows may show a single type such as Number, which is only one possible type.]", + "title": "Value", + "type": "object" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, + "AWS::ControlTower::LandingZone": { "additionalProperties": false, "properties": { "Condition": { @@ -50227,6 +51216,7 @@ } }, "required": [ + "DefaultExpirationDays", "DomainName" ], "type": "object" @@ -51221,7 +52211,9 @@ } }, "required": [ - "DomainName" + "Description", + "DomainName", + "ObjectTypeName" ], "type": "object" }, @@ -59633,7 +60625,7 @@ }, "type": "object" }, - "AWS::Detective::Graph": { + "AWS::DataZone::DataSource": { "additionalProperties": false, "properties": { "Condition": { @@ -59668,25 +60660,82 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoEnableMembers": { - "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property 
is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", - "title": "AutoEnableMembers", - "type": "boolean" - }, - "Tags": { + "AssetFormsInput": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::DataZone::DataSource.FormInput" }, - "markdownDescription": "The tag values to assign to the new behavior graph.", - "title": "Tags", + "markdownDescription": "The metadata forms attached to the assets that the data source works with.", + "title": "AssetFormsInput", "type": "array" + }, + "Configuration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.DataSourceConfigurationInput", + "markdownDescription": "The configuration of the data source.", + "title": "Configuration" + }, + "Description": { + "markdownDescription": "The description of the data source.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain where the data source is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnableSetting": { + "markdownDescription": "Specifies whether the data source is enabled.", + "title": "EnableSetting", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The unique identifier of the Amazon DataZone environment to which the data source publishes assets.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the data source.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which you want to add this data source.", + "title": "ProjectIdentifier", + "type": "string" + }, + 
"PublishOnImport": { + "markdownDescription": "Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog.", + "title": "PublishOnImport", + "type": "boolean" + }, + "Recommendation": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RecommendationConfiguration", + "markdownDescription": "Specifies whether the business name generation is to be enabled for this data source.", + "title": "Recommendation" + }, + "Schedule": { + "$ref": "#/definitions/AWS::DataZone::DataSource.ScheduleConfiguration", + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule" + }, + "Type": { + "markdownDescription": "The type of the data source.", + "title": "Type", + "type": "string" } }, + "required": [ + "DomainIdentifier", + "EnvironmentIdentifier", + "Name", + "ProjectIdentifier", + "Type" + ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::Graph" + "AWS::DataZone::DataSource" ], "type": "string" }, @@ -59700,11 +60749,1027 @@ } }, "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.DataSourceConfigurationInput": { + "additionalProperties": false, + "properties": { + "GlueRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.GlueRunConfigurationInput", + "markdownDescription": "The configuration of the AWS Glue data source.", + "title": "GlueRunConfiguration" + }, + "RedshiftRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftRunConfigurationInput", + "markdownDescription": "The configuration of the Amazon Redshift data source.", + "title": "RedshiftRunConfiguration" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.FilterExpression": { + "additionalProperties": false, + "properties": { + "Expression": { + "markdownDescription": "The search filter expression.", + "title": "Expression", + "type": "string" + }, + "Type": { + "markdownDescription": "The search filter 
explresison type.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Expression", "Type" ], "type": "object" }, - "AWS::Detective::MemberInvitation": { + "AWS::DataZone::DataSource.FormInput": { + "additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the metadata form.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The name of the metadata form.", + "title": "FormName", + "type": "string" + }, + "TypeIdentifier": { + "markdownDescription": "The ID of the metadata form type.", + "title": "TypeIdentifier", + "type": "string" + }, + "TypeRevision": { + "markdownDescription": "The revision of the metadata form type.", + "title": "TypeRevision", + "type": "string" + } + }, + "required": [ + "FormName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.GlueRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the AWS Glue data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RelationalFilterConfigurations" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RecommendationConfiguration": { + "additionalProperties": false, + "properties": { + "EnableBusinessNameGeneration": { + "markdownDescription": "Specifies whether automatic business name generation is to be enabled or not as part of the recommendation configuration.", + "title": "EnableBusinessNameGeneration", + "type": "boolean" + } + }, + "type": "object" + }, + 
"AWS::DataZone::DataSource.RedshiftClusterStorage": { + "additionalProperties": false, + "properties": { + "ClusterName": { + "markdownDescription": "The name of an Amazon Redshift cluster.", + "title": "ClusterName", + "type": "string" + } + }, + "required": [ + "ClusterName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftCredentialConfiguration": { + "additionalProperties": false, + "properties": { + "SecretManagerArn": { + "markdownDescription": "The ARN of a secret manager for an Amazon Redshift cluster.", + "title": "SecretManagerArn", + "type": "string" + } + }, + "required": [ + "SecretManagerArn" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the Amazon Redshift data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RedshiftCredentialConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftCredentialConfiguration", + "markdownDescription": "The details of the credentials required to access an Amazon Redshift cluster.", + "title": "RedshiftCredentialConfiguration" + }, + "RedshiftStorage": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftStorage", + "markdownDescription": "The details of the Amazon Redshift storage as part of the configuration of an Amazon Redshift data source run.", + "title": "RedshiftStorage" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RedshiftCredentialConfiguration", + "RedshiftStorage", + "RelationalFilterConfigurations" + ], + "type": "object" + }, + 
"AWS::DataZone::DataSource.RedshiftServerlessStorage": { + "additionalProperties": false, + "properties": { + "WorkgroupName": { + "markdownDescription": "The name of the Amazon Redshift Serverless workgroup.", + "title": "WorkgroupName", + "type": "string" + } + }, + "required": [ + "WorkgroupName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftStorage": { + "additionalProperties": false, + "properties": { + "RedshiftClusterSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftClusterStorage", + "markdownDescription": "The details of the Amazon Redshift cluster source.", + "title": "RedshiftClusterSource" + }, + "RedshiftServerlessSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftServerlessStorage", + "markdownDescription": "The details of the Amazon Redshift Serverless workgroup source.", + "title": "RedshiftServerlessSource" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.RelationalFilterConfiguration": { + "additionalProperties": false, + "properties": { + "DatabaseName": { + "markdownDescription": "The database name specified in the relational filter configuration for the data source.", + "title": "DatabaseName", + "type": "string" + }, + "FilterExpressions": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.FilterExpression" + }, + "markdownDescription": "The filter expressions specified in the relational filter configuration for the data source.", + "title": "FilterExpressions", + "type": "array" + }, + "SchemaName": { + "markdownDescription": "The schema name specified in the relational filter configuration for the data source.", + "title": "SchemaName", + "type": "string" + } + }, + "required": [ + "DatabaseName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.ScheduleConfiguration": { + "additionalProperties": false, + "properties": { + "Schedule": { + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule", + "type": "string" + }, 
+ "Timezone": { + "markdownDescription": "The timezone of the data source run.", + "title": "Timezone", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Domain": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the Amazon DataZone domain.", + "title": "Description", + "type": "string" + }, + "DomainExecutionRole": { + "markdownDescription": "The domain execution role that is created when an Amazon DataZone domain is created. The domain execution role is created in the AWS account that houses the Amazon DataZone domain.", + "title": "DomainExecutionRole", + "type": "string" + }, + "KmsKeyIdentifier": { + "markdownDescription": "The identifier of the AWS Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data.", + "title": "KmsKeyIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone domain.", + "title": "Name", + "type": "string" + }, + "SingleSignOn": { + "$ref": "#/definitions/AWS::DataZone::Domain.SingleSignOn", + "markdownDescription": "The single sign-on details in Amazon DataZone.", + "title": "SingleSignOn" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags specified for the Amazon DataZone domain.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DomainExecutionRole", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Domain" + ], + 
"type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Domain.SingleSignOn": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The type of single sign-on in Amazon DataZone.", + "title": "Type", + "type": "string" + }, + "UserAssignment": { + "markdownDescription": "The single sign-on user assignment in Amazon DataZone.", + "title": "UserAssignment", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Environment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the environment.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentProfileIdentifier": { + "markdownDescription": "The identifier of the environment profile that is used to create this Amazon DataZone environment.", + "title": "EnvironmentProfileIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone environment.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone environment.", + "title": 
"Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which this environment is created.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::Environment.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of this Amazon DataZone environment.", + "title": "UserParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnvironmentProfileIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Environment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Environment.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment parameter.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which an environment blueprint exists.", + "title": "DomainIdentifier", + "type": "string" + }, 
+ "EnabledRegions": { + "items": { + "type": "string" + }, + "markdownDescription": "The enabled AWS Regions specified in a blueprint configuration.", + "title": "EnabledRegions", + "type": "array" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of the environment blueprint.\n\nIn the current release, only the following values are supported: `DefaultDataLake` and `DefaultDataWarehouse` .", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "ManageAccessRoleArn": { + "markdownDescription": "The ARN of the manage access role.", + "title": "ManageAccessRoleArn", + "type": "string" + }, + "ProvisioningRoleArn": { + "markdownDescription": "The ARN of the provisioning role.", + "title": "ProvisioningRoleArn", + "type": "string" + }, + "RegionalParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter" + }, + "markdownDescription": "The regional parameters of the environment blueprint.", + "title": "RegionalParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnabledRegions", + "EnvironmentBlueprintIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentBlueprintConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter": { + "additionalProperties": false, + "properties": { + "Parameters": { + "additionalProperties": true, + "markdownDescription": "A string to string map containing parameters for the region.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Parameters", + "type": "object" + }, + "Region": { + "markdownDescription": "The region specified in the environment parameter.", + "title": "Region", + "type": "string" 
+ } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AwsAccountId": { + "markdownDescription": "The identifier of an AWS account in which an environment profile exists.", + "title": "AwsAccountId", + "type": "string" + }, + "AwsAccountRegion": { + "markdownDescription": "The AWS Region in which an environment profile exists.", + "title": "AwsAccountRegion", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the environment profile.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment profile exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of a blueprint with which an environment profile is created.", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the environment profile.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of a project in which an environment profile exists.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of this Amazon DataZone environment profile.", + "title": "UserParameters", + "type": 
"array" + } + }, + "required": [ + "AwsAccountId", + "AwsAccountRegion", + "DomainIdentifier", + "EnvironmentBlueprintIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentProfile" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name specified in the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment parameter.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Project": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of a project.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of an Amazon DataZone domain where the project exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone project.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a project.", + "title": "Name", + "type": "string" + } + },
+ "required": [ + "DomainIdentifier", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Project" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicableAssetTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The asset types included in the subscription target.", + "title": "ApplicableAssetTypes", + "type": "array" + }, + "AuthorizedPrincipals": { + "items": { + "type": "string" + }, + "markdownDescription": "The authorized principals included in the subscription target.", + "title": "AuthorizedPrincipals", + "type": "array" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain in which subscription target is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The ID of the environment in which subscription target is created.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "ManageAccessRole": { + "markdownDescription": "The manage access role that is used to create the subscription target.", + "title": "ManageAccessRole", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the subscription target.", + "title": "Name", + "type": "string" + }, + "Provider": { + "markdownDescription": "The 
provider of the subscription target.", + "title": "Provider", + "type": "string" + }, + "SubscriptionTargetConfig": { + "items": { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm" + }, + "markdownDescription": "The configuration of the subscription target.", + "title": "SubscriptionTargetConfig", + "type": "array" + }, + "Type": { + "markdownDescription": "The type of the subscription target.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "ApplicableAssetTypes", + "AuthorizedPrincipals", + "DomainIdentifier", + "EnvironmentIdentifier", + "ManageAccessRole", + "Name", + "SubscriptionTargetConfig", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::SubscriptionTarget" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm": { + "additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the subscription target configuration.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The form name included in the subscription target configuration.", + "title": "FormName", + "type": "string" + } + }, + "required": [ + "Content", + "FormName" + ], + "type": "object" + }, + "AWS::Detective::Graph": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoEnableMembers": 
{ + "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", + "title": "AutoEnableMembers", + "type": "boolean" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tag values to assign to the new behavior graph.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::Graph" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Detective::MemberInvitation": { "additionalProperties": false, "properties": { "Condition": { @@ -61372,6 +63437,11 @@ "AWS::DynamoDB::GlobalTable.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis data stream.", "title": "StreamArn", @@ -61978,6 +64048,11 @@ "AWS::DynamoDB::Table.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis 
data stream.\n\nLength Constraints: Minimum length of 37. Maximum length of 1024.", "title": "StreamArn", @@ -63690,6 +65765,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::EC2Fleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -64376,7 +66456,7 @@ "type": "string" }, "DeliverLogsPermissionArn": { - "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nThis parameter is required if the destination type is `cloud-watch-logs` and unsupported otherwise.", + "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to the log destination.\n\nThis parameter is required if the destination type is `cloud-watch-logs` , or if the destination type is `kinesis-data-firehose` and the delivery stream and the resources to monitor are in different accounts.", "title": "DeliverLogsPermissionArn", "type": "string" }, @@ -66595,7 +68675,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The volume type. 
For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -66791,6 +68871,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.MemoryGiBPerVCpu", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -69723,6 +71808,11 @@ "title": "DeviceIndex", "type": "string" }, + "EnaSrdSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification", + "markdownDescription": "Configures ENA Express for the network interface that this action attaches to the instance.", + "title": "EnaSrdSpecification" + }, "InstanceId": { "markdownDescription": "The ID of the instance to which you will attach the ENI.", "title": "InstanceId", @@ -69762,6 +71852,33 @@ ], "type": "object" }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdEnabled": { + "markdownDescription": "Indicates whether ENA Express is enabled for the network interface.", + "title": "EnaSrdEnabled", + "type": "boolean" + }, + "EnaSrdUdpSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification", + "markdownDescription": "Configures ENA Express for UDP network traffic.", + "title": "EnaSrdUdpSpecification" + } + }, + "type": "object" + }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdUdpEnabled": { + "markdownDescription": "Indicates whether UDP traffic to and from the instance uses ENA Express. 
To specify this setting, you must first enable ENA Express.", + "title": "EnaSrdUdpEnabled", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::EC2::NetworkInterfacePermission": { "additionalProperties": false, "properties": { @@ -70076,7 +72193,6 @@ }, "required": [ "AddressFamily", - "MaxEntries", "PrefixListName" ], "type": "object" @@ -70438,12 +72554,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR 
format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70453,12 +72569,12 @@ "type": "string" }, "DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70487,12 +72603,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security 
group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70578,12 +72694,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR 
format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70593,12 +72709,12 @@ "type": "string" }, 
"DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for an AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for an AWS service. This is the AWS service to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70686,12 +72802,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor 
examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -71043,7 +73159,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The volume type. 
For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -71280,6 +73396,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::SpotFleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -75831,7 +77952,7 @@ "type": "string" }, "Encrypted": { - "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) in the *Amazon Elastic Compute Cloud User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .", + "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/ebs/latest/userguide/work-with-ebs-encr.html#encryption-by-default) in the *Amazon EBS User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. 
For more information, see [Supported instance types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances) .", "title": "Encrypted", "type": "boolean" }, @@ -75879,7 +78000,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nDefault: `gp2`", + "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) .\n\nDefault: `gp2`", "title": "VolumeType", "type": "string" } @@ -77002,7 +79123,7 @@ "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` will not be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that is run using *capacityProviderA* , four tasks would use *capacityProviderB* .", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. 
Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", "title": "Weight", "type": "number" } @@ -77536,7 +79657,7 @@ "additionalProperties": false, "properties": { "Field": { - "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `CPU` and `MEMORY` . For the `random` placement strategy, this field is not used.", + "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that's applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `cpu` and `memory` . 
For the `random` placement strategy, this field is not used.", "title": "Field", "type": "string" }, @@ -77647,6 +79768,16 @@ "markdownDescription": "The `portName` must match the name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service.", "title": "PortName", "type": "string" + }, + "Timeout": { + "$ref": "#/definitions/AWS::ECS::Service.TimeoutConfiguration", + "markdownDescription": "A reference to an object that represents the configured timeouts for Service Connect.", + "title": "Timeout" + }, + "Tls": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsConfiguration", + "markdownDescription": "A reference to an object that represents a Transport Layer Security (TLS) configuration.", + "title": "Tls" } }, "required": [ @@ -77654,6 +79785,41 @@ ], "type": "object" }, + "AWS::ECS::Service.ServiceConnectTlsCertificateAuthority": { + "additionalProperties": false, + "properties": { + "AwsPcaAuthorityArn": { + "markdownDescription": "The ARN of the AWS Private Certificate Authority certificate.", + "title": "AwsPcaAuthorityArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Service.ServiceConnectTlsConfiguration": { + "additionalProperties": false, + "properties": { + "IssuerCertificateAuthority": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsCertificateAuthority", + "markdownDescription": "The signer certificate authority.", + "title": "IssuerCertificateAuthority" + }, + "KmsKey": { + "markdownDescription": "The AWS Key Management Service key.", + "title": "KmsKey", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "IssuerCertificateAuthority" + ], + "type": "object" + }, "AWS::ECS::Service.ServiceManagedEBSVolumeConfiguration": { "additionalProperties": false, "properties": { @@ -77761,6 
+79927,22 @@ ], "type": "object" }, + "AWS::ECS::Service.TimeoutConfiguration": { + "additionalProperties": false, + "properties": { + "IdleTimeoutSeconds": { + "markdownDescription": "The amount of time in seconds a connection will stay active while idle. A value of `0` can be set to disable `idleTimeout` .\n\nThe `idleTimeout` default for `HTTP` / `HTTP2` / `GRPC` is 5 minutes.\n\nThe `idleTimeout` default for `TCP` is 1 hour.", + "title": "IdleTimeoutSeconds", + "type": "number" + }, + "PerRequestTimeoutSeconds": { + "markdownDescription": "The amount of time waiting for the upstream to respond with a complete response per request. A value of `0` can be set to disable `perRequestTimeout` . `perRequestTimeout` can only be set if Service Connect `appProtocol` isn't `TCP` . Only `idleTimeout` is allowed for `TCP` `appProtocol` .", + "title": "PerRequestTimeoutSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::ECS::TaskDefinition": { "additionalProperties": false, "properties": { @@ -77954,6 +80136,14 @@ "title": "Cpu", "type": "number" }, + "CredentialSpecs": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . 
You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide a ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", + "title": "CredentialSpecs", + "type": "array" + }, "DependsOn": { "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.ContainerDependency" @@ -78321,7 +80511,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The file type to use. The only supported value is `s3` .", + "markdownDescription": "The file type to use. Environment files are objects in Amazon S3. The only supported value is `s3` .", "title": "Type", "type": "string" }, @@ -78933,6 +81123,14 @@ "title": "ServiceRegistries", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The metadata that you apply to the task set to help you categorize and organize them. 
Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + }, "TaskDefinition": { "markdownDescription": "The task definition for the tasks in the task set to use. If a revision isn't specified, the latest `ACTIVE` revision is used.", "title": "TaskDefinition", @@ -80926,7 +83124,7 @@ "title": "ManagedScalingPolicy" }, "Name": { - "markdownDescription": "The name of the cluster.", + "markdownDescription": "The name of the cluster. 
This parameter can't contain the characters <, >, $, |, or ` (backtick).", "title": "Name", "type": "string" }, @@ -81569,6 +83767,11 @@ "markdownDescription": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", "title": "TerminationProtected", "type": "boolean" + }, + "UnhealthyNodeReplacement": { + "markdownDescription": "Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.", + "title": "UnhealthyNodeReplacement", + "type": "boolean" } }, "type": "object" @@ -85076,7 +87279,7 @@ "Port": { "markdownDescription": "The port number that the cache engine is listening on.", "title": "Port", - "type": "number" + "type": "string" } }, "type": "object" @@ -87469,6 +89672,11 @@ "Properties": { "additionalProperties": false, "properties": { + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": { + "markdownDescription": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink .", + "title": "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic", + "type": "string" + }, "IpAddressType": { "markdownDescription": "The IP address type. The possible values are `ipv4` (for IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). 
You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.", "title": "IpAddressType", @@ -93180,7 +95388,7 @@ "type": "number" }, "Mode": { - "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, , or if it using a `USER_PROVISIONED` value.", + "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it using a `USER_PROVISIONED` value.", "title": "Mode", "type": "string" } @@ -93301,7 +95509,7 @@ "type": "string" }, "HAPairs": { - "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file system are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", + "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . 
For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 12.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "title": "HAPairs", "type": "number" }, @@ -93324,7 +95532,7 @@ "type": "number" }, "ThroughputCapacityPerHAPair": { - "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", + "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 
2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 12).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "title": "ThroughputCapacityPerHAPair", "type": "number" }, @@ -98701,6 +100909,11 @@ "title": "CatalogEncryptionMode", "type": "string" }, + "CatalogEncryptionServiceRole": { + "markdownDescription": "The role that AWS Glue assumes to encrypt and decrypt the Data Catalog objects on the caller's behalf.", + "title": "CatalogEncryptionServiceRole", + "type": "string" + }, "SseAwsKmsKeyId": { "markdownDescription": "The ID of the AWS KMS key to use for encryption at rest.", "title": "SseAwsKmsKeyId", @@ -100906,6 +103119,117 @@ }, "type": "object" }, + "AWS::Glue::TableOptimizer": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CatalogId": { + "markdownDescription": "The catalog ID of the table.", + "title": "CatalogId", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", + "title": "DatabaseName", + "type": "string" + }, + "TableName": { + "markdownDescription": "The table name. 
For Hive compatibility, this must be entirely lowercase.", + "title": "TableName", + "type": "string" + }, + "TableOptimizerConfiguration": { + "$ref": "#/definitions/AWS::Glue::TableOptimizer.TableOptimizerConfiguration", + "markdownDescription": "Specifies configuration details of a table optimizer.", + "title": "TableOptimizerConfiguration" + }, + "Type": { + "markdownDescription": "The type of table optimizer. Currently, the only valid value is compaction.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "CatalogId", + "DatabaseName", + "TableName", + "TableOptimizerConfiguration", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Glue::TableOptimizer" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Glue::TableOptimizer.TableOptimizerConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Whether the table optimization is enabled.", + "title": "Enabled", + "type": "boolean" + }, + "RoleArn": { + "markdownDescription": "A role passed by the caller which gives the service permission to update the resources associated with the optimizer on the caller's behalf.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "Enabled", + "RoleArn" + ], + "type": "object" + }, "AWS::Glue::Trigger": { "additionalProperties": false, "properties": { @@ -105814,7 +108138,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::GuardDuty::Filter.TagItem" }, "markdownDescription": "The tags to be added to a new filter resource. 
Each tag consists of a key and an optional value, both of which you define.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", @@ -105822,12 +108146,7 @@ } }, "required": [ - "Action", - "Description", - "DetectorId", - "FindingCriteria", - "Name", - "Rank" + "FindingCriteria" ], "type": "object" }, @@ -105934,14 +108253,37 @@ "additionalProperties": false, "properties": { "Criterion": { + "additionalProperties": false, "markdownDescription": "Represents a map of finding properties that match specified conditions and values when querying findings.\n\nFor information about JSON criterion mapping to their console equivalent, see [Finding criteria](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_filter-findings.html#filter_criteria) . The following are the available criterion:\n\n- accountId\n- id\n- region\n- severity\n\nTo filter on the basis of severity, API and CFN use the following input list for the condition:\n\n- *Low* : `[\"1\", \"2\", \"3\"]`\n- *Medium* : `[\"4\", \"5\", \"6\"]`\n- *High* : `[\"7\", \"8\", \"9\"]`\n\nFor more information, see [Severity levels for GuardDuty findings](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity) .\n- type\n- updatedAt\n\nType: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.\n- resource.accessKeyDetails.accessKeyId\n- resource.accessKeyDetails.principalId\n- resource.accessKeyDetails.userName\n- resource.accessKeyDetails.userType\n- resource.instanceDetails.iamInstanceProfile.id\n- resource.instanceDetails.imageId\n- resource.instanceDetails.instanceId\n- resource.instanceDetails.tags.key\n- resource.instanceDetails.tags.value\n- resource.instanceDetails.networkInterfaces.ipv6Addresses\n- resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress\n- 
resource.instanceDetails.networkInterfaces.publicDnsName\n- resource.instanceDetails.networkInterfaces.publicIp\n- resource.instanceDetails.networkInterfaces.securityGroups.groupId\n- resource.instanceDetails.networkInterfaces.securityGroups.groupName\n- resource.instanceDetails.networkInterfaces.subnetId\n- resource.instanceDetails.networkInterfaces.vpcId\n- resource.instanceDetails.outpostArn\n- resource.resourceType\n- resource.s3BucketDetails.publicAccess.effectivePermissions\n- resource.s3BucketDetails.name\n- resource.s3BucketDetails.tags.key\n- resource.s3BucketDetails.tags.value\n- resource.s3BucketDetails.type\n- service.action.actionType\n- service.action.awsApiCallAction.api\n- service.action.awsApiCallAction.callerType\n- service.action.awsApiCallAction.errorCode\n- service.action.awsApiCallAction.remoteIpDetails.city.cityName\n- service.action.awsApiCallAction.remoteIpDetails.country.countryName\n- service.action.awsApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.awsApiCallAction.remoteIpDetails.organization.asn\n- service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg\n- service.action.awsApiCallAction.serviceName\n- service.action.dnsRequestAction.domain\n- service.action.networkConnectionAction.blocked\n- service.action.networkConnectionAction.connectionDirection\n- service.action.networkConnectionAction.localPortDetails.port\n- service.action.networkConnectionAction.protocol\n- service.action.networkConnectionAction.remoteIpDetails.city.cityName\n- service.action.networkConnectionAction.remoteIpDetails.country.countryName\n- service.action.networkConnectionAction.remoteIpDetails.ipAddressV4\n- service.action.networkConnectionAction.remoteIpDetails.organization.asn\n- service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg\n- service.action.networkConnectionAction.remotePortDetails.port\n- service.action.awsApiCallAction.remoteAccountDetails.affiliated\n- 
service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.kubernetesApiCallAction.requestUri\n- service.action.networkConnectionAction.localIpDetails.ipAddressV4\n- service.action.networkConnectionAction.protocol\n- service.action.awsApiCallAction.serviceName\n- service.action.awsApiCallAction.remoteAccountDetails.accountId\n- service.additionalInfo.threatListName\n- service.resourceRole\n- resource.eksClusterDetails.name\n- resource.kubernetesDetails.kubernetesWorkloadDetails.name\n- resource.kubernetesDetails.kubernetesWorkloadDetails.namespace\n- resource.kubernetesDetails.kubernetesUserDetails.username\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix\n- service.ebsVolumeScanDetails.scanId\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash\n- resource.ecsClusterDetails.name\n- resource.ecsClusterDetails.taskDetails.containers.image\n- resource.ecsClusterDetails.taskDetails.definitionArn\n- resource.containerDetails.image\n- resource.rdsDbInstanceDetails.dbInstanceIdentifier\n- resource.rdsDbInstanceDetails.dbClusterIdentifier\n- resource.rdsDbInstanceDetails.engine\n- resource.rdsDbUserDetails.user\n- resource.rdsDbInstanceDetails.tags.key\n- resource.rdsDbInstanceDetails.tags.value\n- service.runtimeDetails.process.executableSha256\n- service.runtimeDetails.process.name\n- service.runtimeDetails.process.name\n- resource.lambdaDetails.functionName\n- resource.lambdaDetails.functionArn\n- resource.lambdaDetails.tags.key\n- resource.lambdaDetails.tags.value", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + } + }, "title": "Criterion", "type": "object" + } + }, + 
"type": "object" + }, + "AWS::GuardDuty::Filter.TagItem": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "", + "title": "Key", + "type": "string" }, - "ItemType": { - "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + "Value": { + "markdownDescription": "", + "title": "Value", + "type": "string" } }, + "required": [ + "Key", + "Value" + ], "type": "object" }, "AWS::GuardDuty::IPSet": { @@ -106205,9 +108547,7 @@ } }, "required": [ - "DetectorId", - "Email", - "MemberId" + "Email" ], "type": "object" }, @@ -108445,6 +110785,77 @@ }, "type": "object" }, + "AWS::IVS::Stage": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Stage name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-stage-tag.html) .", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::IVS::Stage" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::IVS::StreamKey": { "additionalProperties": false, "properties": { @@ -110953,7 +113364,8 @@ } }, "required": [ - "Name" + "Name", + "SemanticVersion" ], "type": "object" }, @@ -111295,23 
+113707,113 @@ "Properties": { "additionalProperties": false, "properties": { - "ResourceGroupTags": { - "items": { - "$ref": "#/definitions/Tag" + "ResourceGroupTags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "title": "ResourceGroupTags", + "type": "array" + } + }, + "required": [ + "ResourceGroupTags" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Inspector::ResourceGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ScanName": { + "markdownDescription": "The name of the CIS scan configuration.", + "title": "ScanName", + "type": "string" + }, + "Schedule": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Schedule", + "markdownDescription": "The CIS scan configuration's schedule.", + "title": "Schedule" + }, + "SecurityLevel": { + "markdownDescription": "The CIS scan configuration's CIS Benchmark level.", + "title": "SecurityLevel", + "type": "string" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "The CIS scan configuration's tags.", + "patternProperties": { + 
"^[a-zA-Z0-9]+$": { + "type": "string" + } }, - "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", - "title": "ResourceGroupTags", - "type": "array" + "title": "Tags", + "type": "object" + }, + "Targets": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.CisTargets", + "markdownDescription": "The CIS scan configuration's targets.", + "title": "Targets" } }, - "required": [ - "ResourceGroupTags" - ], "type": "object" }, "Type": { "enum": [ - "AWS::Inspector::ResourceGroup" + "AWS::InspectorV2::CisScanConfiguration" ], "type": "string" }, @@ -111325,8 +113827,132 @@ } }, "required": [ - "Type", - "Properties" + "Type" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.CisTargets": { + "additionalProperties": false, + "properties": { + "AccountIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The CIS target account ids.", + "title": "AccountIds", + "type": "array" + }, + "TargetResourceTags": { + "markdownDescription": "The CIS target resource tags.", + "title": "TargetResourceTags", + "type": "object" + } + }, + "required": [ + "AccountIds" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.DailySchedule": { + "additionalProperties": false, + "properties": { + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The schedule start time.", + "title": "StartTime" + } + }, + "required": [ + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.MonthlySchedule": { + "additionalProperties": false, + "properties": { + "Day": { + "markdownDescription": "The monthly schedule's day.", + "title": "Day", + "type": "string" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + 
"markdownDescription": "The monthly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Day", + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Schedule": { + "additionalProperties": false, + "properties": { + "Daily": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.DailySchedule", + "markdownDescription": "A daily schedule.", + "title": "Daily" + }, + "Monthly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.MonthlySchedule", + "markdownDescription": "A monthly schedule.", + "title": "Monthly" + }, + "OneTime": { + "markdownDescription": "A one time schedule.", + "title": "OneTime", + "type": "object" + }, + "Weekly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.WeeklySchedule", + "markdownDescription": "A weekly schedule.", + "title": "Weekly" + } + }, + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Time": { + "additionalProperties": false, + "properties": { + "TimeOfDay": { + "markdownDescription": "The time of day in 24-hour format (00:00).", + "title": "TimeOfDay", + "type": "string" + }, + "TimeZone": { + "markdownDescription": "The timezone.", + "title": "TimeZone", + "type": "string" + } + }, + "required": [ + "TimeOfDay", + "TimeZone" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.WeeklySchedule": { + "additionalProperties": false, + "properties": { + "Days": { + "items": { + "type": "string" + }, + "markdownDescription": "The weekly schedule's days.", + "title": "Days", + "type": "array" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The weekly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Days", + "StartTime" ], "type": "object" }, @@ -113210,6 +115836,11 @@ "title": "ServerCertificateArns", "type": "array" }, + "ServerCertificateConfig": { + "$ref": 
"#/definitions/AWS::IoT::DomainConfiguration.ServerCertificateConfig", + "markdownDescription": "The server certificate configuration.\n\nFor more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "ServerCertificateConfig" + }, "ServiceType": { "markdownDescription": "The type of service delivered by the endpoint.\n\n> AWS IoT Core currently supports only the `DATA` service type.", "title": "ServiceType", @@ -113272,6 +115903,17 @@ }, "type": "object" }, + "AWS::IoT::DomainConfiguration.ServerCertificateConfig": { + "additionalProperties": false, + "properties": { + "EnableOCSPCheck": { + "markdownDescription": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "EnableOCSPCheck", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::IoT::DomainConfiguration.ServerCertificateSummary": { "additionalProperties": false, "properties": { @@ -121224,7 +123866,7 @@ "title": "AccessPolicyIdentity" }, "AccessPolicyPermission": { - "markdownDescription": "The permission level for this access policy. Choose either a `ADMINISTRATOR` or `VIEWER` . Note that a project `ADMINISTRATOR` is also known as a project owner.", + "markdownDescription": "The permission level for this access policy. 
Note that a project `ADMINISTRATOR` is also known as a project owner.", "title": "AccessPolicyPermission", "type": "string" }, @@ -121277,7 +123919,7 @@ }, "User": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.User", - "markdownDescription": "The IAM Identity Center user to which this access policy maps.", + "markdownDescription": "An IAM Identity Center user identity.", "title": "User" } }, @@ -121288,12 +123930,12 @@ "properties": { "Portal": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Portal", - "markdownDescription": "The AWS IoT SiteWise Monitor portal for this access policy.", + "markdownDescription": "Identifies an AWS IoT SiteWise Monitor portal.", "title": "Portal" }, "Project": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Project", - "markdownDescription": "The AWS IoT SiteWise Monitor project for this access policy.", + "markdownDescription": "Identifies a specific AWS IoT SiteWise Monitor project.", "title": "Project" } }, @@ -121347,7 +123989,7 @@ "additionalProperties": false, "properties": { "id": { - "markdownDescription": "The ID of the user.", + "markdownDescription": "The IAM Identity Center ID of the user.", "title": "id", "type": "string" } @@ -121390,15 +124032,18 @@ "additionalProperties": false, "properties": { "AssetDescription": { - "markdownDescription": "A description for the asset.", + "markdownDescription": "The ID of the asset, in UUID format.", "title": "AssetDescription", "type": "string" }, + "AssetExternalId": { + "type": "string" + }, "AssetHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::Asset.AssetHierarchy" }, - "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyLogicalId` . A hierarchy specifies allowed parent/child asset relationships.", + "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyId` . 
A hierarchy specifies allowed parent/child asset relationships.", "title": "AssetHierarchies", "type": "array" }, @@ -121408,7 +124053,7 @@ "type": "string" }, "AssetName": { - "markdownDescription": "A unique, friendly name for the asset.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A friendly name for the asset.", "title": "AssetName", "type": "string" }, @@ -121464,15 +124109,20 @@ "title": "ChildAssetId", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The ID of the hierarchy. This ID is a `hierarchyId` .", "title": "LogicalId", "type": "string" } }, "required": [ - "ChildAssetId", - "LogicalId" + "ChildAssetId" ], "type": "object" }, @@ -121480,17 +124130,23 @@ "additionalProperties": false, "properties": { "Alias": { - "markdownDescription": "The property alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .\n\nThe property alias must have 1-1000 characters.", + "markdownDescription": "The alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). 
For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .", "title": "Alias", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset property.\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset property.", "title": "LogicalId", "type": "string" }, "NotificationState": { - "markdownDescription": "The MQTT notification state ( `ENABLED` or `DISABLED` ) for this asset property. When the notification state is `ENABLED` , AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .\n\n> You must use all caps for the NotificationState parameter. If you use lower case letters, you will receive a schema validation error.", + "markdownDescription": "The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. 
For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .", "title": "NotificationState", "type": "string" }, @@ -121500,9 +124156,6 @@ "type": "string" } }, - "required": [ - "LogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::AssetModel": { @@ -121544,7 +124197,7 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel" }, - "markdownDescription": "The composite asset models that are part of this asset model. Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. You can use composite asset models to define alarms on this asset model.", + "markdownDescription": "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.\n\n> When creating custom composite models, you need to use [CreateAssetModelCompositeModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModelCompositeModel.html) . 
For more information, see [Creating custom composite models (Components)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-custom-composite-models.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelCompositeModels", "type": "array" }, @@ -121553,16 +124206,19 @@ "title": "AssetModelDescription", "type": "string" }, + "AssetModelExternalId": { + "type": "string" + }, "AssetModelHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelHierarchy" }, - "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelHierarchies", "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -121570,10 +124226,13 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" }, - "markdownDescription": "The property definitions of the asset model. For more information, see [Defining data properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelProperties", "type": "array" }, + "AssetModelType": { + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -121612,6 +124271,9 @@ "AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel": { "additionalProperties": false, "properties": { + "ComposedAssetModelId": { + "type": "string" + }, "CompositeModelProperties": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" @@ -121625,11 +124287,26 @@ "title": "Description", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "Name": { "markdownDescription": "The name of the composite model.", "title": "Name", "type": "string" }, + "ParentAssetModelCompositeModelExternalId": { + "type": "string" + }, + "Path": { + "items": { + "type": "string" + }, + "type": "array" + }, "Type": { "markdownDescription": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` .", "title": "Type", @@ -121646,24 +124323,29 @@ "additionalProperties": false, "properties": { "ChildAssetModelId": { - "markdownDescription": "The Id of the asset model.", + "markdownDescription": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "title": "ChildAssetModelId", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+`", + "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model hierarchy.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation.", "title": "Name", "type": "string" } }, "required": [ "ChildAssetModelId", - "LogicalId", "Name" ], "type": "object" @@ -121672,7 +124354,7 @@ "additionalProperties": false, "properties": { "DataType": { - "markdownDescription": "The data type of the asset model property. 
The value can be `STRING` , `INTEGER` , `DOUBLE` , `BOOLEAN` , or `STRUCT` .", + "markdownDescription": "The data type of the asset model property.", "title": "DataType", "type": "string" }, @@ -121681,19 +124363,25 @@ "title": "DataTypeSpec", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model property.\n\nThe maximum length is 256 characters, with the pattern `[^\\\\u0000-\\\\u001F\\\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset model property.", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model property.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model property.", "title": "Name", "type": "string" }, "Type": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyType", - "markdownDescription": "Contains a property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` .", + "markdownDescription": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "title": "Type" }, "Unit": { @@ -121704,7 +124392,6 @@ }, "required": [ "DataType", - "LogicalId", "Name", "Type" ], @@ -121725,7 +124412,7 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The friendly name of the variable to be used in the expression.\n\nThe maximum length is 64 characters with the pattern `^[a-z][a-z0-9_]*$` .", + "markdownDescription": "The friendly name of the variable to be used in the expression.", "title": "Name", "type": "string" }, @@ -121781,22 +124468,34 @@ }, "type": "object" }, + "AWS::IoTSiteWise::AssetModel.PropertyPathDefinition": { + "additionalProperties": false, + "properties": { + "Name": { + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, 
"AWS::IoTSiteWise::AssetModel.PropertyType": { "additionalProperties": false, "properties": { "Attribute": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Attribute", - "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [industrial IoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.\n\nThis is required if the `TypeName` is `Attribute` and has a `DefaultValue` .", + "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [IIoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.", "title": "Attribute" }, "Metric": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Metric", - "markdownDescription": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.\n\nThis is required if the `TypeName` is `Metric` .", + "markdownDescription": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.", "title": "Metric" }, "Transform": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Transform", - "markdownDescription": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.\n\nThis is required if the `TypeName` is `Transform` .", + "markdownDescription": "Specifies an asset transform property. 
A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.", "title": "Transform" }, "TypeName": { @@ -121855,20 +124554,35 @@ "AWS::IoTSiteWise::AssetModel.VariableValue": { "additionalProperties": false, "properties": { + "HierarchyExternalId": { + "type": "string" + }, + "HierarchyId": { + "type": "string" + }, "HierarchyLogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. 
For more information, see [Defining relationships between asset models (hierarchies)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", "title": "HierarchyLogicalId", "type": "string" }, + "PropertyExternalId": { + "type": "string" + }, + "PropertyId": { + "type": "string" + }, "PropertyLogicalId": { - "markdownDescription": "The `LogicalID` of the property to use as the variable.", + "markdownDescription": "The `LogicalID` of the property that is being referenced.", "title": "PropertyLogicalId", "type": "string" + }, + "PropertyPath": { + "items": { + "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyPathDefinition" + }, + "type": "array" } }, - "required": [ - "PropertyLogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::Dashboard": { @@ -122007,7 +124721,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -122061,7 +124775,7 @@ "type": "string" }, "CapabilityNamespace": { - "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .\n\nThe maximum length is 512 characters with the pattern `^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$` .", + "markdownDescription": "The namespace of the capability configuration. 
For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .", "title": "CapabilityNamespace", "type": "string" } @@ -122091,7 +124805,7 @@ "additionalProperties": false, "properties": { "GroupArn": { - "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/latest/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/latest/apireference/getgroup-get.html) in the *AWS IoT Greengrass API Reference* .", + "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* .", "title": "GroupArn", "type": "string" } @@ -122161,7 +124875,7 @@ "type": "string" }, "PortalAuthMode": { - "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center , you must enable IAM Identity Center . For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . 
This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management ( IAM ) to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", + "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center, you must enable IAM Identity Center. For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", "title": "PortalAuthMode", "type": "string" }, @@ -124848,6 +127562,11 @@ "title": "Name", "type": "string" }, + "Positioning": { + "markdownDescription": "FPort values for the GNSS, Stream, and ClockSync functions of the positioning information.", + "title": "Positioning", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -124934,6 +127653,41 @@ ], "type": "object" }, + "AWS::IoTWireless::WirelessDevice.Application": { + "additionalProperties": false, + "properties": { + "DestinationName": { + "markdownDescription": "The name of the position data destination that describes the IoT rule that processes the device's position data.", + "title": "DestinationName", + "type": "string" + }, + "FPort": { + "markdownDescription": "The name of the new destination for the device.", + "title": "FPort", + "type": "number" + }, + "Type": { + "markdownDescription": "Application type, which can be specified to 
obtain real-time position information of your LoRaWAN device.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::IoTWireless::WirelessDevice.FPorts": { + "additionalProperties": false, + "properties": { + "Applications": { + "items": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.Application" + }, + "markdownDescription": "LoRaWAN application configuration, which can be used to perform geolocation.", + "title": "Applications", + "type": "array" + } + }, + "type": "object" + }, "AWS::IoTWireless::WirelessDevice.LoRaWANDevice": { "additionalProperties": false, "properties": { @@ -124957,6 +127711,11 @@ "title": "DeviceProfileId", "type": "string" }, + "FPorts": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.FPorts", + "markdownDescription": "List of FPort assigned for different LoRaWAN application packages to use.", + "title": "FPorts" + }, "OtaaV10x": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.OtaaV10x", "markdownDescription": "OTAA device object for create APIs for v1.0.x", @@ -127038,7 +129797,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `**/*.png` - All .png files in all directories\n- `**/*.{png, ico, md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "ExclusionPatterns", "type": "array" }, @@ -127046,7 +129805,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `**/*.png` - All .png files in all directories\n- `**/*.{png, ico, md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "InclusionPatterns", "type": "array" }, @@ -130702,6 +133461,11 @@ "markdownDescription": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. 
You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "title": "S3DestinationConfiguration" }, + "SnowflakeDestinationConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration", + "markdownDescription": "Configure Snowflake destination", + "title": "SnowflakeDestinationConfiguration" + }, "SplunkDestinationConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SplunkDestinationConfiguration", "markdownDescription": "The configuration of a destination in Splunk for the delivery stream.", @@ -131253,6 +134017,11 @@ "title": "CompressionFormat", "type": "string" }, + "CustomTimeZone": { + "markdownDescription": "The time zone you prefer. UTC is the default.", + "title": "CustomTimeZone", + "type": "string" + }, "DataFormatConversionConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DataFormatConversionConfiguration", "markdownDescription": "The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.", @@ -131273,6 +134042,11 @@ "title": "ErrorOutputPrefix", "type": "string" }, + "FileExtension": { + "markdownDescription": "Specify a file extension. It will override the default file extension", + "title": "FileExtension", + "type": "string" + }, "Prefix": { "markdownDescription": "The `YYYY/MM/DD/HH` time format prefix is automatically used for delivered Amazon S3 files. 
For more information, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", "title": "Prefix", @@ -131895,6 +134669,153 @@ }, "type": "object" }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration": { + "additionalProperties": false, + "properties": { + "AccountUrl": { + "markdownDescription": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . Note that the protocol (https://) and port number are optional.", + "title": "AccountUrl", + "type": "string" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", + "markdownDescription": "", + "title": "CloudWatchLoggingOptions" + }, + "ContentColumnName": { + "markdownDescription": "The name of the record content column", + "title": "ContentColumnName", + "type": "string" + }, + "DataLoadingOption": { + "markdownDescription": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", + "title": "DataLoadingOption", + "type": "string" + }, + "Database": { + "markdownDescription": "All data in Snowflake is maintained in databases.", + "title": "Database", + "type": "string" + }, + "KeyPassphrase": { + "markdownDescription": "Passphrase to decrypt the private key when the key is encrypted. 
For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "KeyPassphrase", + "type": "string" + }, + "MetaDataColumnName": { + "markdownDescription": "The name of the record metadata column", + "title": "MetaDataColumnName", + "type": "string" + }, + "PrivateKey": { + "markdownDescription": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "PrivateKey", + "type": "string" + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", + "markdownDescription": "", + "title": "ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions", + "markdownDescription": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "RetryOptions" + }, + "RoleARN": { + "markdownDescription": "The Amazon Resource Name (ARN) of the Snowflake role", + "title": "RoleARN", + "type": "string" + }, + "S3BackupMode": { + "markdownDescription": "Choose an S3 backup mode", + "title": "S3BackupMode", + "type": "string" + }, + "S3Configuration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.S3DestinationConfiguration", + "markdownDescription": "", + "title": "S3Configuration" + }, + "Schema": { + "markdownDescription": "Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views", + "title": "Schema", + "type": "string" + }, + "SnowflakeRoleConfiguration": { + "$ref": 
"#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration", + "markdownDescription": "Optionally configure a Snowflake role. Otherwise the default user role will be used.", + "title": "SnowflakeRoleConfiguration" + }, + "SnowflakeVpcConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration", + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "SnowflakeVpcConfiguration" + }, + "Table": { + "markdownDescription": "All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.", + "title": "Table", + "type": "string" + }, + "User": { + "markdownDescription": "User login name for the Snowflake account.", + "title": "User", + "type": "string" + } + }, + "required": [ + "AccountUrl", + "Database", + "PrivateKey", + "RoleARN", + "S3Configuration", + "Schema", + "Table", + "User" + ], + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions": { + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "markdownDescription": "the time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "DurationInSeconds", + "type": "number" + } + }, + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Enable Snowflake role", + "title": "Enabled", + "type": "boolean" + }, + "SnowflakeRole": { + "markdownDescription": "The Snowflake role you wish to configure", + "title": "SnowflakeRole", + "type": "string" + } + }, + "type": "object" + }, + 
"AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration": { + "additionalProperties": false, + "properties": { + "PrivateLinkVpceId": { + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "PrivateLinkVpceId", + "type": "string" + } + }, + "required": [ + "PrivateLinkVpceId" + ], + "type": "object" + }, "AWS::KinesisFirehose::DeliveryStream.SplunkBufferingHints": { "additionalProperties": false, "properties": { @@ -133149,6 +136070,11 @@ "Properties": { "additionalProperties": false, "properties": { + "HybridAccessEnabled": { + "markdownDescription": "Indicates whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.", + "title": "HybridAccessEnabled", + "type": "boolean" + }, "ResourceArn": { "markdownDescription": "The Amazon Resource Name (ARN) of the resource.", "title": "ResourceArn", @@ -133535,7 +136461,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -133955,7 +136881,7 @@ "title": "FilterCriteria" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -134903,7 +137829,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -135143,7 +138069,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -140463,6 +143389,14 @@ "AWS::Location::Map.MapConfiguration": { "additionalProperties": false, "properties": { + "CustomLayers": { + "items": { + "type": "string" + }, + "markdownDescription": "Specifies the custom layers for the style. 
Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as the `POI` layer for the VectorEsriNavigation style.\n\n> Currently only `VectorEsriNavigation` supports CustomLayers. For more information, see [Custom Layers](https://docs.aws.amazon.com//location/latest/developerguide/map-concepts.html#map-custom-layers) .", + "title": "CustomLayers", + "type": "array" + }, "PoliticalView": { "markdownDescription": "Specifies the map political view selected from an available data provider.", "title": "PoliticalView", @@ -154697,6 +157631,10 @@ "type": "array" } }, + "required": [ + "ChannelGroupName", + "ChannelName" + ], "type": "object" }, "Type": { @@ -154715,7 +157653,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154789,6 +157728,9 @@ "type": "array" } }, + "required": [ + "ChannelGroupName" + ], "type": "object" }, "Type": { @@ -154807,7 +157749,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154863,6 +157806,8 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", "Policy" ], "type": "object" }, @@ -154984,7 +157929,9 @@ } }, "required": [ - "ContainerType" + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" ], "type": "object" }, @@ -155353,6 +158300,9 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", "Policy" ], "type": "object" }, @@ -186245,6 +189195,14 @@ "markdownDescription": "", "title": "Definition" }, + "LinkEntities": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of analysis Amazon Resource Names (ARNs) to be linked to the dashboard.", + "title": "LinkEntities", + "type": "array" + }, "LinkSharingConfiguration": { "$ref": "#/definitions/AWS::QuickSight::Dashboard.LinkSharingConfiguration", "markdownDescription": "A structure that contains the link sharing configurations that you want to apply overrides to.", "title": "LinkSharingConfiguration" }, @@ -214744,7 +217702,7 @@ "type": "boolean" 
}, "EnableHttpEndpoint": { - "markdownDescription": "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", + "markdownDescription": "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EnableHttpEndpoint", "type": "boolean" }, @@ -214899,7 +217857,7 @@ "type": "boolean" }, "StorageType": { - "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", + "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. 
The storage type isn't returned when you set it to `aurora` .", "title": "StorageType", "type": "string" }, @@ -215544,7 +218502,7 @@ "type": "number" }, "StorageType": { - "markdownDescription": "Specifies the storage type to be associated with the DB instance.\n\nValid values: `gp2 | gp3 | io1 | standard`\n\nThe `standard` value is also known as magnetic.\n\nIf you specify `io1` or `gp3` , you must also include a value for the `Iops` parameter.\n\nDefault: `io1` if the `Iops` parameter is specified, otherwise `gp2`\n\nFor more information, see [Amazon RDS DB Instance Storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs).", + "markdownDescription": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. 
Otherwise, `gp2` .", "title": "StorageType", "type": "string" }, @@ -216689,6 +219647,108 @@ ], "type": "object" }, + "AWS::RDS::Integration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AdditionalEncryptionContext": { + "additionalProperties": true, + "markdownDescription": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "AdditionalEncryptionContext", + "type": "object" + }, + "IntegrationName": { + "markdownDescription": "The name of the integration.", + "title": "IntegrationName", + "type": "string" + }, + "KMSKeyId": { + "markdownDescription": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.", + "title": "KMSKeyId", + "type": "string" + }, + "SourceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", + "title": "SourceArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tags. 
For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "title": "Tags", + "type": "array" + }, + "TargetArn": { + "markdownDescription": "The ARN of the Redshift data warehouse to use as the target for replication.", + "title": "TargetArn", + "type": "string" + } + }, + "required": [ + "SourceArn", + "TargetArn" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::RDS::Integration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::RDS::OptionGroup": { "additionalProperties": false, "properties": { @@ -217345,7 +220405,7 @@ "type": "string" }, "Port": { - "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values: `1150-65535`", + "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with ds2 or dc2 nodes - Select a port within the range `1150-65535` .", "title": "Port", "type": "number" }, @@ -218223,191 +221283,196 @@ "Properties": { "additionalProperties": false, "properties": { - "Enable": { - "markdownDescription": "If true, the schedule is enabled. 
If false, the scheduled action does not trigger. For more information about `state` of the scheduled action, see `ScheduledAction` .", - "title": "Enable", - "type": "boolean" - }, - "EndTime": { - "markdownDescription": "The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.", - "title": "EndTime", - "type": "string" - }, - "IamRole": { - "markdownDescription": "The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see [Using Identity-Based Policies for Amazon Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html) in the *Amazon Redshift Cluster Management Guide* .", - "title": "IamRole", - "type": "string" - }, - "Schedule": { - "markdownDescription": "The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.\n\nFormat of at expressions is \" `at(yyyy-mm-ddThh:mm:ss)` \". For example, \" `at(2016-03-04T17:27:00)` \".\n\nFormat of cron expressions is \" `cron(Minutes Hours Day-of-month Month Day-of-week Year)` \". For example, \" `cron(0 10 ? * MON *)` \". 
For more information, see [Cron Expressions](https://docs.aws.amazon.com//AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch Events User Guide* .", - "title": "Schedule", - "type": "string" - }, - "ScheduledActionDescription": { - "markdownDescription": "The description of the scheduled action.", - "title": "ScheduledActionDescription", - "type": "string" - }, - "ScheduledActionName": { - "markdownDescription": "The name of the scheduled action.", - "title": "ScheduledActionName", - "type": "string" - }, - "StartTime": { - "markdownDescription": "The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.", - "title": "StartTime", - "type": "string" - }, - "TargetAction": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ScheduledActionType", - "markdownDescription": "A JSON format string of the Amazon Redshift API operation with input parameters.\n\n\" `{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ds2.8xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}` \".", - "title": "TargetAction" - } - }, - "required": [ - "ScheduledActionName" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::Redshift::ScheduledAction" - ], - "type": "string" - }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - } - }, - "required": [ - "Type", - "Properties" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.PauseClusterMessage": { - "additionalProperties": false, - "properties": { - "ClusterIdentifier": { - "markdownDescription": "The identifier of the cluster to be paused.", - "title": "ClusterIdentifier", - "type": "string" - } - }, - "required": [ - "ClusterIdentifier" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.ResizeClusterMessage": { - "additionalProperties": false, - "properties": { - "Classic": { - "markdownDescription": "A boolean value indicating 
whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to `false` , the resize type is elastic.", - "title": "Classic", - "type": "boolean" - }, - "ClusterIdentifier": { - "markdownDescription": "The unique identifier for the cluster to resize.", - "title": "ClusterIdentifier", - "type": "string" - }, - "ClusterType": { - "markdownDescription": "The new cluster type for the specified cluster.", - "title": "ClusterType", - "type": "string" - }, - "NodeType": { - "markdownDescription": "The new node type for the nodes you are adding. If not specified, the cluster's current node type is used.", - "title": "NodeType", - "type": "string" - }, - "NumberOfNodes": { - "markdownDescription": "The new number of nodes for the cluster. If not specified, the cluster's current number of nodes is used.", - "title": "NumberOfNodes", - "type": "number" - } - }, - "required": [ - "ClusterIdentifier" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.ResumeClusterMessage": { - "additionalProperties": false, - "properties": { - "ClusterIdentifier": { - "markdownDescription": "The identifier of the cluster to be resumed.", - "title": "ClusterIdentifier", - "type": "string" - } - }, - "required": [ - "ClusterIdentifier" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.ScheduledActionType": { - "additionalProperties": false, - "properties": { - "PauseCluster": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.PauseClusterMessage", - "markdownDescription": "An action that runs a `PauseCluster` API operation.", - "title": "PauseCluster" - }, - "ResizeCluster": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResizeClusterMessage", - "markdownDescription": "An action that runs a `ResizeCluster` API operation.", - "title": "ResizeCluster" - }, - "ResumeCluster": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResumeClusterMessage", - "markdownDescription": "An action that runs 
a `ResumeCluster` API operation.", - "title": "ResumeCluster" - } - }, - "type": "object" - }, - "AWS::RedshiftServerless::Namespace": { - "additionalProperties": false, - "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", + "Enable": { + "markdownDescription": "If true, the schedule is enabled. If false, the scheduled action does not trigger. For more information about `state` of the scheduled action, see `ScheduledAction` .", + "title": "Enable", + "type": "boolean" + }, + "EndTime": { + "markdownDescription": "The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.", + "title": "EndTime", + "type": "string" + }, + "IamRole": { + "markdownDescription": "The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see [Using Identity-Based Policies for Amazon Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html) in the *Amazon Redshift Cluster Management Guide* .", + "title": "IamRole", + "type": "string" + }, + "Schedule": { + "markdownDescription": "The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.\n\nFormat of at expressions is \" `at(yyyy-mm-ddThh:mm:ss)` \". For example, \" `at(2016-03-04T17:27:00)` \".\n\nFormat of cron expressions is \" `cron(Minutes Hours Day-of-month Month Day-of-week Year)` \". For example, \" `cron(0 10 ? * MON *)` \". 
For more information, see [Cron Expressions](https://docs.aws.amazon.com//AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch Events User Guide* .", + "title": "Schedule", + "type": "string" + }, + "ScheduledActionDescription": { + "markdownDescription": "The description of the scheduled action.", + "title": "ScheduledActionDescription", + "type": "string" + }, + "ScheduledActionName": { + "markdownDescription": "The name of the scheduled action.", + "title": "ScheduledActionName", + "type": "string" + }, + "StartTime": { + "markdownDescription": "The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.", + "title": "StartTime", + "type": "string" + }, + "TargetAction": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ScheduledActionType", + "markdownDescription": "A JSON format string of the Amazon Redshift API operation with input parameters.\n\n\" `{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ds2.8xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}` \".", + "title": "TargetAction" + } + }, + "required": [ + "ScheduledActionName" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Redshift::ScheduledAction" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.PauseClusterMessage": { + "additionalProperties": false, + "properties": { + "ClusterIdentifier": { + "markdownDescription": "The identifier of the cluster to be paused.", + "title": "ClusterIdentifier", + "type": "string" + } + }, + "required": [ + "ClusterIdentifier" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.ResizeClusterMessage": { + "additionalProperties": false, + "properties": { + "Classic": { + "markdownDescription": "A boolean value indicating 
whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to `false` , the resize type is elastic.", + "title": "Classic", + "type": "boolean" + }, + "ClusterIdentifier": { + "markdownDescription": "The unique identifier for the cluster to resize.", + "title": "ClusterIdentifier", + "type": "string" + }, + "ClusterType": { + "markdownDescription": "The new cluster type for the specified cluster.", + "title": "ClusterType", + "type": "string" + }, + "NodeType": { + "markdownDescription": "The new node type for the nodes you are adding. If not specified, the cluster's current node type is used.", + "title": "NodeType", + "type": "string" + }, + "NumberOfNodes": { + "markdownDescription": "The new number of nodes for the cluster. If not specified, the cluster's current number of nodes is used.", + "title": "NumberOfNodes", + "type": "number" + } + }, + "required": [ + "ClusterIdentifier" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.ResumeClusterMessage": { + "additionalProperties": false, + "properties": { + "ClusterIdentifier": { + "markdownDescription": "The identifier of the cluster to be resumed.", + "title": "ClusterIdentifier", + "type": "string" + } + }, + "required": [ + "ClusterIdentifier" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.ScheduledActionType": { + "additionalProperties": false, + "properties": { + "PauseCluster": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.PauseClusterMessage", + "markdownDescription": "An action that runs a `PauseCluster` API operation.", + "title": "PauseCluster" + }, + "ResizeCluster": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResizeClusterMessage", + "markdownDescription": "An action that runs a `ResizeCluster` API operation.", + "title": "ResizeCluster" + }, + "ResumeCluster": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResumeClusterMessage", + "markdownDescription": "An action that runs 
a `ResumeCluster` API operation.", + "title": "ResumeCluster" + } + }, + "type": "object" + }, + "AWS::RedshiftServerless::Namespace": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret. You can only use this parameter if `ManageAdminPassword` is `true` .", + "title": "AdminPasswordSecretKmsKeyId", "type": "string" }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { "AdminUserPassword": { "markdownDescription": "The password of the administrator for the primary database created in the namespace.", "title": "AdminUserPassword", @@ -218459,11 +221524,26 @@ "title": "LogExports", "type": "array" }, + "ManageAdminPassword": { + "markdownDescription": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials. You can't use `AdminUserPassword` if `ManageAdminPassword` is true. If `ManageAdminPassword` is `false` or not set, Amazon Redshift uses `AdminUserPassword` for the admin user account's password.", + "title": "ManageAdminPassword", + "type": "boolean" + }, "NamespaceName": { "markdownDescription": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. 
A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "title": "NamespaceName", "type": "string" }, + "NamespaceResourcePolicy": { + "markdownDescription": "The resource policy that will be attached to the namespace.", + "title": "NamespaceResourcePolicy", + "type": "object" + }, + "RedshiftIdcApplicationArn": { + "markdownDescription": "The ARN for the Redshift application that integrates with IAM Identity Center.", + "title": "RedshiftIdcApplicationArn", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -218502,6 +221582,16 @@ "AWS::RedshiftServerless::Namespace.Namespace": { "additionalProperties": false, "properties": { + "AdminPasswordSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for the namespace's admin user credentials secret.", + "title": "AdminPasswordSecretArn", + "type": "string" + }, + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", + "title": "AdminPasswordSecretKmsKeyId", + "type": "string" + }, "AdminUsername": { "markdownDescription": "The username of the administrator for the first database created in the namespace.", "title": "AdminUsername", @@ -218619,6 +221709,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. 
The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -218811,6 +221906,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -222315,6 +225415,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. 
However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSet.GeoProximityLocation", + "markdownDescription": "*GeoproximityLocation resource record sets only:* A complex type that lets you control how Route\u00a053 responds to DNS queries based on the geographic origin of the query and your resources.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, 
the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. 
In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -222446,6 +225551,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSet.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSet.GeoLocation": { "additionalProperties": false, "properties": { @@ -222467,6 +225592,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSet.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. 
Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSet.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup": { "additionalProperties": false, "properties": { @@ -222593,6 +225744,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSetGroup.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSetGroup.GeoLocation": { 
"additionalProperties": false, "properties": { @@ -222614,6 +225785,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSetGroup.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. 
For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup.RecordSet": { "additionalProperties": false, "properties": { @@ -222637,6 +225834,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. 
However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.GeoProximityLocation", + "markdownDescription": "A complex type that contains information about a geographic location.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. 
When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group 
of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -223754,6 +226956,11 @@ "markdownDescription": "The priority of the rule in the rule group. This value must be unique within the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting.", "title": "Priority", "type": "number" + }, + "Qtype": { + "markdownDescription": "The DNS query type you want the rule to evaluate. Allowed values are;\n\n- A: Returns an IPv4 address.\n- AAAA: Returns an Ipv6 address.\n- CAA: Restricts CAs that can create SSL/TLS certifications for the domain.\n- CNAME: Returns another domain name.\n- DS: Record that identifies the DNSSEC signing key of a delegated zone.\n- MX: Specifies mail servers.\n- NAPTR: Regular-expression-based rewriting of domain names.\n- NS: Authoritative name servers.\n- PTR: Maps an IP address to a domain name.\n- SOA: Start of authority record for the zone.\n- SPF: Lists the servers authorized to send emails from a domain.\n- SRV: Application specific values that identify servers.\n- TXT: Verifies email senders and application-specific values.\n- A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPE NUMBER , where the NUMBER can be 1-65334, for example, TYPE28. 
For more information, see [List of DNS record types](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/List_of_DNS_record_types) .", + "title": "Qtype", + "type": "string" } }, "required": [ @@ -231848,7 +235055,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *`BLOCK`* : Packages in the `RejectedPatches` list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as `InstalledRejected` .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. 
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", "title": "RejectedPatchesAction", "type": "string" }, @@ -233913,6 +237120,11 @@ "title": "AppImageConfigName", "type": "string" }, + "JupyterLabAppImageConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig", + "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.", + "title": "JupyterLabAppImageConfig" + }, "KernelGatewayImageConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig", "markdownDescription": "The configuration for the file system and kernels in the SageMaker image.", @@ -233953,6 +237165,56 @@ ], "type": "object" }, + "AWS::SageMaker::AppImageConfig.ContainerConfig": { + "additionalProperties": false, + "properties": { + "ContainerArguments": { + "items": { + "type": "string" + }, + "markdownDescription": "The arguments for the container when you're running the application.", + "title": "ContainerArguments", + "type": "array" + }, + "ContainerEntrypoint": { + "items": { + "type": "string" + }, + "markdownDescription": "The entrypoint used to run the application in the container.", + "title": "ContainerEntrypoint", + "type": "array" + }, + "ContainerEnvironmentVariables": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable" + }, + "markdownDescription": "The environment variables to set in the container", + "title": "ContainerEnvironmentVariables", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key that identifies a container environment 
variable.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the container environment variable.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, "AWS::SageMaker::AppImageConfig.FileSystemConfig": { "additionalProperties": false, "properties": { @@ -233974,6 +237236,17 @@ }, "type": "object" }, + "AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig": { + "additionalProperties": false, + "properties": { + "ContainerConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.ContainerConfig", + "markdownDescription": "The configuration used to run the application image container.", + "title": "ContainerConfig" + } + }, + "type": "object" + }, "AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig": { "additionalProperties": false, "properties": { @@ -235133,9 +238406,33 @@ }, "type": "object" }, + "AWS::SageMaker::Domain.DockerSettings": { + "additionalProperties": false, + "properties": { + "EnableDockerAccess": { + "markdownDescription": "Indicates whether the domain can access Docker.", + "title": "EnableDockerAccess", + "type": "string" + }, + "VpcOnlyTrustedAccounts": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of AWS accounts that are trusted when the domain is created in VPC-only mode.", + "title": "VpcOnlyTrustedAccounts", + "type": "array" + } + }, + "type": "object" + }, "AWS::SageMaker::Domain.DomainSettings": { "additionalProperties": false, "properties": { + "DockerSettings": { + "$ref": "#/definitions/AWS::SageMaker::Domain.DockerSettings", + "markdownDescription": "A collection of settings that configure the domain's Docker interaction.", + "title": "DockerSettings" + }, "RStudioServerProDomainSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.RStudioServerProDomainSettings", "markdownDescription": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", @@ -236279,7 
+239576,7 @@ "type": "number" }, "ProvisionedConcurrency": { - "markdownDescription": "", + "markdownDescription": "The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to `MaxConcurrency` .\n\n> This field is not supported for serverless endpoint recommendations for Inference Recommender jobs. For more information about creating an Inference Recommender job, see [CreateInferenceRecommendationsJobs](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateInferenceRecommendationsJob.html) .", "title": "ProvisionedConcurrency", "type": "number" } @@ -236525,6 +239822,11 @@ "markdownDescription": "Option for different tiers of low latency storage for real-time data retrieval.\n\n- `Standard` : A managed low latency data store for feature groups.\n- `InMemory` : A managed data store for feature groups that supports very low latency retrieval.", "title": "StorageType", "type": "string" + }, + "TtlDuration": { + "$ref": "#/definitions/AWS::SageMaker::FeatureGroup.TtlDuration", + "markdownDescription": "Time to live duration, where the record is hard deleted after the expiration time is reached; `ExpiresAt` = `EventTime` + `TtlDuration` . 
For information on HardDelete, see the [DeleteRecord](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_feature_store_DeleteRecord.html) API in the Amazon SageMaker API Reference guide.", + "title": "TtlDuration" } }, "type": "object" @@ -236583,6 +239885,22 @@ ], "type": "object" }, + "AWS::SageMaker::FeatureGroup.TtlDuration": { + "additionalProperties": false, + "properties": { + "Unit": { + "markdownDescription": "`TtlDuration` time unit.", + "title": "Unit", + "type": "string" + }, + "Value": { + "markdownDescription": "`TtlDuration` time value.", + "title": "Value", + "type": "number" + } + }, + "type": "object" + }, "AWS::SageMaker::Image": { "additionalProperties": false, "properties": { @@ -242348,6 +245666,16 @@ "title": "DomainId", "type": "string" }, + "OwnershipSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.OwnershipSettings", + "markdownDescription": "The collection of ownership settings for a space.", + "title": "OwnershipSettings" + }, + "SpaceDisplayName": { + "markdownDescription": "The name of the space that appears in the Studio UI.", + "title": "SpaceDisplayName", + "type": "string" + }, "SpaceName": { "markdownDescription": "The name of the space.", "title": "SpaceName", @@ -242358,6 +245686,11 @@ "markdownDescription": "A collection of space settings.", "title": "SpaceSettings" }, + "SpaceSharingSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceSharingSettings", + "markdownDescription": "A collection of space sharing settings.", + "title": "SpaceSharingSettings" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -242394,6 +245727,31 @@ ], "type": "object" }, + "AWS::SageMaker::Space.CodeRepository": { + "additionalProperties": false, + "properties": { + "RepositoryUrl": { + "markdownDescription": "The URL of the Git repository.", + "title": "RepositoryUrl", + "type": "string" + } + }, + "required": [ + "RepositoryUrl" + ], + "type": "object" + }, + "AWS::SageMaker::Space.CustomFileSystem": { + 
"additionalProperties": false, + "properties": { + "EFSFileSystem": { + "$ref": "#/definitions/AWS::SageMaker::Space.EFSFileSystem", + "markdownDescription": "A custom file system in Amazon EFS.", + "title": "EFSFileSystem" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.CustomImage": { "additionalProperties": false, "properties": { @@ -242419,6 +245777,34 @@ ], "type": "object" }, + "AWS::SageMaker::Space.EFSFileSystem": { + "additionalProperties": false, + "properties": { + "FileSystemId": { + "markdownDescription": "The ID of your Amazon EFS file system.", + "title": "FileSystemId", + "type": "string" + } + }, + "required": [ + "FileSystemId" + ], + "type": "object" + }, + "AWS::SageMaker::Space.EbsStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsVolumeSizeInGb": { + "markdownDescription": "The size of an EBS storage volume for a private space.", + "title": "EbsVolumeSizeInGb", + "type": "number" + } + }, + "required": [ + "EbsVolumeSizeInGb" + ], + "type": "object" + }, "AWS::SageMaker::Space.JupyterServerAppSettings": { "additionalProperties": false, "properties": { @@ -242449,6 +245835,20 @@ }, "type": "object" }, + "AWS::SageMaker::Space.OwnershipSettings": { + "additionalProperties": false, + "properties": { + "OwnerUserProfileName": { + "markdownDescription": "The user profile who is the owner of the private space.", + "title": "OwnerUserProfileName", + "type": "string" + } + }, + "required": [ + "OwnerUserProfileName" + ], + "type": "object" + }, "AWS::SageMaker::Space.ResourceSpec": { "additionalProperties": false, "properties": { @@ -242470,9 +245870,62 @@ }, "type": "object" }, + "AWS::SageMaker::Space.SpaceCodeEditorAppSettings": { + "additionalProperties": false, + "properties": { + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + 
"title": "DefaultResourceSpec" + } + }, + "type": "object" + }, + "AWS::SageMaker::Space.SpaceJupyterLabAppSettings": { + "additionalProperties": false, + "properties": { + "CodeRepositories": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CodeRepository" + }, + "markdownDescription": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", + "title": "CodeRepositories", + "type": "array" + }, + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + "title": "DefaultResourceSpec" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.SpaceSettings": { "additionalProperties": false, "properties": { + "AppType": { + "markdownDescription": "The type of app created within the space.", + "title": "AppType", + "type": "string" + }, + "CodeEditorAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceCodeEditorAppSettings", + "markdownDescription": "The Code Editor application settings.", + "title": "CodeEditorAppSettings" + }, + "CustomFileSystems": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CustomFileSystem" + }, + "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. 
Permitted users can access this file system in Amazon SageMaker Studio.", + "title": "CustomFileSystems", + "type": "array" + }, + "JupyterLabAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceJupyterLabAppSettings", + "markdownDescription": "The settings for the JupyterLab application.", + "title": "JupyterLabAppSettings" + }, "JupyterServerAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Space.JupyterServerAppSettings", "markdownDescription": "The JupyterServer app settings.", @@ -242482,6 +245935,36 @@ "$ref": "#/definitions/AWS::SageMaker::Space.KernelGatewayAppSettings", "markdownDescription": "The KernelGateway app settings.", "title": "KernelGatewayAppSettings" + }, + "SpaceStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceStorageSettings", + "markdownDescription": "The storage settings for a private space.", + "title": "SpaceStorageSettings" + } + }, + "type": "object" + }, + "AWS::SageMaker::Space.SpaceSharingSettings": { + "additionalProperties": false, + "properties": { + "SharingType": { + "markdownDescription": "Specifies the sharing type of the space.", + "title": "SharingType", + "type": "string" + } + }, + "required": [ + "SharingType" + ], + "type": "object" + }, + "AWS::SageMaker::Space.SpaceStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.EbsStorageSettings", + "markdownDescription": "A collection of EBS storage settings for a private space.", + "title": "EbsStorageSettings" } }, "type": "object" @@ -247600,7 +251083,7 @@ "type": "array" }, "RoleArn": { - "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. 
This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- You must have the `iam:PassRole` permission. For more information, see [Granting a user permissions to pass a role to an AWS service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.", + "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. 
This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.", "title": "RoleArn", "type": "string" } @@ -247897,7 +251380,7 @@ "additionalProperties": false, "properties": { "Aggregation": { - "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- Sum - Use the total traffic across the group. This is a good choice for most cases. 
Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", + "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- `Sum` - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- `Mean` - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- `Max` - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", "title": "Aggregation", "type": "string" }, @@ -251585,7 +255068,8 @@ } }, "required": [ - "Configuration" + "Configuration", + "PolicyStoreId" ], "type": "object" }, @@ -251981,6 +255465,7 @@ } }, "required": [ + "PolicyStoreId", "Statement" ], "type": "object" @@ -255580,9 +259065,6 @@ "AWS::WAFv2::LoggingConfiguration.FieldToMatch": { "additionalProperties": false, "properties": { - "JsonBody": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody" - }, "Method": { "markdownDescription": "Redact the indicated HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.", "title": "Method", @@ -255635,25 +259117,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.JsonBody": { - "additionalProperties": false, - "properties": { - "InvalidFallbackBehavior": { - "type": "string" - }, - "MatchPattern": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern" - }, - "MatchScope": { - "type": "string" - } - }, - "required": [ - "MatchPattern", - "MatchScope" - ], - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.LabelNameCondition": { "additionalProperties": false, "properties": { @@ -255691,21 +259154,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.MatchPattern": { - "additionalProperties": false, - "properties": { - "All": { - "type": "object" - }, - "IncludedPaths": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.SingleHeader": { "additionalProperties": false, "properties": { @@ -255989,7 +259437,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256237,7 +259685,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -256250,9 +259698,14 @@ "markdownDescription": "Inspect the request headers. 
You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -256434,6 +259887,20 @@ ], "type": "object" }, + "AWS::WAFv2::RuleGroup.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::RuleGroup.JsonBody": { "additionalProperties": false, "properties": { @@ -256453,7 +259920,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256575,6 +260042,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -257055,7 +260527,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). 
For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. 
For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -257177,7 +260649,7 @@ "properties": { "AssociationConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.AssociationConfig", - "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected resources forward to AWS WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "title": "AssociationConfig" }, "CaptchaConfig": { @@ -257396,7 +260868,7 @@ "properties": { "RequestBody": { "additionalProperties": false, - "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. 
For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access resources forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes). You can change the setting for any of the available resource types.\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nExample JSON: `{ \"API_GATEWAY\": \"KB_48\", \"APP_RUNNER_SERVICE\": \"KB_32\" }`\n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "patternProperties": { "^[a-zA-Z0-9]+$": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RequestBodyAssociatedResourceTypeConfig" @@ -257423,7 +260895,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -257715,7 +261187,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::WebACL.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -257728,9 +261200,14 @@ "markdownDescription": "Inspect the request headers. 
You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::WebACL.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::WebACL.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -257912,6 +261389,20 @@ ], "type": "object" }, + "AWS::WAFv2::WebACL.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::WebACL.JsonBody": { "additionalProperties": false, "properties": { @@ -257931,7 +261422,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -258153,6 +261644,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -258405,7 +261901,7 @@ "additionalProperties": false, "properties": { "DefaultSizeInspectionLimit": { - "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to AWS WAF for inspection. 
This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`", + "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resource should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`", "title": "DefaultSizeInspectionLimit", "type": "string" } @@ -258925,7 +262421,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::WebACL.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). 
For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -269487,6 +272983,9 @@ { "$ref": "#/definitions/AWS::CodeArtifact::Repository" }, + { + "$ref": "#/definitions/AWS::CodeBuild::Fleet" + }, { "$ref": "#/definitions/AWS::CodeBuild::Project" }, @@ -269688,6 +273187,9 @@ { "$ref": "#/definitions/AWS::ConnectCampaigns::Campaign" }, + { + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline" + }, { "$ref": "#/definitions/AWS::ControlTower::EnabledControl" }, @@ -269814,6 +273316,27 @@ { "$ref": "#/definitions/AWS::DataSync::Task" }, + { + "$ref": "#/definitions/AWS::DataZone::DataSource" + }, + { + "$ref": "#/definitions/AWS::DataZone::Domain" + }, + { + "$ref": "#/definitions/AWS::DataZone::Environment" + }, + { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration" + }, + { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile" + }, + { + "$ref": "#/definitions/AWS::DataZone::Project" + }, + { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget" + }, { "$ref": "#/definitions/AWS::Detective::Graph" }, @@ -270510,6 +274033,9 @@ { "$ref": "#/definitions/AWS::Glue::Table" }, + { + "$ref": "#/definitions/AWS::Glue::TableOptimizer" + }, { "$ref": 
"#/definitions/AWS::Glue::Trigger" }, @@ -270663,6 +274189,9 @@ { "$ref": "#/definitions/AWS::IVS::RecordingConfiguration" }, + { + "$ref": "#/definitions/AWS::IVS::Stage" + }, { "$ref": "#/definitions/AWS::IVS::StreamKey" }, @@ -270714,6 +274243,9 @@ { "$ref": "#/definitions/AWS::Inspector::ResourceGroup" }, + { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration" + }, { "$ref": "#/definitions/AWS::InspectorV2::Filter" }, @@ -271701,6 +275233,9 @@ { "$ref": "#/definitions/AWS::RDS::GlobalCluster" }, + { + "$ref": "#/definitions/AWS::RDS::Integration" + }, { "$ref": "#/definitions/AWS::RDS::OptionGroup" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 64000d8da..3c5422d06 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -362,7 +362,7 @@ "CustomRules": "The custom rewrite and redirect rules for an Amplify app.", "Description": "The description of the Amplify app.", "EnableBranchAutoDeletion": "Automatically disconnect a branch in Amplify Hosting when you delete a branch from your Git repository.", - "EnvironmentVariables": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", + "EnvironmentVariables": "The environment variables for the Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", "IAMServiceRole": "AWS Identity and Access Management ( IAM ) service role for the Amazon Resource Name (ARN) of the Amplify app.", "Name": "The name of the Amplify app.", "OauthToken": "The OAuth token 
for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", @@ -378,7 +378,7 @@ "EnableAutoBuild": "Enables auto building for the auto created branch.", "EnablePerformanceMode": "Enables performance mode for the branch.\n\nPerformance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.", "EnablePullRequestPreview": "Sets whether pull request previews are enabled for each branch that Amplify Hosting automatically creates for your app. Amplify creates previews by deploying your app to a unique URL whenever a pull request is opened for the branch. Development and QA teams can use this preview to test the pull request before it's merged into a production or integration branch.\n\nTo provide backend support for your preview, Amplify Hosting automatically provisions a temporary backend environment that it deletes when the pull request is closed. 
If you want to specify a dedicated backend environment for your previews, use the `PullRequestEnvironmentName` property.\n\nFor more information, see [Web Previews](https://docs.aws.amazon.com/amplify/latest/userguide/pr-previews.html) in the *AWS Amplify Hosting User Guide* .", - "EnvironmentVariables": "Environment variables for the auto created branch.", + "EnvironmentVariables": "The environment variables for the autocreated branch.", "Framework": "The framework for the autocreated branch.", "PullRequestEnvironmentName": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "Stage": "Stage for the auto created branch." @@ -395,8 +395,8 @@ "Target": "The target pattern for a URL rewrite or redirect rule." }, "AWS::Amplify::App EnvironmentVariable": { - "Name": "", - "Value": "" + "Name": "The environment variable name.", + "Value": "The environment variable value." }, "AWS::Amplify::App Tag": { "Key": "Specifies the key for the tag.", @@ -1327,6 +1327,7 @@ }, "AWS::AppConfig::Extension Parameter": { "Description": "Information about the parameter.", + "Dynamic": "Indicates whether this parameter's value can be supplied at the extension's action point instead of during extension association. 
Dynamic parameters can't be marked `Required` .", "Required": "A parameter value must be specified in the extension association." }, "AWS::AppConfig::Extension Tag": { @@ -3196,7 +3197,7 @@ "AWS::ApplicationAutoScaling::ScalableTarget": { "MaxCapacity": "The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand.", "MinCapacity": "The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand.", - "ResourceId": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. 
Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. 
Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "ResourceId": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. 
The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. 
Example: `inference-component/my-inference-component` .", "RoleARN": "Specify the Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that allows Application Auto Scaling to modify the scalable target on your behalf. This can be either an IAM service role that Application Auto Scaling can assume to make calls to other AWS resources on your behalf, or a service-linked role for the specified service. For more information, see [How Application Auto Scaling works with IAM](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html) in the *Application Auto Scaling User Guide* .\n\nTo automatically create a service-linked role (recommended), specify the full ARN of the service-linked role in your stack template. To find the exact ARN of the service-linked role for your AWS or custom resource, see the [Service-linked roles](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html) topic in the *Application Auto Scaling User Guide* . Look for the ARN in the table at the bottom of the page.", "ScalableDimension": "The scalable dimension associated with the scalable target. 
This string consists of the service namespace, resource type, and scaling property.\n\n- `ecs:service:DesiredCount` - The desired task count of an ECS service.\n- `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.\n- `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot Fleet.\n- `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.\n- `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.\n- `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.\n- `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.\n- `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.\n- `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.\n- `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for a SageMaker model endpoint variant.\n- `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.\n- `comprehend:document-classifier-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend document classification endpoint.\n- `comprehend:entity-recognizer-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend entity recognizer endpoint.\n- `lambda:function:ProvisionedConcurrency` - The provisioned concurrency for a Lambda function.\n- `cassandra:table:ReadCapacityUnits` - The provisioned read capacity for an Amazon Keyspaces table.\n- `cassandra:table:WriteCapacityUnits` - The provisioned write capacity for an Amazon Keyspaces table.\n- `kafka:broker-storage:VolumeSize` - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.\n- 
`elasticache:replication-group:NodeGroups` - The number of node groups for an Amazon ElastiCache replication group.\n- `elasticache:replication-group:Replicas` - The number of replicas per node group for an Amazon ElastiCache replication group.\n- `neptune:cluster:ReadReplicaCount` - The count of read replicas in an Amazon Neptune DB cluster.\n- `sagemaker:variant:DesiredProvisionedConcurrency` - The provisioned concurrency for a SageMaker serverless endpoint.\n- `sagemaker:inference-component:DesiredCopyCount` - The number of copies across an endpoint for a SageMaker inference component.", "ScheduledActions": "The scheduled actions for the scalable target. Duplicates aren't allowed.", @@ -3223,7 +3224,7 @@ "AWS::ApplicationAutoScaling::ScalingPolicy": { "PolicyName": "The name of the scaling policy.\n\nUpdates to the name of a target tracking scaling policy are not supported, unless you also update the metric used for scaling. To change only a target tracking scaling policy's name, first delete the policy by removing the existing `AWS::ApplicationAutoScaling::ScalingPolicy` resource from the template and updating the stack. Then, recreate the resource with the same settings and a different name.", "PolicyType": "The scaling policy type.\n\nThe following policy types are supported:\n\n`TargetTrackingScaling` \u2014Not supported for Amazon EMR\n\n`StepScaling` \u2014Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune.", - "ResourceId": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. 
Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . 
Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "ResourceId": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. 
Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. 
Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "ScalableDimension": "The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n\n- `ecs:service:DesiredCount` - The desired task count of an ECS service.\n- `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.\n- `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot Fleet.\n- `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.\n- `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.\n- `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.\n- `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.\n- `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.\n- `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. 
Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.\n- `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for a SageMaker model endpoint variant.\n- `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.\n- `comprehend:document-classifier-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend document classification endpoint.\n- `comprehend:entity-recognizer-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend entity recognizer endpoint.\n- `lambda:function:ProvisionedConcurrency` - The provisioned concurrency for a Lambda function.\n- `cassandra:table:ReadCapacityUnits` - The provisioned read capacity for an Amazon Keyspaces table.\n- `cassandra:table:WriteCapacityUnits` - The provisioned write capacity for an Amazon Keyspaces table.\n- `kafka:broker-storage:VolumeSize` - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.\n- `elasticache:replication-group:NodeGroups` - The number of node groups for an Amazon ElastiCache replication group.\n- `elasticache:replication-group:Replicas` - The number of replicas per node group for an Amazon ElastiCache replication group.\n- `neptune:cluster:ReadReplicaCount` - The count of read replicas in an Amazon Neptune DB cluster.\n- `sagemaker:variant:DesiredProvisionedConcurrency` - The provisioned concurrency for a SageMaker serverless endpoint.\n- `sagemaker:inference-component:DesiredCopyCount` - The number of copies across an endpoint for a SageMaker inference component.", "ScalingTargetId": "The CloudFormation-generated ID of an Application Auto Scaling scalable target. 
For more information about the ID, see the Return Value section of the `AWS::ApplicationAutoScaling::ScalableTarget` resource.\n\n> You must specify either the `ScalingTargetId` property, or the `ResourceId` , `ScalableDimension` , and `ServiceNamespace` properties, but not both.", "ServiceNamespace": "The namespace of the AWS service that provides the resource, or a `custom-resource` .", @@ -3288,6 +3289,7 @@ "TargetValue": "The target value for the metric. Although this property accepts numbers of type Double, it won't accept values that are either too small or too large. Values must be in the range of -2^360 to 2^360. The value must be a valid number based on the choice of metric. For example, if the metric is CPU utilization, then the target value is a percent value that represents how much of the CPU can be used before scaling out." }, "AWS::ApplicationInsights::Application": { + "AttachMissingPermission": "If set to true, the managed policies for SSM and CW will be attached to the instance roles if they are missing.", "AutoConfigurationEnabled": "If set to `true` , the application components will be configured with the monitoring configuration recommended by Application Insights.", "CWEMonitorEnabled": "Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as `instance terminated` , `failed deployment` , and others.", "ComponentMonitoringSettings": "The monitoring settings of the components.", @@ -3325,6 +3327,9 @@ "HANAPrometheusExporter": "The HANA DB Prometheus Exporter settings.", "JMXPrometheusExporter": "A list of Java metrics to monitor for the component.", "Logs": "A list of logs to monitor for the component. Only Amazon EC2 instances can use `Logs` .", + "NetWeaverPrometheusExporter": "", + "Processes": "", + "SQLServerPrometheusExporter": "", "WindowsEvents": "A list of Windows Events to monitor for the component. Only Amazon EC2 instances running on Windows can use `WindowsEvents` ." 
}, "AWS::ApplicationInsights::Application CustomComponent": { @@ -3362,9 +3367,23 @@ "LogPatterns": "A list of objects that define the log patterns that belong to `LogPatternSet` .", "PatternSetName": "The name of the log pattern. A log pattern name can contain up to 30 characters, and it cannot be empty. The characters can be Unicode letters, digits, or one of the following symbols: period, dash, underscore." }, + "AWS::ApplicationInsights::Application NetWeaverPrometheusExporter": { + "InstanceNumbers": "", + "PrometheusPort": "", + "SAPSID": "" + }, + "AWS::ApplicationInsights::Application Process": { + "AlarmMetrics": "", + "ProcessName": "" + }, + "AWS::ApplicationInsights::Application SQLServerPrometheusExporter": { + "PrometheusPort": "", + "SQLSecretName": "" + }, "AWS::ApplicationInsights::Application SubComponentConfigurationDetails": { "AlarmMetrics": "A list of metrics to monitor for the component. All component types can use `AlarmMetrics` .", "Logs": "A list of logs to monitor for the component. Only Amazon EC2 instances can use `Logs` .", + "Processes": "", "WindowsEvents": "A list of Windows Events to monitor for the component. Only Amazon EC2 instances running on Windows can use `WindowsEvents` ." }, "AWS::ApplicationInsights::Application SubComponentTypeConfiguration": { @@ -4198,6 +4217,7 @@ }, "AWS::Batch::JobDefinition": { "ContainerProperties": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", + "EcsProperties": "An object that contains the properties for the Amazon ECS resources of a job.When `ecsProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `eksProperties` , or `nodeProperties` .", "EksProperties": "An object with properties that are specific to Amazon EKS-based jobs. 
When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", "JobDefinitionName": "The name of the job definition.", "NodeProperties": "An object with properties that are specific to multi-node parallel jobs. When `nodeProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `eksProperties` .\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", @@ -4244,6 +4264,21 @@ "HostPath": "The path for the device on the host container instance.", "Permissions": "The explicit permissions to provide to the container for the device. By default, the container has permissions for `read` , `write` , and `mknod` for the device." }, + "AWS::Batch::JobDefinition EcsProperties": { + "TaskProperties": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element." + }, + "AWS::Batch::JobDefinition EcsTaskProperties": { + "Containers": "This object is a list of containers.", + "EphemeralStorage": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", + "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", + "IpcMode": "The IPC resource namespace to use for the containers in the task. 
The valid values are `host` , `task` , or `none` .\n\nIf `host` is specified, all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified `task` share the same IPC resources.\n\nIf `none` is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.\n\nIf no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the Docker run reference.", + "NetworkConfiguration": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", + "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the Docker run reference.", + "PlatformVersion": "The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. 
If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .", + "RuntimePlatform": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.", + "TaskRoleArn": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .", + "Volumes": "A list of volumes that are associated with the job." + }, "AWS::Batch::JobDefinition EfsVolumeConfiguration": { "AuthorizationConfig": "The authorization configuration details for the Amazon EFS file system.", "FileSystemId": "The Amazon EFS file system ID to use.", @@ -4349,14 +4384,18 @@ }, "AWS::Batch::JobDefinition NodeRangeProperty": { "Container": "The container details for the node range.", + "EcsProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "InstanceTypes": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). 
In this case, the `4:5` range properties override the `0:10` properties." }, "AWS::Batch::JobDefinition PodProperties": { "Containers": "The properties of the container that's used on the Amazon EKS pod.", "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "InitContainers": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. 
For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", "ServiceAccountName": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "ShareProcessNamespace": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", "Volumes": "Specifies the volumes for a job definition that uses Amazon EKS resources." }, "AWS::Batch::JobDefinition RepositoryCredentials": { @@ -4378,6 +4417,28 @@ "Name": "The name of the secret.", "ValueFrom": "The secret to expose to the container. 
The supported values are either the full Amazon Resource Name (ARN) of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.\n\n> If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full Amazon Resource Name (ARN) or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified." }, + "AWS::Batch::JobDefinition TaskContainerDependency": { + "Condition": "The dependency condition of the container. The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a zero status. This condition can't be set on an essential container.", + "ContainerName": "A unique identifier for the container." + }, + "AWS::Batch::JobDefinition TaskContainerProperties": { + "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . 
For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .", + "DependsOn": "A list of containers that this container depends on.", + "Environment": "The environment variables to pass to a container. This parameter maps to Env in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with `AWS_BATCH` . This naming convention is reserved for variables that AWS Batch sets.", + "Essential": "If the essential parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", + "Image": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. 
Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of the [*docker run*](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "LinuxParameters": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .", + "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). 
For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the *Docker documentation* .\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the [--volume](https://docs.aws.amazon.com/) option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "Name": "The name of a container. 
The name can be used as a unique identifier to target your `dependsOn` and `Overrides` objects.", + "Privileged": "When this parameter is `true` , the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on Fargate.", + "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "RepositoryCredentials": "The private repository authentication credentials to use.", + "ResourceRequirements": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", + "Secrets": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the Amazon Elastic Container Service Developer Guide.", + "Ulimits": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. 
This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.",
+ "User": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers."
+ },
+ "AWS::Batch::JobDefinition Timeout": {
+ "AttemptDurationSeconds": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. 
The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes." }, @@ -5191,7 +5252,7 @@ }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightConfig": { "SessionStickinessConfig": "Session stickiness provides the ability to define multiple requests from a single viewer as a single session. This prevents the potentially inconsistent experience of sending some of a given user's requests to your staging distribution, while others are sent to your primary distribution. Define the session duration using TTL values.", - "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and .15." + "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightPolicyConfig": { "SessionStickinessConfig": "", @@ -5638,7 +5699,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. 
To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is 
set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen 
`resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen 
`resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "Field": "A field in a CloudTrail event record on which to filter events to be 
logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` 
\ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` 
or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` 
equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen 
`resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` 
equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in 
the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -5680,7 +5741,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. 
Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. 
To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is 
set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen 
`resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen 
`resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "Field": "A field in a CloudTrail event record on which to filter events to be 
logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` 
\ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` 
or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` 
equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen 
`resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` 
equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in 
the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -10275,7 +10336,7 @@ }, "AWS::EC2::FlowLog": { "DeliverCrossAccountRole": "The ARN of the IAM role that allows the service to publish flow logs across accounts.", - "DeliverLogsPermissionArn": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nThis parameter is required if the destination type is `cloud-watch-logs` and unsupported otherwise.", + "DeliverLogsPermissionArn": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to the log destination.\n\nThis parameter is required if the destination type is `cloud-watch-logs` , or if the destination type is `kinesis-data-firehose` and the delivery stream and the resources to monitor are in different accounts.", "DestinationOptions": "The destination options. The following options are supported:\n\n- `FileFormat` - The format for the flow log ( `plain-text` | `parquet` ). The default is `plain-text` .\n- `HiveCompatiblePartitions` - Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3 ( `true` | `false` ). The default is `false` .\n- `PerHourPartition` - Indicates whether to partition the flow log per hour ( `true` | `false` ). 
The default is `false` .", "LogDestination": "The destination for the flow log data. The meaning of this parameter depends on the destination type.\n\n- If the destination type is `cloud-watch-logs` , specify the ARN of a CloudWatch Logs log group. For example:\n\narn:aws:logs: *region* : *account_id* :log-group: *my_group*\n\nAlternatively, use the `LogGroupName` parameter.\n- If the destination type is `s3` , specify the ARN of an S3 bucket. For example:\n\narn:aws:s3::: *my_bucket* / *my_subfolder* /\n\nThe subfolder is optional. Note that you can't use `AWSLogs` as a subfolder name.\n- If the destination type is `kinesis-data-firehose` , specify the ARN of a Kinesis Data Firehose delivery stream. For example:\n\narn:aws:firehose: *region* : *account_id* :deliverystream: *my_stream*", "LogDestinationType": "The type of destination for the flow log data.\n\nDefault: `cloud-watch-logs`", @@ -10605,7 +10666,7 @@ "SnapshotId": "The ID of the snapshot.", "Throughput": "The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s.\n\nValid Range: Minimum value of 125. Maximum value of 1000.", "VolumeSize": "The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. The following are the supported volumes sizes for each volume type:\n\n- `gp2` and `gp3` : 1 - 16,384 GiB\n- `io1` : 4 - 16,384 GiB\n- `io2` : 4 - 65,536 GiB\n- `st1` and `sc1` : 125 - 16,384 GiB\n- `standard` : 1 - 1024 GiB", - "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* ." + "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* ." }, "AWS::EC2::LaunchTemplate ElasticGpuSpecification": { "Type": "The type of Elastic Graphics accelerator. 
For more information about the values to specify for `Type` , see [Elastic Graphics Basics](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html#elastic-graphics-basics) , specifically the Elastic Graphics accelerator column, in the *Amazon Elastic Compute Cloud User Guide for Windows Instances* ." @@ -11229,18 +11290,19 @@ "VpcId": "The ID of the VPC for the security group. If you do not specify a VPC, the default is to use the default VPC for the Region. If there's no specified VPC and no default VPC, security group creation fails." }, "AWS::EC2::SecurityGroup Egress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 
User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "A description for the security group rule.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", - "DestinationPrefixListId": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", - "DestinationSecurityGroupId": "The ID of the destination VPC security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "DestinationPrefixListId": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", + "DestinationSecurityGroupId": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. 
If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", + "SourceSecurityGroupId": "", "ToPort": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes)." }, "AWS::EC2::SecurityGroup Ingress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR 
format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "Updates the description of an ingress (inbound) security group rule. You can replace an existing description, or add a description to a rule that did not have one previously.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. 
For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", @@ -11255,19 +11317,19 @@ "Value": "The tag value." }, "AWS::EC2::SecurityGroupEgress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use 
cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "The description of an egress (outbound) security group rule.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", - "DestinationPrefixListId": "The prefix list IDs for an AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", - "DestinationSecurityGroupId": "The ID of the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "DestinationPrefixListId": "The prefix list IDs for an AWS service. This is the AWS service to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", + "DestinationSecurityGroupId": "The ID of the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "GroupId": "The ID of the security group. You must specify either the security group ID or the security group name in the request. 
For security groups in a nondefault VPC, you must specify the security group ID.", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", "ToPort": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes)." }, "AWS::EC2::SecurityGroupIngress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly 
one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "Updates the description of an ingress (inbound) security group rule. You can replace an existing description, or add a description to a rule that did not have one previously.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", "FromPort": "The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type number. A value of `-1` indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify all codes.\n\nUse this for ICMP and any protocol that uses ports.", "GroupId": "The ID of the security group.", @@ -11315,7 +11377,7 @@ "Iops": "The number of I/O operations per second (IOPS). For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. 
For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type:\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nFor `io2` volumes, you can achieve up to 256,000 IOPS on [instances built on the Nitro System](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) . On other instances, you can achieve performance up to 32,000 IOPS.\n\nThis parameter is required for `io1` and `io2` volumes. The default for `gp3` volumes is 3,000 IOPS.", "SnapshotId": "The ID of the snapshot.", "VolumeSize": "The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size.\n\nThe following are the supported sizes for each volume type:\n\n- `gp2` and `gp3` : 1 - 16,384 GiB\n- `io1` : 4 - 16,384 GiB\n- `io2` : 4 - 65,536 GiB\n- `st1` and `sc1` : 125 - 16,384 GiB\n- `standard` : 1 - 1024 GiB", - "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide* ." + "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* ." }, "AWS::EC2::SpotFleet FleetLaunchTemplateSpecification": { "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", @@ -11944,7 +12006,7 @@ "AWS::EC2::Volume": { "AutoEnableIO": "Indicates whether the volume is auto-enabled for I/O operations. 
By default, Amazon EBS disables I/O to the volume from attached EC2 instances when it determines that a volume's data is potentially inconsistent. If the consistency of the volume is not a concern, and you prefer that the volume be made available immediately if it's impaired, you can configure the volume to automatically enable I/O.", "AvailabilityZone": "The ID of the Availability Zone in which to create the volume. For example, `us-east-1a` .", - "Encrypted": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) in the *Amazon Elastic Compute Cloud User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .", + "Encrypted": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/ebs/latest/userguide/work-with-ebs-encr.html#encryption-by-default) in the *Amazon EBS User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances) .", "Iops": "The number of I/O operations per second (IOPS). 
For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type:\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nFor `io2` volumes, you can achieve up to 256,000 IOPS on [instances built on the Nitro System](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) . On other instances, you can achieve performance up to 32,000 IOPS.\n\nThis parameter is required for `io1` and `io2` volumes. The default for `gp3` volumes is 3,000 IOPS. This parameter is not supported for `gp2` , `st1` , `sc1` , or `standard` volumes.", "KmsKeyId": "The identifier of the AWS KMS key to use for Amazon EBS encryption. If `KmsKeyId` is specified, the encrypted state must be `true` .\n\nIf you omit this property and your account is enabled for encryption by default, or *Encrypted* is set to `true` , then the volume is encrypted using the default key specified for your account. If your account does not have a default key, then the volume is encrypted using the AWS managed key .\n\nAlternatively, if you want to specify a different key, you can specify one of the following:\n\n- Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.\n- Key alias. Specify the alias for the key, prefixed with `alias/` . For example, for a key with the alias `my_cmk` , use `alias/my_cmk` . Or to specify the AWS managed key , use `alias/aws/ebs` .\n- Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.\n- Alias ARN. 
For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.", "MultiAttachEnabled": "Indicates whether Amazon EBS Multi-Attach is enabled.\n\nAWS CloudFormation does not currently support updating a single-attach volume to be multi-attach enabled, updating a multi-attach enabled volume to be single-attach, or updating the size or number of I/O operations per second (IOPS) of a multi-attach enabled volume.", @@ -11953,7 +12015,7 @@ "SnapshotId": "The snapshot from which to create the volume. You must specify either a snapshot ID or a volume size.", "Tags": "The tags to apply to the volume during creation.", "Throughput": "The throughput to provision for a volume, with a maximum of 1,000 MiB/s.\n\nThis parameter is valid only for `gp3` volumes. The default value is 125.\n\nValid Range: Minimum value of 125. Maximum value of 1000.", - "VolumeType": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nDefault: `gp2`" + "VolumeType": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) .\n\nDefault: `gp2`" }, "AWS::EC2::Volume Tag": { "Key": "The tag key.", @@ -12103,7 +12165,7 @@ "AWS::ECS::ClusterCapacityProviderAssociations CapacityProviderStrategy": { "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. 
Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", "CapacityProvider": "The short name of the capacity provider.", - "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` will not be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that is run using *capacityProviderA* , four tasks would use *capacityProviderB* ." + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* ." }, "AWS::ECS::PrimaryTaskSet": { "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set exists in.", @@ -12187,7 +12249,7 @@ "Type": "The type of constraint. Use `distinctInstance` to ensure that each task in a particular group is running on a different container instance. Use `memberOf` to restrict the selection to a group of valid candidates." }, "AWS::ECS::Service PlacementStrategy": { - "Field": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `CPU` and `MEMORY` . For the `random` placement strategy, this field is not used.", + "Field": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that's applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `cpu` and `memory` . For the `random` placement strategy, this field is not used.", "Type": "The type of placement strategy. 
The `random` placement strategy randomly places tasks on available candidates. The `spread` placement strategy spreads placement across available candidates evenly based on the `field` parameter. The `binpack` strategy places tasks on available candidates that have the least available amount of the resource that's specified with the `field` parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory but still enough to run the task." }, "AWS::ECS::Service Secret": { @@ -12339,7 +12401,7 @@ "TransitEncryptionPort": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* ." }, "AWS::ECS::TaskDefinition EnvironmentFile": { - "Type": "The file type to use. The only supported value is `s3` .", + "Type": "The file type to use. Environment files are objects in Amazon S3. The only supported value is `s3` .", "Value": "The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file." }, "AWS::ECS::TaskDefinition EphemeralStorage": { @@ -12771,7 +12833,7 @@ "LogEncryptionKmsKeyId": "The AWS KMS key used for encrypting log files. This attribute is only available with Amazon EMR 5.30.0 and later, excluding Amazon EMR 6.0.0.", "LogUri": "The path to the Amazon S3 location where logs for this cluster are stored.", "ManagedScalingPolicy": "Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as Amazon EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. 
The master node cannot be scaled after initial configuration.", - "Name": "The name of the cluster.", + "Name": "The name of the cluster. This parameter can't contain the characters <, >, $, |, or ` (backtick).", "OSReleaseLabel": "The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.", "PlacementGroupConfigs": "", "ReleaseLabel": "The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form `emr-x.x.x` , where x.x.x is an Amazon EMR release version such as `emr-5.14.0` . For more information about Amazon EMR release versions and included application versions and features, see [](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/) . The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use `AmiVersion` .", @@ -12887,7 +12949,7 @@ "TaskInstanceFleets": "Describes the EC2 instances and instance configurations for the task instance fleets when using clusters with the instance fleet configuration. These task instance fleets are added to the cluster as part of the cluster launch. Each task instance fleet must have a unique name specified so that CloudFormation can differentiate between the task instance fleets.\n\n> You can currently specify only one task instance fleet for a cluster. After creating the cluster, you can only modify the mutable properties of `InstanceFleetConfig` , which are `TargetOnDemandCapacity` and `TargetSpotCapacity` . Modifying any other property results in cluster replacement. > To allow a maximum of 30 Amazon EC2 instance types per fleet, include `TaskInstanceFleets` when you create your cluster. 
If you create your cluster without `TaskInstanceFleets` , Amazon EMR uses its default allocation strategy, which allows for a maximum of five Amazon EC2 instance types.", "TaskInstanceGroups": "Describes the EC2 instances and instance configurations for task instance groups when using clusters with the uniform instance group configuration. These task instance groups are added to the cluster as part of the cluster launch. Each task instance group must have a unique name specified so that CloudFormation can differentiate between the task instance groups.\n\n> After creating the cluster, you can only modify the mutable properties of `InstanceGroupConfig` , which are `AutoScalingPolicy` and `InstanceCount` . Modifying any other property results in cluster replacement.", "TerminationProtected": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", - "UnhealthyNodeReplacement": "" + "UnhealthyNodeReplacement": "Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster." }, "AWS::EMR::Cluster KerberosAttributes": { "ADDomainJoinPassword": "The Active Directory password for `ADDomainJoinUser` .", @@ -14674,7 +14736,7 @@ }, "AWS::FSx::FileSystem DiskIopsConfiguration": { "Iops": "The total number of SSD IOPS provisioned for the file system.\n\nThe minimum and maximum values for this property depend on the value of `HAPairs` and `StorageCapacity` . The minimum value is calculated as `StorageCapacity` * 3 * `HAPairs` (3 IOPS per GB of `StorageCapacity` ). The maximum value is calculated as 200,000 * `HAPairs` .\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) if the value of `Iops` is outside of the minimum or maximum values.", - "Mode": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, , or if it using a `USER_PROVISIONED` value." 
+ "Mode": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it using a `USER_PROVISIONED` value." }, "AWS::FSx::FileSystem LustreConfiguration": { "AutoImportPolicy": "(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. `AutoImportPolicy` can have the following values:\n\n- `NONE` - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.\n- `NEW` - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.\n- `NEW_CHANGED` - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.\n- `NEW_CHANGED_DELETED` - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.\n\nFor more information, see [Automatically import updates from your S3 bucket](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) .\n\n> This parameter is not supported for Lustre file systems with a data repository association.", @@ -14700,11 +14762,11 @@ "DiskIopsConfiguration": "The SSD IOPS configuration for the FSx for ONTAP file system.", "EndpointIpAddressRange": "(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. 
By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC\u2019s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.", "FsxAdminPassword": "The ONTAP administrative password for the `fsxadmin` user with which you administer your file system using the NetApp ONTAP CLI and REST API.", - "HAPairs": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file system are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", + "HAPairs": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . 
For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 12.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "PreferredSubnetId": "Required when `DeploymentType` is set to `MULTI_AZ_1` . This specifies the subnet in which you want the preferred file server to be located.", "RouteTableIds": "(Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.\n\n> Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication. These route tables are tagged with `Key: AmazonFSx; Value: ManagedByAmazonFSx` . When creating FSx for ONTAP Multi-AZ file systems using AWS CloudFormation we recommend that you add the `Key: AmazonFSx; Value: ManagedByAmazonFSx` tag manually.", "ThroughputCapacity": "Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). 
For more information, see [Managing throughput capacity](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-throughput-capacity.html) in the FSx for ONTAP User Guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value.\n- The value of `ThroughputCapacity` when divided by the value of `HAPairs` is outside of the valid range for `ThroughputCapacity` .", - "ThroughputCapacityPerHAPair": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", + "ThroughputCapacityPerHAPair": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For 
`SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 12).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "WeeklyMaintenanceStartTime": "A recurring weekly time, in the format `D:HH:MM` .\n\n`D` is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see [the ISO-8601 spec as described on Wikipedia](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_week_date) .\n\n`HH` is the zero-padded hour of the day (0-23), and `MM` is the zero-padded minute of the hour.\n\nFor example, `1:05:00` specifies maintenance at 5 AM Monday." }, "AWS::FSx::FileSystem OpenZFSConfiguration": { @@ -15869,7 +15931,7 @@ "CatalogId": "The catalog ID of the table.", "DatabaseName": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", "TableName": "The table name. For Hive compatibility, this must be entirely lowercase.", - "TableOptimizerConfiguration": "", + "TableOptimizerConfiguration": "Specifies configuration details of a table optimizer.", "Type": "The type of table optimizer. Currently, the only valid value is compaction." }, "AWS::Glue::TableOptimizer TableOptimizerConfiguration": { @@ -18980,17 +19042,17 @@ }, "AWS::IoTSiteWise::AccessPolicy": { "AccessPolicyIdentity": "The identity for this access policy. Choose an IAM Identity Center user, an IAM Identity Center group, or an IAM user.", - "AccessPolicyPermission": "The permission level for this access policy. Choose either a `ADMINISTRATOR` or `VIEWER` . 
Note that a project `ADMINISTRATOR` is also known as a project owner.", + "AccessPolicyPermission": "The permission level for this access policy. Note that a project `ADMINISTRATOR` is also known as a project owner.", "AccessPolicyResource": "The AWS IoT SiteWise Monitor resource for this access policy. Choose either a portal or a project." }, "AWS::IoTSiteWise::AccessPolicy AccessPolicyIdentity": { "IamRole": "An IAM role identity.", "IamUser": "An IAM user identity.", - "User": "The IAM Identity Center user to which this access policy maps." + "User": "An IAM Identity Center user identity." }, "AWS::IoTSiteWise::AccessPolicy AccessPolicyResource": { - "Portal": "The AWS IoT SiteWise Monitor portal for this access policy.", - "Project": "The AWS IoT SiteWise Monitor project for this access policy." + "Portal": "Identifies an AWS IoT SiteWise Monitor portal.", + "Project": "Identifies a specific AWS IoT SiteWise Monitor project." }, "AWS::IoTSiteWise::AccessPolicy IamRole": { "arn": "The ARN of the IAM role. For more information, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in the *IAM User Guide* ." @@ -19005,36 +19067,36 @@ "id": "The ID of the project." }, "AWS::IoTSiteWise::AccessPolicy User": { - "id": "The ID of the user." + "id": "The IAM Identity Center ID of the user." }, "AWS::IoTSiteWise::Asset": { - "AssetDescription": "A description for the asset.", - "AssetHierarchies": "A list of asset hierarchies that each contain a `hierarchyLogicalId` . A hierarchy specifies allowed parent/child asset relationships.", + "AssetDescription": "A description for the asset.", + "AssetHierarchies": "A list of asset hierarchies that each contain a `hierarchyId` . A hierarchy specifies allowed parent/child asset relationships.", "AssetModelId": "The ID of the asset model from which to create the asset. This can be either the actual ID in UUID format, or else `externalId:` followed by the external ID, if it has one. 
For more information, see [Referencing objects with external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-id-references) in the *AWS IoT SiteWise User Guide* .", - "AssetName": "A unique, friendly name for the asset.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "AssetName": "A friendly name for the asset.", "AssetProperties": "The list of asset properties for the asset.\n\nThis object doesn't include properties that you define in composite models. You can find composite model properties in the `assetCompositeModels` object.", "Tags": "A list of key-value pairs that contain metadata for the asset. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Asset AssetHierarchy": { "ChildAssetId": "The Id of the child asset.", - "LogicalId": "The `LogicalID` of the hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` ." + "LogicalId": "The ID of the hierarchy. This ID is a `hierarchyId` ." }, "AWS::IoTSiteWise::Asset AssetProperty": { - "Alias": "The property alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .\n\nThe property alias must have 1-1000 characters.", - "LogicalId": "The `LogicalID` of the asset property.\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", - "NotificationState": "The MQTT notification state ( `ENABLED` or `DISABLED` ) for this asset property. 
When the notification state is `ENABLED` , AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .\n\n> You must use all caps for the NotificationState parameter. If you use lower case letters, you will receive a schema validation error.", + "Alias": "The alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .", + "LogicalId": "The `LogicalID` of the asset property.", + "NotificationState": "The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .", "Unit": "The unit (such as `Newtons` or `RPM` ) of the asset property." }, "AWS::IoTSiteWise::Asset Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::AssetModel": { - "AssetModelCompositeModels": "The composite asset models that are part of this asset model. Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. 
You can use composite asset models to define alarms on this asset model.", + "AssetModelCompositeModels": "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.\n\n> When creating custom composite models, you need to use [CreateAssetModelCompositeModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModelCompositeModel.html) . For more information, see [Creating custom composite models (Components)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-custom-composite-models.html) in the *AWS IoT SiteWise User Guide* .", "AssetModelDescription": "A description for the asset model.", - "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", - "AssetModelName": "A unique, friendly name for the asset model.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", - "AssetModelProperties": "The property definitions of the asset model. For more information, see [Defining data properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "AssetModelName": "A unique, friendly name for the asset model.", + "AssetModelProperties": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "Tags": "A list of key-value pairs that contain metadata for the asset. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::AssetModel AssetModelCompositeModel": { @@ -19044,23 +19106,23 @@ "Type": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` ." }, "AWS::IoTSiteWise::AssetModel AssetModelHierarchy": { - "ChildAssetModelId": "The Id of the asset model.", - "LogicalId": "The `LogicalID` of the asset model hierarchy. 
This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+`", - "Name": "The name of the asset model hierarchy.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` ." + "ChildAssetModelId": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "LogicalId": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", + "Name": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation." }, "AWS::IoTSiteWise::AssetModel AssetModelProperty": { - "DataType": "The data type of the asset model property. The value can be `STRING` , `INTEGER` , `DOUBLE` , `BOOLEAN` , or `STRUCT` .", + "DataType": "The data type of the asset model property.", "DataTypeSpec": "The data type of the structure for this property. 
This parameter exists on properties that have the `STRUCT` data type.", - "LogicalId": "The `LogicalID` of the asset model property.\n\nThe maximum length is 256 characters, with the pattern `[^\\\\u0000-\\\\u001F\\\\u007F]+` .", - "Name": "The name of the asset model property.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", - "Type": "Contains a property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` .", + "LogicalId": "The `LogicalID` of the asset model property.", + "Name": "The name of the asset model property.", + "Type": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "Unit": "The unit of the asset model property, such as `Newtons` or `RPM` ." }, "AWS::IoTSiteWise::AssetModel Attribute": { "DefaultValue": "The default value of the asset model property attribute. All assets that you create from the asset model contain this attribute value. You can update an attribute's value after you create an asset. For more information, see [Updating attribute values](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/update-attribute-values.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::AssetModel ExpressionVariable": { - "Name": "The friendly name of the variable to be used in the expression.\n\nThe maximum length is 64 characters with the pattern `^[a-z][a-z0-9_]*$` .", + "Name": "The friendly name of the variable to be used in the expression.", "Value": "The variable that identifies an asset property from which to use values." }, "AWS::IoTSiteWise::AssetModel Metric": { @@ -19072,14 +19134,14 @@ "Tumbling": "The tumbling time interval window." }, "AWS::IoTSiteWise::AssetModel PropertyType": { - "Attribute": "Specifies an asset attribute property. 
An attribute generally contains static information, such as the serial number of an [industrial IoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.\n\nThis is required if the `TypeName` is `Attribute` and has a `DefaultValue` .", - "Metric": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.\n\nThis is required if the `TypeName` is `Metric` .", - "Transform": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.\n\nThis is required if the `TypeName` is `Transform` .", + "Attribute": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [IIoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.", + "Metric": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.", + "Transform": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.", "TypeName": "The type of property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` ." }, "AWS::IoTSiteWise::AssetModel Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." 
}, "AWS::IoTSiteWise::AssetModel Transform": { "Expression": "The mathematical expression that defines the transformation function. You can specify up to 10 variables per expression. You can specify up to 10 functions per expression.\n\nFor more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", @@ -19090,8 +19152,8 @@ "Offset": "The offset for the tumbling window. The `offset` parameter accepts the following:\n\n- The offset time.\n\nFor example, if you specify `18h` for `offset` and `1d` for `interval` , AWS IoT SiteWise aggregates data in one of the following ways:\n\n- If you create the metric before or at 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) on the day when you create the metric.\n- If you create the metric after 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) the next day.\n- The ISO 8601 format.\n\nFor example, if you specify `PT18H` for `offset` and `1d` for `interval` , AWS IoT SiteWise aggregates data in one of the following ways:\n\n- If you create the metric before or at 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) on the day when you create the metric.\n- If you create the metric after 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) the next day.\n- The 24-hour clock.\n\nFor example, if you specify `00:03:00` for `offset` , `5m` for `interval` , and you create the metric at 2 PM (UTC), you get the first aggregation result at 2:03 PM (UTC). 
You get the second aggregation result at 2:08 PM (UTC).\n- The offset time zone.\n\nFor example, if you specify `2021-07-23T18:00-08` for `offset` and `1d` for `interval` , AWS IoT SiteWise aggregates data in one of the following ways:\n\n- If you create the metric before or at 6 PM (PST), you get the first aggregation result at 6 PM (PST) on the day when you create the metric.\n- If you create the metric after 6 PM (PST), you get the first aggregation result at 6 PM (PST) the next day." }, "AWS::IoTSiteWise::AssetModel VariableValue": { - "HierarchyLogicalId": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", - "PropertyLogicalId": "The `LogicalID` of the property to use as the variable." + "HierarchyLogicalId": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between asset models (hierarchies)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "PropertyLogicalId": "The `LogicalID` of the property that is being referenced." }, "AWS::IoTSiteWise::Dashboard": { "DashboardDefinition": "The dashboard definition specified in a JSON literal. 
For detailed information, see [Creating dashboards (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html) in the *AWS IoT SiteWise User Guide* .", @@ -19101,37 +19163,37 @@ "Tags": "A list of key-value pairs that contain metadata for the dashboard. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Dashboard Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::Gateway": { "GatewayCapabilitySummaries": "A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use [DescribeGatewayCapabilityConfiguration](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGatewayCapabilityConfiguration.html) .", - "GatewayName": "A unique, friendly name for the gateway.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "GatewayName": "A unique, friendly name for the gateway.", "GatewayPlatform": "The gateway's platform. You can only specify one platform in a gateway.", "Tags": "A list of key-value pairs that contain metadata for the gateway. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Gateway GatewayCapabilitySummary": { "CapabilityConfiguration": "The JSON document that defines the configuration for the gateway capability. 
For more information, see [Configuring data sources (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/configure-sources.html#configure-source-cli) in the *AWS IoT SiteWise User Guide* .", - "CapabilityNamespace": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .\n\nThe maximum length is 512 characters with the pattern `^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$` ." + "CapabilityNamespace": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` ." }, "AWS::IoTSiteWise::Gateway GatewayPlatform": { "Greengrass": "A gateway that runs on AWS IoT Greengrass .", "GreengrassV2": "A gateway that runs on AWS IoT Greengrass V2 ." }, "AWS::IoTSiteWise::Gateway Greengrass": { - "GroupArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/latest/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/latest/apireference/getgroup-get.html) in the *AWS IoT Greengrass API Reference* ." + "GroupArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* ." 
}, "AWS::IoTSiteWise::Gateway GreengrassV2": { "CoreDeviceThingName": "The name of the AWS IoT thing for your AWS IoT Greengrass V2 core device." }, "AWS::IoTSiteWise::Gateway Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::Portal": { "Alarms": "Contains the configuration information of an alarm created in an AWS IoT SiteWise Monitor portal. You can use the alarm to monitor an asset property and get notified when the asset property value is outside a specified range. For more information, see [Monitoring with alarms](https://docs.aws.amazon.com/iot-sitewise/latest/appguide/monitor-alarms.html) in the *AWS IoT SiteWise Application Guide* .", "NotificationSenderEmail": "The email address that sends alarm notifications.\n\n> If you use the [AWS IoT Events managed Lambda function](https://docs.aws.amazon.com/iotevents/latest/developerguide/lambda-support.html) to manage your emails, you must [verify the sender email address in Amazon SES](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html) .", - "PortalAuthMode": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center , you must enable IAM Identity Center . For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . 
This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management ( IAM ) to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", + "PortalAuthMode": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center, you must enable IAM Identity Center. For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", "PortalContactEmail": "The AWS administrator's contact email address.", "PortalDescription": "A description for the portal.", "PortalName": "A friendly name for the portal.", @@ -19143,8 +19205,8 @@ "NotificationLambdaArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Lambda function that manages alarm notifications. For more information, see [Managing alarm notifications](https://docs.aws.amazon.com/iotevents/latest/developerguide/lambda-support.html) in the *AWS IoT Events Developer Guide* ." }, "AWS::IoTSiteWise::Portal Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::Project": { "AssetIds": "A list that contains the IDs of each asset associated with the project.", @@ -19154,8 +19216,8 @@ "Tags": "A list of key-value pairs that contain metadata for the project. 
For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Project Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTTwinMaker::ComponentType": { "ComponentTypeId": "The ID of the component type.", @@ -19523,7 +19585,7 @@ "LastUplinkReceivedAt": "The date and time when the most recent uplink was received.", "LoRaWAN": "The device configuration information to use to create the wireless device. Must be at least one of OtaaV10x, OtaaV11, AbpV11, or AbpV10x.", "Name": "The name of the new resource.", - "Positioning": "", + "Positioning": "FPort values for the GNSS, Stream, and ClockSync functions of the positioning information.", "Tags": "The tags are an array of key-value pairs to attach to the specified resource. Tags can have a minimum of 0 and a maximum of 50 items.", "ThingArn": "The ARN of the thing to associate with the wireless device.", "Type": "The wireless device type." @@ -19537,19 +19599,19 @@ "SessionKeys": "Session keys for ABP v1.1." }, "AWS::IoTWireless::WirelessDevice Application": { - "DestinationName": "", - "FPort": "", - "Type": "" + "DestinationName": "The name of the position data destination that describes the IoT rule that processes the device's position data.", + "FPort": "The name of the new destination for the device.", + "Type": "Application type, which can be specified to obtain real-time position information of your LoRaWAN device." }, "AWS::IoTWireless::WirelessDevice FPorts": { - "Applications": "" + "Applications": "LoRaWAN application configuration, which can be used to perform geolocation." 
}, "AWS::IoTWireless::WirelessDevice LoRaWANDevice": { "AbpV10x": "ABP device object for LoRaWAN specification v1.0.x.", "AbpV11": "ABP device object for create APIs for v1.1.", "DevEui": "The DevEUI value.", "DeviceProfileId": "The ID of the device profile for the new wireless device.", - "FPorts": "", + "FPorts": "List of FPort assigned for different LoRaWAN application packages to use.", "OtaaV10x": "OTAA device object for create APIs for v1.0.x", "OtaaV11": "OTAA device object for v1.1 for create APIs.", "ServiceProfileId": "The ID of the service profile." @@ -19902,8 +19964,8 @@ "AccessControlListConfiguration": "Provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see [Access control for S3 data sources](https://docs.aws.amazon.com/kendra/latest/dg/s3-acl.html) .", "BucketName": "The name of the bucket that contains the documents.", "DocumentsMetadataConfiguration": "Specifies document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document.", - "ExclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", - "InclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "ExclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `**/*.png` - All .png files in all directories\n- `**/*.{png, ico, md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "InclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `**/*.png` - All .png files in all directories\n- `**/*.{png, ico, md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "InclusionPrefixes": "A list of S3 prefixes for the documents that should be included in the index." }, "AWS::Kendra::DataSource S3Path": { @@ -21042,7 +21104,7 @@ }, "AWS::Lambda::Alias": { "Description": "A description of the alias.", - "FunctionName": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "FunctionVersion": "The function version that the alias invokes.", "Name": "The name of the alias.", "ProvisionedConcurrencyConfig": "Specifies a [provisioned concurrency](https://docs.aws.amazon.com/lambda/latest/dg/configuration-concurrency.html) configuration for a function's alias.", @@ -21095,7 +21157,7 @@ "Enabled": "When true, the event source mapping is active. When false, Lambda pauses polling and invocation.\n\nDefault: True", "EventSourceArn": "The Amazon Resource Name (ARN) of the event source.\n\n- *Amazon Kinesis* \u2013 The ARN of the data stream or a stream consumer.\n- *Amazon DynamoDB Streams* \u2013 The ARN of the stream.\n- *Amazon Simple Queue Service* \u2013 The ARN of the queue.\n- *Amazon Managed Streaming for Apache Kafka* \u2013 The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) ).\n- *Amazon MQ* \u2013 The ARN of the broker.\n- *Amazon DocumentDB* \u2013 The ARN of the DocumentDB change stream.", "FilterCriteria": "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) .", - "FunctionName": "The name of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it's limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", "FunctionResponseTypes": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", "MaximumBatchingWindowInSeconds": "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n\n*Default ( Kinesis , DynamoDB , Amazon SQS event sources)* : 0\n\n*Default ( Amazon MSK , Kafka, Amazon MQ , Amazon DocumentDB event sources)* : 500 ms\n\n*Related setting:* For Amazon SQS event sources, when you set `BatchSize` to a value greater than 10, you must set `MaximumBatchingWindowInSeconds` to at least 1.", "MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n\n> The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", @@ -21250,7 +21312,7 @@ "AWS::Lambda::Permission": { "Action": "The action that the principal can use on the function. 
For example, `lambda:InvokeFunction` or `lambda:GetFunction` .", "EventSourceToken": "For Alexa Smart Home functions, a token that the invoker must supply.", - "FunctionName": "The name of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "FunctionUrlAuthType": "The type of authentication that your function URL uses. Set to `AWS_IAM` if you want to restrict access to authenticated users only. Set to `NONE` if you want to bypass IAM authentication to create a public endpoint. For more information, see [Security and auth model for Lambda function URLs](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) .", "Principal": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "PrincipalOrgID": "The identifier for your organization in AWS Organizations . 
Use this to grant permissions to all the AWS accounts under this organization.", @@ -21275,7 +21337,7 @@ "AWS::Lambda::Version": { "CodeSha256": "Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. Updates are not supported for this property.", "Description": "A description for the version to override the description in the function configuration. Updates are not supported for this property.", - "FunctionName": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "ProvisionedConcurrencyConfig": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property.", "RuntimePolicy": "" }, @@ -35785,7 +35847,7 @@ "DomainIAMRoleName": "Specifies the name of the IAM role to use when making API calls to the Directory Service.\n\nValid for: Aurora DB clusters only", "EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. 
For more information, see [Publishing Database Logs to Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the *Amazon Aurora User Guide* .\n\n*Aurora MySQL*\n\nValid values: `audit` , `error` , `general` , `slowquery`\n\n*Aurora PostgreSQL*\n\nValid values: `postgresql`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "EnableGlobalWriteForwarding": "Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.\n\nYou can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then.\n\nValid for Cluster Type: Aurora DB clusters only", - "EnableHttpEndpoint": "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", + "EnableHttpEndpoint": "Specifies whether to enable the HTTP endpoint for the DB cluster. 
By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n\nFor more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.*\n\nValid for: Aurora DB clusters only", "Engine": "The name of the database engine to be used for this DB cluster.\n\nValid Values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only supports Aurora Serverless v1.\n\nLimitations and requirements apply to some DB engine modes. 
For more information, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n- [Limitations of parallel query](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n- [Limitations of Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n\nValid for: Aurora DB clusters only", @@ -35817,7 +35879,7 @@ "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. 
Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to `false` .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", + "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", "Tags": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "UseLatestRestorableTime": "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with this DB cluster.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" @@ -35934,7 +35996,7 @@ "SourceRegion": "The ID of the region that contains the source DB instance for the read replica.", "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. 
The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB instance is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB instance to be encrypted, then don't set this property or set it to `false` .\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "StorageThroughput": "Specifies the storage throughput value for the DB instance. This setting applies only to the `gp3` storage type.\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora.", - "StorageType": "Specifies the storage type to be associated with the DB instance.\n\nValid values: `gp2 | gp3 | io1 | standard`\n\nThe `standard` value is also known as magnetic.\n\nIf you specify `io1` or `gp3` , you must also include a value for the `Iops` parameter.\n\nDefault: `io1` if the `Iops` parameter is specified, otherwise `gp2`\n\nFor more information, see [Amazon RDS DB Instance Storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs).", + "StorageType": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. 
Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", "Tags": "An optional array of key-value pairs to apply to this DB instance.", "Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by [Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) .", "UseDefaultProcessorFeatures": "Specifies whether the DB instance class of the DB instance uses its default processor features.\n\nThis setting doesn't apply to RDS Custom DB instances.", @@ -36197,7 +36259,7 @@ "NodeType": "The node type to be provisioned for the cluster. For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `ds2.xlarge` | `ds2.8xlarge` | `dc1.large` | `dc1.8xlarge` | `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", "NumberOfNodes": "The number of compute nodes in the cluster. This parameter is required when the *ClusterType* parameter is specified as `multi-node` .\n\nFor information about determining how many nodes you need, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nIf you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster.\n\nDefault: `1`\n\nConstraints: Value must be at least 1 and no more than 100.", "OwnerAccount": "The AWS account used to create or copy the snapshot. 
Required if you are restoring a snapshot you do not own, optional if you own the snapshot.", - "Port": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values: `1150-65535`", + "Port": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with ds2 or dc2 nodes - Select a port within the range `1150-65535` .", "PreferredMaintenanceWindow": "The weekly time range (in UTC) during which automated cluster maintenance can occur.\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nDefault: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see [Maintenance Windows](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) in Amazon Redshift Cluster Management Guide.\n\nValid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun\n\nConstraints: Minimum 30-minute window.", "PubliclyAccessible": "If `true` , the cluster can be accessed from a public network.", "ResourceAction": "The Amazon Redshift operation to be performed. Supported operations are `pause-cluster` , `resume-cluster` , and `failover-primary-compute` .", @@ -36346,7 +36408,6 @@ "NamespaceName": "The name of the namespace. 
Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "NamespaceResourcePolicy": "The resource policy that will be attached to the namespace.", "RedshiftIdcApplicationArn": "The ARN for the Redshift application that integrates with IAM Identity Center.", - "SnapshotCopyConfigurations": "", "Tags": "The map of the key-value pairs used to tag the namespace." }, "AWS::RedshiftServerless::Namespace Namespace": { @@ -36364,11 +36425,6 @@ "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "Status": "The status of the namespace." }, - "AWS::RedshiftServerless::Namespace SnapshotCopyConfiguration": { - "DestinationKmsKeyId": "The ID of the KMS key to use to encrypt your snapshots in the destination AWS Region .", - "DestinationRegion": "The destination AWS Region to copy snapshots to.", - "SnapshotRetentionPeriod": "The retention period of snapshots that are copied to the destination AWS Region ." - }, "AWS::RedshiftServerless::Namespace Tag": { "Key": "The key to use in the tag.", "Value": "The value of the tag." @@ -38229,7 +38285,7 @@ "OperatingSystem": "Defines the operating system the patch baseline applies to. 
The default value is `WINDOWS` .", "PatchGroups": "The name of the patch group to be registered with the patch baseline.", "RejectedPatches": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", - "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *`BLOCK`* : Packages in the `RejectedPatches` list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as `InstalledRejected` .", + "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. 
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", "Sources": "Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only.", "Tags": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to." }, @@ -38951,7 +39007,7 @@ "AWS::SageMaker::EndpointConfig ServerlessConfig": { "MaxConcurrency": "The maximum number of concurrent invocations your serverless endpoint can process.", "MemorySizeInMB": "The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.", - "ProvisionedConcurrency": "" + "ProvisionedConcurrency": "The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to `MaxConcurrency` .\n\n> This field is not supported for serverless endpoint recommendations for Inference Recommender jobs. For more information about creating an Inference Recommender job, see [CreateInferenceRecommendationsJobs](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateInferenceRecommendationsJob.html) ." }, "AWS::SageMaker::EndpointConfig Tag": { "Key": "The tag key. Tag keys must be unique per resource.", @@ -40776,7 +40832,7 @@ }, "AWS::Shield::DRTAccess": { "LogBucketList": "Authorizes the Shield Response Team (SRT) to access the specified Amazon S3 bucket containing log data such as Application Load Balancer access logs, CloudFront logs, or logs from third party sources. 
You can associate up to 10 Amazon S3 buckets with your subscription.\n\nUse this to share information with the SRT that's not available in AWS WAF logs.\n\nTo use the services of the SRT, you must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .", - "RoleArn": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- You must have the `iam:PassRole` permission. For more information, see [Granting a user permissions to pass a role to an AWS service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . 
For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you." + "RoleArn": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. 
By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you." }, "AWS::Shield::ProactiveEngagement": { "EmergencyContactList": "The list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you for escalations to the SRT and to initiate proactive customer support, plus any relevant notes.\n\nTo enable proactive engagement, the contact list must include at least one phone number.\n\nIf you provide more than one contact, in the notes, indicate the circumstances under which each contact should be used. Include primary and secondary contact designations, and provide the hours of availability and time zones for each contact.\n\nExample contact notes:\n\n- This is a hotline that's staffed 24x7x365. Please work with the responding analyst and they will get the appropriate person on the call.\n- Please contact the secondary phone number if the hotline doesn't respond within 5 minutes.", @@ -40807,7 +40863,7 @@ "Value": "Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive." }, "AWS::Shield::ProtectionGroup": { - "Aggregation": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- Max - Use the highest traffic from each resource. 
This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", + "Aggregation": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- `Sum` - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- `Mean` - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- `Max` - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", "Members": "The ARNs (Amazon Resource Names) of the resources to include in the protection group. You must set this when you set `Pattern` to `ARBITRARY` and you must not set it for any other `Pattern` setting.", "Pattern": "The criteria to use to choose the protected resources for inclusion in the group. You can include all resources that have protections, provide a list of resource ARNs (Amazon Resource Names), or include all resources of a specified resource type.", "ProtectionGroupId": "The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.", @@ -41865,7 +41921,7 @@ "CustomResponse": "Defines a custom response for the web request.\n\nFor information about customizing web requests and responses, see [Customizing web requests and responses in AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-custom-request-response.html) in the *AWS WAF Developer Guide* ." 
}, "AWS::WAFv2::RuleGroup Body": { - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::RuleGroup ByteMatchStatement": { "FieldToMatch": "The part of the web request that you want AWS WAF to inspect.", @@ -41917,10 +41973,11 @@ }, "AWS::WAFv2::RuleGroup FieldToMatch": { "AllQueryArguments": "Inspect all query arguments.", - "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). 
For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "Cookies": "Inspect the request cookies. You must configure scope and pattern matching filters in the `Cookies` object, to define the set of cookies and the parts of the cookies that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's cookies and only the first 200 cookies are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize cookie content in the `Cookies` object. AWS WAF applies the pattern matching filters to the cookies that it receives from the underlying host service.", "Headers": "Inspect the request headers. 
You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", - "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "JA3Fingerprint": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. 
For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "Method": "Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "QueryString": "Inspect the query string. This is the part of a URL that appears after a `?` character, if any.", "SingleHeader": "Inspect a single header. Provide the name of the header to inspect, for example, `User-Agent` or `Referer` . 
This setting isn't case sensitive.\n\nExample JSON: `\"SingleHeader\": { \"Name\": \"haystack\" }`\n\nAlternately, you can filter and inspect all headers with the `Headers` `FieldToMatch` setting.", @@ -41957,11 +42014,14 @@ "AWS::WAFv2::RuleGroup ImmunityTimeProperty": { "ImmunityTime": "The amount of time, in seconds, that a `CAPTCHA` or challenge timestamp is considered valid by AWS WAF . The default setting is 300.\n\nFor the Challenge action, the minimum setting is 300." }, + "AWS::WAFv2::RuleGroup JA3Fingerprint": { + "FallbackBehavior": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." + }, "AWS::WAFv2::RuleGroup JsonBody": { "InvalidFallbackBehavior": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", "MatchPattern": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", "MatchScope": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::RuleGroup JsonMatchPattern": { "All": "Match all of the elements. See also `MatchScope` in the `JsonBody` `FieldToMatch` specification.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", @@ -41986,6 +42046,7 @@ "AWS::WAFv2::RuleGroup RateBasedStatement": { "AggregateKeyType": "Setting that indicates how to aggregate the request counts.\n\n> Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling. \n\n- `CONSTANT` - Count and limit the requests that match the rate-based rule's scope-down statement. With this option, the counted requests aren't further aggregated. The scope-down statement is the only specification used. When the count of all requests that satisfy the scope-down statement goes over the limit, AWS WAF applies the rule action to all requests that satisfy the scope-down statement.\n\nWith this option, you must configure the `ScopeDownStatement` property.\n- `CUSTOM_KEYS` - Aggregate the request counts using one or more web request components as the aggregate keys.\n\nWith this option, you must specify the aggregate keys in the `CustomKeys` property.\n\nTo aggregate on only the IP address or only the forwarded IP address, don't use custom keys. 
Instead, set the aggregate key type to `IP` or `FORWARDED_IP` .\n- `FORWARDED_IP` - Aggregate the request counts on the first IP address in an HTTP header.\n\nWith this option, you must specify the header to use in the `ForwardedIPConfig` property.\n\nTo aggregate on a combination of the forwarded IP address with other aggregate keys, use `CUSTOM_KEYS` .\n- `IP` - Aggregate the request counts on the IP address from the web request origin.\n\nTo aggregate on a combination of the IP address with other aggregate keys, use `CUSTOM_KEYS` .",
      "CustomKeys": "Specifies the aggregate keys to use in a rate-based rule.",
+      "EvaluationWindowSec": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)",
      "ForwardedIPConfig": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.",
      "Limit": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. 
If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement." @@ -42077,7 +42138,7 @@ "RateBasedStatement": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. 
You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. 
The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. 
For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "RegexMatchStatement": "A rule statement used to search web request components for a match against a single regular expression.", "RegexPatternSetReferenceStatement": "A rule statement used to search web request components for matches with regular expressions. To use this, create a `RegexPatternSet` that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set.\n\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.", - "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. 
If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "SqliMatchStatement": "A rule statement that inspects for malicious SQL code. Attackers insert malicious SQL code into web requests to do things like modify your database or extract data from it.", "XssMatchStatement": "A rule statement that inspects for cross-site scripting (XSS) attacks. In XSS attacks, the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers." }, @@ -42099,7 +42160,7 @@ "TextTransformations": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. 
If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by `FieldToMatch` , starting from the lowest priority setting, before inspecting the content for a match." }, "AWS::WAFv2::WebACL": { - "AssociationConfig": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "AssociationConfig": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected resources forward to AWS WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "CaptchaConfig": "Specifies how AWS WAF should handle `CAPTCHA` evaluations for rules that don't have their own `CaptchaConfig` settings. If you don't specify this, AWS WAF uses its default settings for `CaptchaConfig` .", "ChallengeConfig": "Specifies how AWS WAF should handle challenge evaluations for rules that don't have their own `ChallengeConfig` settings. 
If you don't specify this, AWS WAF uses its default settings for `ChallengeConfig` .", "CustomResponseBodies": "A map of custom response keys and content bodies. When you create a rule with a block action, you can send a custom response to the web request. You define these for the web ACL, and then use them in the rules and default actions that you define in the web ACL.\n\nFor information about customizing web requests and responses, see [Customizing web requests and responses in AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-custom-request-response.html) in the *AWS WAF Developer Guide* .\n\nFor information about the limits on count and size for custom request and response settings, see [AWS WAF quotas](https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) in the *AWS WAF Developer Guide* .", @@ -42136,13 +42197,13 @@ "Statements": "The statements to combine with AND logic. You can use any statements that can be nested." }, "AWS::WAFv2::WebACL AssociationConfig": { - "RequestBody": "Customizes the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) ." + "RequestBody": "Customizes the maximum size of the request body that your protected CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access resources forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes). You can change the setting for any of the available resource types.\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . 
\n\nExample JSON: `{ \"API_GATEWAY\": \"KB_48\", \"APP_RUNNER_SERVICE\": \"KB_32\" }`\n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes)." }, "AWS::WAFv2::WebACL BlockAction": { "CustomResponse": "Defines a custom response for the web request.\n\nFor information about customizing web requests and responses, see [Customizing web requests and responses in AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-custom-request-response.html) in the *AWS WAF Developer Guide* ." }, "AWS::WAFv2::WebACL Body": { - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::WebACL ByteMatchStatement": { "FieldToMatch": "The part of the web request that you want AWS WAF to inspect.", @@ -42204,10 +42265,11 @@ }, "AWS::WAFv2::WebACL FieldToMatch": { "AllQueryArguments": "Inspect all query arguments.", - "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). 
For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "Cookies": "Inspect the request cookies. You must configure scope and pattern matching filters in the `Cookies` object, to define the set of cookies and the parts of the cookies that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's cookies and only the first 200 cookies are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize cookie content in the `Cookies` object. AWS WAF applies the pattern matching filters to the cookies that it receives from the underlying host service.", "Headers": "Inspect the request headers. 
You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to inspect and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.",
+      "JA3Fingerprint": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. 
For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "Method": "Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "QueryString": "Inspect the query string. This is the part of a URL that appears after a `?` character, if any.", "SingleHeader": "Inspect a single header. Provide the name of the header to inspect, for example, `User-Agent` or `Referer` . 
This setting isn't case sensitive.\n\nExample JSON: `\"SingleHeader\": { \"Name\": \"haystack\" }`\n\nAlternately, you can filter and inspect all headers with the `Headers` `FieldToMatch` setting.", @@ -42244,11 +42306,14 @@ "AWS::WAFv2::WebACL ImmunityTimeProperty": { "ImmunityTime": "The amount of time, in seconds, that a `CAPTCHA` or challenge timestamp is considered valid by AWS WAF . The default setting is 300.\n\nFor the Challenge action, the minimum setting is 300." }, + "AWS::WAFv2::WebACL JA3Fingerprint": { + "FallbackBehavior": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." + }, "AWS::WAFv2::WebACL JsonBody": { "InvalidFallbackBehavior": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", "MatchPattern": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", "MatchScope": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::WebACL JsonMatchPattern": { "All": "Match all of the elements. See also `MatchScope` in the `JsonBody` `FieldToMatch` specification.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", @@ -42292,6 +42357,7 @@ "AWS::WAFv2::WebACL RateBasedStatement": { "AggregateKeyType": "Setting that indicates how to aggregate the request counts.\n\n> Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling. \n\n- `CONSTANT` - Count and limit the requests that match the rate-based rule's scope-down statement. With this option, the counted requests aren't further aggregated. The scope-down statement is the only specification used. When the count of all requests that satisfy the scope-down statement goes over the limit, AWS WAF applies the rule action to all requests that satisfy the scope-down statement.\n\nWith this option, you must configure the `ScopeDownStatement` property.\n- `CUSTOM_KEYS` - Aggregate the request counts using one or more web request components as the aggregate keys.\n\nWith this option, you must specify the aggregate keys in the `CustomKeys` property.\n\nTo aggregate on only the IP address or only the forwarded IP address, don't use custom keys. 
Instead, set the aggregate key type to `IP` or `FORWARDED_IP` .\n- `FORWARDED_IP` - Aggregate the request counts on the first IP address in an HTTP header.\n\nWith this option, you must specify the header to use in the `ForwardedIPConfig` property.\n\nTo aggregate on a combination of the forwarded IP address with other aggregate keys, use `CUSTOM_KEYS` .\n- `IP` - Aggregate the request counts on the IP address from the web request origin.\n\nTo aggregate on a combination of the IP address with other aggregate keys, use `CUSTOM_KEYS` .", "CustomKeys": "Specifies the aggregate keys to use in a rate-base rule.", + "EvaluationWindowSec": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", "ForwardedIPConfig": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", "Limit": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. 
If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement." @@ -42339,7 +42405,7 @@ "TextTransformations": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by `FieldToMatch` , starting from the lowest priority setting, before inspecting the content for a match." }, "AWS::WAFv2::WebACL RequestBodyAssociatedResourceTypeConfig": { - "DefaultSizeInspectionLimit": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`" + "DefaultSizeInspectionLimit": "Specifies the maximum size of the web request body component that an associated CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resource should send to AWS WAF for inspection. 
This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`" }, "AWS::WAFv2::WebACL RequestInspection": { "PasswordField": "The name of the field in the request payload that contains your customer's password.\n\nHow you specify this depends on the request inspection payload type.\n\n- For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation [JavaScript Object Notation (JSON) Pointer](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6901) .\n\nFor example, for the JSON payload `{ \"form\": { \"password\": \"THE_PASSWORD\" } }` , the password field specification is `/form/password` .\n- For form encoded payload types, use the HTML form names.\n\nFor example, for an HTML form with the input element named `password1` , the password field specification is `password1` .", @@ -42435,7 +42501,7 @@ "RegexMatchStatement": "A rule statement used to search web request components for a match against a single regular expression.", "RegexPatternSetReferenceStatement": "A rule statement used to search web request components for matches with regular expressions. To use this, create a `RegexPatternSet` that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set.\n\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.", "RuleGroupReferenceStatement": "A rule statement used to run the rules that are defined in a `RuleGroup` . 
To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\n\nYou cannot nest a `RuleGroupReferenceStatement` , for example for use inside a `NotStatement` or `OrStatement` . You cannot use a rule group reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.", - "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. 
If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "SqliMatchStatement": "A rule statement that inspects for malicious SQL code. Attackers insert malicious SQL code into web requests to do things like modify your database or extract data from it.", "XssMatchStatement": "A rule statement that inspects for cross-site scripting (XSS) attacks. In XSS attacks, the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers." }, diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index c2258435f..cab54ebdc 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -2055,6 +2055,11 @@ "title": "Password", "type": "string" }, + "ReplicationUser": { + "markdownDescription": "Defines if this user is intended for CRDR replication purposes.", + "title": "ReplicationUser", + "type": "boolean" + }, "Username": { "markdownDescription": "The username of the broker user. For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibts using guest as a valid usename. 
This value must be 2-100 characters long.\n\n> Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other AWS services, including CloudWatch Logs . Broker usernames are not intended to be used for private or sensitive data.", "title": "Username", @@ -2365,7 +2370,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", + "markdownDescription": "The environment variables for the Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", "title": "EnvironmentVariables", "type": "array" }, @@ -2474,7 +2479,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "Environment variables for the auto created branch.", + "markdownDescription": "The environment variables for the autocreated branch.", "title": "EnvironmentVariables", "type": "array" }, @@ -2551,12 +2556,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "", + "markdownDescription": "The environment variable name.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The environment variable value.", "title": "Value", "type": "string" } @@ -2815,6 +2820,16 @@ "title": "AutoSubDomainIAMRole", "type": "string" }, + "Certificate": { + "$ref": "#/definitions/AWS::Amplify::Domain.Certificate", + 
"markdownDescription": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", + "title": "Certificate" + }, + "CertificateSettings": { + "$ref": "#/definitions/AWS::Amplify::Domain.CertificateSettings", + "markdownDescription": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", + "title": "CertificateSettings" + }, "DomainName": { "markdownDescription": "The domain name for the domain association.", "title": "DomainName", @@ -2832,6 +2847,11 @@ "markdownDescription": "The setting for the subdomain.", "title": "SubDomainSettings", "type": "array" + }, + "UpdateStatus": { + "markdownDescription": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. 
If your custom domain is on Route\u00a053, Amplify handles this for you automatically. For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.", + "title": "UpdateStatus", + "type": "string" } }, "required": [ @@ -2862,6 +2882,43 @@ ], "type": "object" }, + "AWS::Amplify::Domain.Certificate": { + "additionalProperties": false, + "properties": { + "CertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for a custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CertificateArn", + "type": "string" + }, + "CertificateType": { + "markdownDescription": "The type of SSL/TLS certificate that you want to use.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). 
For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CertificateVerificationDNSRecord": { + "markdownDescription": "The DNS record for certificate verification.", + "title": "CertificateVerificationDNSRecord", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Amplify::Domain.CertificateSettings": { + "additionalProperties": false, + "properties": { + "CertificateType": { + "markdownDescription": "The certificate type.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CustomCertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for the custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CustomCertificateArn", + "type": "string" + } + }, + "type": "object" + }, "AWS::Amplify::Domain.SubDomainSetting": { "additionalProperties": false, "properties": { @@ -3024,14 +3081,6 @@ "type": "array" } }, - "required": [ - "BindingProperties", - "ComponentType", - "Name", - "Overrides", - "Properties", - "Variants" - ], "type": "object" }, "Type": { @@ -3050,8 +3099,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3169,6 +3217,11 @@ "title": "Predicates", "type": "array" }, + 
"SlotName": { + "markdownDescription": "The name of a component slot.", + "title": "SlotName", + "type": "string" + }, "UserAttribute": { "markdownDescription": "An authenticated user attribute.", "title": "UserAttribute", @@ -3219,6 +3272,11 @@ }, "title": "Properties", "type": "object" + }, + "SourceId": { + "markdownDescription": "The unique ID of the child component in its original source system, such as Figma.", + "title": "SourceId", + "type": "string" } }, "required": [ @@ -3312,6 +3370,11 @@ "title": "Action", "type": "string" }, + "BindingEvent": { + "markdownDescription": "Binds an event to an action on a component. When you specify a `bindingEvent` , the event is called when the action is performed.", + "title": "BindingEvent", + "type": "string" + }, "Parameters": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Component.ActionParameters", "markdownDescription": "Describes information about the action.", @@ -3518,6 +3581,11 @@ "title": "Operand", "type": "string" }, + "OperandType": { + "markdownDescription": "The type of value to use when performing the evaluation.", + "title": "OperandType", + "type": "string" + }, "Operator": { "markdownDescription": "The operator to use to perform the evaluation.", "title": "Operator", @@ -3668,15 +3736,6 @@ "type": "object" } }, - "required": [ - "DataType", - "Fields", - "FormActionType", - "Name", - "SchemaVersion", - "SectionalElements", - "Style" - ], "type": "object" }, "Type": { @@ -3695,8 +3754,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3989,9 +4047,49 @@ ], "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue": { + "additionalProperties": false, + "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties", + "markdownDescription": "Describes the properties to customize with data at runtime.", + "title": "BindingProperties" + }, + "Type": { + 
"markdownDescription": "The property type.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties": { + "additionalProperties": false, + "properties": { + "Model": { + "markdownDescription": "An Amplify DataStore model.", + "title": "Model", + "type": "string" + } + }, + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormInputValueProperty": { "additionalProperties": false, "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties", + "markdownDescription": "The information to bind fields to data at runtime.", + "title": "BindingProperties" + }, + "Concat": { + "items": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValueProperty" + }, + "markdownDescription": "A list of form properties to concatenate to create the value to assign to this field property.", + "title": "Concat", + "type": "array" + }, "Value": { "markdownDescription": "The value to assign to the input field.", "title": "Value", @@ -4000,6 +4098,25 @@ }, "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties": { + "additionalProperties": false, + "properties": { + "Field": { + "markdownDescription": "The data field to bind the property to.", + "title": "Field", + "type": "string" + }, + "Property": { + "markdownDescription": "The form property to bind to the data field.", + "title": "Property", + "type": "string" + } + }, + "required": [ + "Property" + ], + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormStyle": { "additionalProperties": false, "properties": { @@ -4098,6 +4215,17 @@ "AWS::AmplifyUIBuilder::Form.ValueMappings": { "additionalProperties": false, "properties": { + "BindingProperties": { + "additionalProperties": false, + "markdownDescription": "The information to bind fields to data at runtime.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": 
"#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue" + } + }, + "title": "BindingProperties", + "type": "object" + }, "Values": { "items": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.ValueMapping" @@ -4190,10 +4318,6 @@ "type": "array" } }, - "required": [ - "Name", - "Values" - ], "type": "object" }, "Type": { @@ -4212,8 +4336,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -9110,7 +9233,7 @@ }, "Monitors": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Monitors" + "$ref": "#/definitions/AWS::AppConfig::Environment.Monitor" }, "markdownDescription": "Amazon CloudWatch alarms to monitor during the deployment process.", "title": "Monitors", @@ -9123,7 +9246,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Tags" + "$ref": "#/definitions/Tag" }, "markdownDescription": "Metadata to assign to the environment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define.", "title": "Tags", @@ -9157,152 +9280,147 @@ ], "type": "object" }, - "AWS::AppConfig::Environment.Monitors": { + "AWS::AppConfig::Environment.Monitor": { "additionalProperties": false, "properties": { "AlarmArn": { + "markdownDescription": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", + "title": "AlarmArn", "type": "string" }, "AlarmRoleArn": { + "markdownDescription": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` .", + "title": "AlarmRoleArn", "type": "string" } }, - "type": "object" - }, - "AWS::AppConfig::Environment.Tags": { - "additionalProperties": false, - "properties": { - "Key": { - "type": "string" - }, - "Value": { - "type": "string" - } - }, - "type": "object" - }, - "AWS::AppConfig::Extension": { - "additionalProperties": false, - "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - 
"Delete", - "Retain", - "Snapshot" - ], - "type": "string" - }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { - "Actions": { - "markdownDescription": "The actions defined in the extension.", - "title": "Actions", - "type": "object" - }, - "Description": { - "markdownDescription": "Information about the extension.", - "title": "Description", - "type": "string" - }, - "LatestVersionNumber": { - "markdownDescription": "You can omit this field when you create an extension. When you create a new version, specify the most recent current version number. For example, you create version 3, enter 2 for this field.", - "title": "LatestVersionNumber", - "type": "number" - }, - "Name": { - "markdownDescription": "A name for the extension. Each extension name in your account must be unique. Extension versions use the same name.", - "title": "Name", - "type": "string" - }, - "Parameters": { - "additionalProperties": false, - "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "$ref": "#/definitions/AWS::AppConfig::Extension.Parameter" - } - }, - "title": "Parameters", - "type": "object" - }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "Adds one or more tags for the specified extension. Tags are metadata that help you categorize resources in different ways, for example, by purpose, owner, or environment. 
Each tag consists of a key and an optional value, both of which you define.", - "title": "Tags", - "type": "array" - } - }, - "required": [ - "Actions", - "Name" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::AppConfig::Extension" - ], - "type": "string" - }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - } - }, - "required": [ - "Type", - "Properties" - ], - "type": "object" - }, - "AWS::AppConfig::Extension.Parameter": { - "additionalProperties": false, - "properties": { - "Description": { - "markdownDescription": "Information about the parameter.", - "title": "Description", - "type": "string" - }, - "Required": { - "markdownDescription": "A parameter value must be specified in the extension association.", - "title": "Required", - "type": "boolean" - } - }, "required": [ - "Required" + "AlarmArn" ], "type": "object" }, - "AWS::AppConfig::ExtensionAssociation": { + "AWS::AppConfig::Extension": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Actions": { + "markdownDescription": "The actions defined in the extension.", + "title": "Actions", + "type": "object" + }, + "Description": { + "markdownDescription": "Information about the extension.", + "title": "Description", + "type": "string" + }, + "LatestVersionNumber": { + "markdownDescription": "You can omit this field when you create an extension. When you create a new version, specify the most recent current version number. 
For example, you create version 3, enter 2 for this field.", + "title": "LatestVersionNumber", + "type": "number" + }, + "Name": { + "markdownDescription": "A name for the extension. Each extension name in your account must be unique. Extension versions use the same name.", + "title": "Name", + "type": "string" + }, + "Parameters": { + "additionalProperties": false, + "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::AppConfig::Extension.Parameter" + } + }, + "title": "Parameters", + "type": "object" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Adds one or more tags for the specified extension. Tags are metadata that help you categorize resources in different ways, for example, by purpose, owner, or environment. 
Each tag consists of a key and an optional value, both of which you define.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Actions", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::AppConfig::Extension" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::AppConfig::Extension.Parameter": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "Information about the parameter.", + "title": "Description", + "type": "string" + }, + "Required": { + "markdownDescription": "A parameter value must be specified in the extension association.", + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "Required" + ], + "type": "object" + }, + "AWS::AppConfig::ExtensionAssociation": { "additionalProperties": false, "properties": { "Condition": { @@ -18526,6 +18644,11 @@ "title": "AtRestEncryptionEnabled", "type": "boolean" }, + "HealthMetricsConfig": { + "markdownDescription": "Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:\n\n- *NetworkBandwidthOutAllowanceExceeded* : The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.\n- *EngineCPUUtilization* : The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.\n\nMetrics will be recorded by API ID. You can set the value to `ENABLED` or `DISABLED` .", + "title": "HealthMetricsConfig", + "type": "string" + }, "TransitEncryptionEnabled": { "markdownDescription": "Transit encryption flag when connecting to cache. 
You cannot update this setting after creation.", "title": "TransitEncryptionEnabled", @@ -18721,6 +18844,11 @@ "markdownDescription": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", "title": "LambdaConfig" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` .", + "title": "MetricsConfig", + "type": "string" + }, "Name": { "markdownDescription": "Friendly name for you to identify your AppSync data source after creation.", "title": "Name", @@ -19406,6 +19534,16 @@ "title": "AuthenticationType", "type": "string" }, + "EnhancedMetricsConfig": { + "$ref": "#/definitions/AWS::AppSync::GraphQLApi.EnhancedMetricsConfig", + "markdownDescription": "Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.\n\nEnhanced metrics can be configured at the resolver, data source, and operation levels. For more information, see [Monitoring and logging](https://docs.aws.amazon.com//appsync/latest/devguide/monitoring.html#cw-metrics) in the *AWS AppSync User Guide* .", + "title": "EnhancedMetricsConfig" + }, + "EnvironmentVariables": { + "markdownDescription": "A map containing the list of resources with their properties and environment variables. 
For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .\n\n*Pattern* : `^[A-Za-z]+\\\\w*$\\\\`\n\n*Minimum* : 2\n\n*Maximum* : 64", + "title": "EnvironmentVariables", + "type": "object" + }, "IntrospectionConfig": { "markdownDescription": "Sets the value of the GraphQL API to enable ( `ENABLED` ) or disable ( `DISABLED` ) introspection. If no value is provided, the introspection configuration will be set to `ENABLED` by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled.\n\nFor more information about introspection, see [GraphQL introspection](https://docs.aws.amazon.com/https://graphql.org/learn/introspection/) .", "title": "IntrospectionConfig", @@ -19552,6 +19690,32 @@ }, "type": "object" }, + "AWS::AppSync::GraphQLApi.EnhancedMetricsConfig": { + "additionalProperties": false, + "properties": { + "DataSourceLevelMetricsBehavior": { + "markdownDescription": "Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:\n\n- *Requests* : The number of invocations that occured during a request.\n- *Latency* : The time to complete a data source invocation.\n- *Errors* : The number of errors that occurred during a data source invocation.\n\nThese metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. 
`dataSourceLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_DATA_SOURCE_METRICS` : Records and emits metric data for all data sources in the request.\n- `PER_DATA_SOURCE_METRICS` : Records and emits metric data for data sources that have the `MetricsConfig` value set to `ENABLED` .", + "title": "DataSourceLevelMetricsBehavior", + "type": "string" + }, + "OperationLevelMetricsConfig": { + "markdownDescription": "Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:\n\n- *Requests* : The number of times a specified GraphQL operation was called.\n- *GraphQL errors* : The number of GraphQL errors that occurred during a specified GraphQL operation.\n\nMetrics will be recorded by API ID and operation name. You can set the value to `ENABLED` or `DISABLED` .", + "title": "OperationLevelMetricsConfig", + "type": "string" + }, + "ResolverLevelMetricsBehavior": { + "markdownDescription": "Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:\n\n- *GraphQL errors* : The number of GraphQL errors that occurred.\n- *Requests* : The number of invocations that occurred during a request.\n- *Latency* : The time to complete a resolver invocation.\n- *Cache hits* : The number of cache hits during a request.\n- *Cache misses* : The number of cache misses during a request.\n\nThese metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. 
`resolverLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_RESOLVER_METRICS` : Records and emits metric data for all resolvers in the request.\n- `PER_RESOLVER_METRICS` : Records and emits metric data for resolvers that have the `MetricsConfig` value set to `ENABLED` .", + "title": "ResolverLevelMetricsBehavior", + "type": "string" + } + }, + "required": [ + "DataSourceLevelMetricsBehavior", + "OperationLevelMetricsConfig", + "ResolverLevelMetricsBehavior" + ], + "type": "object" + }, "AWS::AppSync::GraphQLApi.LambdaAuthorizerConfig": { "additionalProperties": false, "properties": { @@ -19798,6 +19962,11 @@ "title": "MaxBatchSize", "type": "number" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced resolver metrics for specified resolvers. Note that `MetricsConfig` won't be used unless the `resolverLevelMetricsBehavior` value is set to `PER_RESOLVER_METRICS` . If the `resolverLevelMetricsBehavior` is set to `FULL_REQUEST_RESOLVER_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.", + "title": "MetricsConfig", + "type": "string" + }, "PipelineConfig": { "$ref": "#/definitions/AWS::AppSync::Resolver.PipelineConfig", "markdownDescription": "Functions linked with the pipeline resolver.", @@ -20093,7 +20262,7 @@ "type": "number" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. 
Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. 
Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. 
Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. 
Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -20279,7 +20448,7 @@ "type": "string" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. 
Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. 
Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. 
More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. 
Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -22424,6 +22593,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::AutoScaling::AutoScalingGroup.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU for an instance type, in GiB.\n\nDefault: No minimum or maximum limits", @@ -24724,9 +24898,6 @@ "title": "MappingTemplate", "type": "string" }, - "ModifiedAt": { - "type": "string" - }, "Name": { "markdownDescription": "Returns the descriptive name for the transformer.", "title": "Name", @@ -25040,6 +25211,11 @@ "markdownDescription": "Specifies the number of days after creation that a recovery point is moved to cold storage.", "title": "MoveToColdStorageAfterDays", "type": "number" + }, + "OptInToArchiveForSupportedResources": { + "markdownDescription": "", + "title": "OptInToArchiveForSupportedResources", + "type": "boolean" } }, "type": "object" @@ -26472,6 +26648,11 @@ "markdownDescription": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", "title": "ContainerProperties" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "An object that contains the properties for the Amazon ECS resources of a job.When `ecsProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `eksProperties` , or `nodeProperties` .", + "title": "EcsProperties" + }, "EksProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksProperties", "markdownDescription": "An object with properties that are specific to Amazon EKS-based jobs. 
When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", @@ -26488,13 +26669,7 @@ "title": "NodeProperties" }, "Parameters": { - "additionalProperties": true, "markdownDescription": "Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a `SubmitJob` request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see [Job definition parameters](https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) in the *AWS Batch User Guide* .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Parameters", "type": "object" }, @@ -26522,18 +26697,12 @@ "type": "number" }, "Tags": { - "additionalProperties": true, "markdownDescription": "The tags that are applied to the job definition.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Tags", "type": "object" }, "Timeout": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.JobTimeout", + "$ref": "#/definitions/AWS::Batch::JobDefinition.Timeout", "markdownDescription": "The timeout time for jobs that are submitted with this job definition. After the amount of time you specify passes, AWS Batch terminates your jobs if they aren't finished.", "title": "Timeout" }, @@ -26569,6 +26738,22 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.AuthorizationConfig": { + "additionalProperties": false, + "properties": { + "AccessPointId": { + "markdownDescription": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. 
If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", + "title": "AccessPointId", + "type": "string" + }, + "Iam": { + "markdownDescription": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified.", + "title": "Iam", + "type": "string" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.ContainerProperties": { "additionalProperties": false, "properties": { @@ -26635,7 +26820,7 @@ }, "MountPoints": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoint" + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" }, "markdownDescription": "The mount points for data volumes in your container. 
This parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .", "title": "MountPoints", @@ -26656,6 +26841,11 @@ "title": "ReadonlyRootFilesystem", "type": "boolean" }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, "ResourceRequirements": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" @@ -26697,7 +26887,7 @@ }, "Volumes": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Volume" + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" }, "markdownDescription": "A list of data volumes used in a job.", "title": "Volumes", @@ -26733,34 +26923,111 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSAuthorizationConfig": { + "AWS::Batch::JobDefinition.EcsProperties": { "additionalProperties": false, "properties": { - "AccessPointId": { + "TaskProperties": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsTaskProperties" + }, + "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element.", + "title": "TaskProperties", + "type": "array" + } + }, + "required": [ + "TaskProperties" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.EcsTaskProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerProperties" + }, + "markdownDescription": "This object is a list of containers.", + "title": "Containers", + 
"type": "array" + }, + "EphemeralStorage": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EphemeralStorage", + "markdownDescription": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", + "title": "EphemeralStorage" + }, + "ExecutionRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", + "title": "ExecutionRoleArn", "type": "string" }, - "Iam": { + "IpcMode": { + "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` .\n\nIf `host` is specified, all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified `task` share the same IPC resources.\n\nIf `none` is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.\n\nIf no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the Docker run reference.", + "title": "IpcMode", + "type": "string" + }, + "NetworkConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.NetworkConfiguration", + "markdownDescription": "The network configuration for jobs that are running on Fargate resources. 
Jobs that are running on Amazon EC2 resources must not specify this parameter.", + "title": "NetworkConfiguration" + }, + "PidMode": { + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the Docker run reference.", + "title": "PidMode", + "type": "string" + }, + "PlatformVersion": { + "markdownDescription": "The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. 
For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .",
+        "title": "PlatformVersion",
+        "type": "string"
+      },
+      "RuntimePlatform": {
+        "$ref": "#/definitions/AWS::Batch::JobDefinition.RuntimePlatform",
+        "markdownDescription": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.",
+        "title": "RuntimePlatform"
+      },
+      "TaskRoleArn": {
+        "markdownDescription": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .",
+        "title": "TaskRoleArn",
         "type": "string"
+      },
+      "Volumes": {
+        "items": {
+          "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes"
+        },
+        "markdownDescription": "A list of volumes that are associated with the job.",
+        "title": "Volumes",
+        "type": "array"
       }
     },
     "type": "object"
   },
-  "AWS::Batch::JobDefinition.EFSVolumeConfiguration": {
+  "AWS::Batch::JobDefinition.EfsVolumeConfiguration": {
     "additionalProperties": false,
     "properties": {
       "AuthorizationConfig": {
-        "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSAuthorizationConfig"
+        "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig",
+        "markdownDescription": "The authorization configuration details for the Amazon EFS file system.",
+        "title": "AuthorizationConfig"
       },
       "FileSystemId": {
+        "markdownDescription": "The Amazon EFS file system ID to use.",
+        "title": "FileSystemId",
         "type": "string"
       },
       "RootDirectory": {
+        "markdownDescription": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. 
The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", + "title": "RootDirectory", "type": "string" }, "TransitEncryption": { + "markdownDescription": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryption", "type": "string" }, "TransitEncryptionPort": { + "markdownDescription": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryptionPort", "type": "number" } }, @@ -26858,24 +27125,12 @@ "additionalProperties": false, "properties": { "Limits": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to reserve for the container. The values vary based on the `name` that's specified. Resources can be requested using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. 
`memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> To maximize your resource utilization, provide your jobs with as much memory as possible for the specific instance type that you are using. To learn how, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that's reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both places, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that's reserved for the container. Values must be a whole integer. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Limits", "type": "object" }, "Requests": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to request for the container. The values vary based on the `name` that's specified. Resources can be requested by using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. 
If `memory` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that are reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that are reserved for the container. Values must be a whole integer. `nvidia.com/gpu` can be specified in `limits` , `requests` , or both. If `nvidia.com/gpu` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Requests", "type": "object" } @@ -26961,56 +27216,11 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EksMetadata": { - "additionalProperties": false, - "properties": { - "Labels": { - "additionalProperties": true, - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.EksPodProperties": { - "additionalProperties": false, - "properties": { - "Containers": { - "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" - }, - "type": "array" - }, - "DnsPolicy": { - "type": "string" - }, - "HostNetwork": { - "type": "boolean" - }, - "Metadata": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksMetadata" - }, - "ServiceAccountName": { - "type": "string" - }, - "Volumes": { - "items": { - "$ref": 
"#/definitions/AWS::Batch::JobDefinition.EksVolume" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.EksProperties": { "additionalProperties": false, "properties": { "PodProperties": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksPodProperties", + "$ref": "#/definitions/AWS::Batch::JobDefinition.PodProperties", "markdownDescription": "The properties for the Kubernetes pod resources of a job.", "title": "PodProperties" } @@ -27135,24 +27345,6 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.Host": { - "additionalProperties": false, - "properties": { - "SourcePath": { - "type": "string" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.JobTimeout": { - "additionalProperties": false, - "properties": { - "AttemptDurationSeconds": { - "type": "number" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.LinuxParameters": { "additionalProperties": false, "properties": { @@ -27204,13 +27396,7 @@ "type": "string" }, "Options": { - "additionalProperties": true, "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Options", "type": "object" }, @@ -27228,16 +27414,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.MountPoint": { + "AWS::Batch::JobDefinition.Metadata": { + "additionalProperties": false, + "properties": { + "Labels": { + "markdownDescription": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. 
Each resource can have multiple labels, but each key must be unique for a given object.", + "title": "Labels", + "type": "object" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.MountPoints": { "additionalProperties": false, "properties": { "ContainerPath": { + "markdownDescription": "The path on the container where the host volume is mounted.", + "title": "ContainerPath", "type": "string" }, "ReadOnly": { + "markdownDescription": "If this value is `true` , the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is `false` .", + "title": "ReadOnly", "type": "boolean" }, "SourceVolume": { + "markdownDescription": "The name of the volume to mount.", + "title": "SourceVolume", "type": "string" } }, @@ -27291,6 +27494,19 @@ "markdownDescription": "The container details for the node range.", "title": "Container" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "title": "EcsProperties" + }, + "InstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", + "title": "InstanceTypes", + "type": "array" + }, "TargetNodes": { "markdownDescription": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). 
You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties.", "title": "TargetNodes", @@ -27302,6 +27518,75 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.PodProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.", + "title": "Containers", + "type": "array" + }, + "DnsPolicy": { + "markdownDescription": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", + "title": "DnsPolicy", + "type": "string" + }, + "HostNetwork": { + "markdownDescription": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. 
For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .",
+        "title": "HostNetwork",
+        "type": "boolean"
+      },
+      "InitContainers": {
+        "items": {
+          "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer"
+        },
+        "markdownDescription": "These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements",
+        "title": "InitContainers",
+        "type": "array"
+      },
+      "Metadata": {
+        "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata",
+        "markdownDescription": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .",
+        "title": "Metadata"
+      },
+      "ServiceAccountName": {
+        "markdownDescription": "The name of the service account that's used to run the pod. 
For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "title": "ServiceAccountName", + "type": "string" + }, + "ShareProcessNamespace": { + "markdownDescription": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", + "title": "ShareProcessNamespace", + "type": "boolean" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" + }, + "markdownDescription": "Specifies the volumes for a job definition that uses Amazon EKS resources.", + "title": "Volumes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.RepositoryCredentials": { + "additionalProperties": false, + "properties": { + "CredentialsParameter": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret containing the private repository credentials.", + "title": "CredentialsParameter", + "type": "string" + } + }, + "required": [ + "CredentialsParameter" + ], + "type": "object" + }, "AWS::Batch::JobDefinition.ResourceRequirement": { "additionalProperties": false, "properties": { @@ -27373,6 +27658,147 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.TaskContainerDependency": { + "additionalProperties": false, + "properties": { + "Condition": { + "markdownDescription": "The dependency condition of the container. 
The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a zero status. This condition can't be set on an essential container.", + "title": "Condition", + "type": "string" + }, + "ContainerName": { + "markdownDescription": "A unique identifier for the container.", + "title": "ContainerName", + "type": "string" + } + }, + "required": [ + "Condition", + "ContainerName" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.TaskContainerProperties": { + "additionalProperties": false, + "properties": { + "Command": { + "items": { + "type": "string" + }, + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . 
For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .",
+        "title": "Command",
+        "type": "array"
+      },
+      "DependsOn": {
+        "items": {
+          "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerDependency"
+        },
+        "markdownDescription": "A list of containers that this container depends on.",
+        "title": "DependsOn",
+        "type": "array"
+      },
+      "Environment": {
+        "items": {
+          "$ref": "#/definitions/AWS::Batch::JobDefinition.Environment"
+        },
+        "markdownDescription": "The environment variables to pass to a container. This parameter maps to Env in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with `AWS_BATCH` . This naming convention is reserved for variables that AWS Batch sets.",
+        "title": "Environment",
+        "type": "array"
+      },
+      "Essential": {
+        "markdownDescription": "If the essential parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. 
For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "Essential", + "type": "boolean" + }, + "Image": { + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of the [*docker run*](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "title": "Image", + "type": "string" + }, + "LinuxParameters": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LinuxParameters", + "markdownDescription": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. 
For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .", + "title": "LinuxParameters" + }, + "LogConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LogConfiguration", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the *Docker documentation* .\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "LogConfiguration" + }, + "MountPoints": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" + }, + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the [--volume](https://docs.aws.amazon.com/) option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "title": "MountPoints", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a container. The name can be used as a unique identifier to target your `dependsOn` and `Overrides` objects.", + "title": "Name", + "type": "string" + }, + "Privileged": { + "markdownDescription": "When this parameter is `true` , the container is given elevated privileges on the host container instance (similar to the `root` user). 
This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on Fargate.", + "title": "Privileged", + "type": "boolean" + }, + "ReadonlyRootFilesystem": { + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "title": "ReadonlyRootFilesystem", + "type": "boolean" + }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, + "ResourceRequirements": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" + }, + "markdownDescription": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", + "title": "ResourceRequirements", + "type": "array" + }, + "Secrets": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Secret" + }, + "markdownDescription": "The secrets to pass to the container. 
For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the Amazon Elastic Container Service Developer Guide.", + "title": "Secrets", + "type": "array" + }, + "Ulimits": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Ulimit" + }, + "markdownDescription": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", + "title": "Ulimits", + "type": "array" + }, + "User": { + "markdownDescription": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . 
We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.",
+        "title": "User",
+        "type": "string"
+      }
+    },
+    "required": [
+      "Image"
+    ],
+    "type": "object"
+  },
+  "AWS::Batch::JobDefinition.Timeout": {
+    "additionalProperties": false,
+    "properties": {
+      "AttemptDurationSeconds": {
+        "markdownDescription": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.",
+        "title": "AttemptDurationSeconds",
+        "type": "number"
+      }
+    },
+    "type": "object"
+  },
   "AWS::Batch::JobDefinition.Tmpfs": {
     "additionalProperties": false,
     "properties": {
@@ -27427,16 +27853,33 @@
       ],
       "type": "object"
     },
-  "AWS::Batch::JobDefinition.Volume": {
+  "AWS::Batch::JobDefinition.Volumes": {
     "additionalProperties": false,
     "properties": {
       "EfsVolumeConfiguration": {
-        "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSVolumeConfiguration"
+        "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration",
+        "markdownDescription": "This is used when you're using an Amazon Elastic File System file system for job storage. 
For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", + "title": "EfsVolumeConfiguration" }, "Host": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Host" + "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost", + "markdownDescription": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", + "title": "Host" }, "Name": { + "markdownDescription": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` .", + "title": "Name", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.VolumesHost": { + "additionalProperties": false, + "properties": { + "SourcePath": { + "markdownDescription": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. 
Don't provide this for these jobs.", + "title": "SourcePath", "type": "string" } }, @@ -29482,6 +29925,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AutoScalingSpecifications": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSpecification", + "markdownDescription": "The optional auto scaling capacity settings for a table in provisioned capacity mode.", + "title": "AutoScalingSpecifications" + }, "BillingMode": { "$ref": "#/definitions/AWS::Cassandra::Table.BillingMode", "markdownDescription": "The billing mode for the table, which determines how you'll be charged for reads and writes:\n\n- *On-demand mode* (default) - You pay based on the actual reads and writes your application performs.\n- *Provisioned mode* - Lets you specify the number of reads and writes per second that you need for your application.\n\nIf you don't specify a value for this property, then the table will use on-demand mode.", @@ -29536,6 +29984,14 @@ "title": "RegularColumns", "type": "array" }, + "ReplicaSpecifications": { + "items": { + "$ref": "#/definitions/AWS::Cassandra::Table.ReplicaSpecification" + }, + "markdownDescription": "The AWS Region specific settings of a multi-Region table.\n\nFor a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters.\n\n- `region` : The Region where these settings are applied. (Required)\n- `readCapacityUnits` : The provisioned read capacity units. (Optional)\n- `readCapacityAutoScaling` : The read capacity auto scaling settings for the table. (Optional)", + "title": "ReplicaSpecifications", + "type": "array" + }, "TableName": { "markdownDescription": "The name of the table to be created. The table name is case sensitive. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the table name. 
For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you can't perform updates that require replacing this resource. You can perform updates that require no interruption or some interruption. If you must replace the resource, specify a new name. \n\n*Length constraints:* Minimum length of 3. Maximum length of 255.\n\n*Pattern:* `^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$`", "title": "TableName", @@ -29577,6 +30033,48 @@ ], "type": "object" }, + "AWS::Cassandra::Table.AutoScalingSetting": { + "additionalProperties": false, + "properties": { + "AutoScalingDisabled": { + "markdownDescription": "This optional parameter enables auto scaling for the table if set to `false` .", + "title": "AutoScalingDisabled", + "type": "boolean" + }, + "MaximumUnits": { + "markdownDescription": "Manage costs by specifying the maximum amount of throughput to provision. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MaximumUnits", + "type": "number" + }, + "MinimumUnits": { + "markdownDescription": "The minimum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MinimumUnits", + "type": "number" + }, + "ScalingPolicy": { + "$ref": "#/definitions/AWS::Cassandra::Table.ScalingPolicy", + "markdownDescription": "Amazon Keyspaces supports the `target tracking` auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. 
You define the target value as a percentage between 20 and 90.", + "title": "ScalingPolicy" + } + }, + "type": "object" + }, + "AWS::Cassandra::Table.AutoScalingSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's read capacity.", + "title": "ReadCapacityAutoScaling" + }, + "WriteCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's write capacity.", + "title": "WriteCapacityAutoScaling" + } + }, + "type": "object" + }, "AWS::Cassandra::Table.BillingMode": { "additionalProperties": false, "properties": { @@ -29674,6 +30172,70 @@ ], "type": "object" }, + "AWS::Cassandra::Table.ReplicaSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The read capacity auto scaling settings for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityAutoScaling" + }, + "ReadCapacityUnits": { + "markdownDescription": "The provisioned read capacity units for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityUnits", + "type": "number" + }, + "Region": { + "markdownDescription": "The AWS Region.", + "title": "Region", + "type": "string" + } + }, + "required": [ + "Region" + ], + "type": "object" + }, + "AWS::Cassandra::Table.ScalingPolicy": { + "additionalProperties": false, + "properties": { + "TargetTrackingScalingPolicyConfiguration": { + "$ref": "#/definitions/AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration", + "markdownDescription": "The auto scaling policy that scales a table based on the ratio of consumed to provisioned capacity.", + "title": "TargetTrackingScalingPolicyConfiguration" + } + }, 
+ "type": "object" + }, + "AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration": { + "additionalProperties": false, + "properties": { + "DisableScaleIn": { + "markdownDescription": "Specifies if `scale-in` is enabled.\n\nWhen auto scaling automatically decreases capacity for a table, the table *scales in* . When scaling policies are set, they can't scale in the table lower than its minimum capacity.", + "title": "DisableScaleIn", + "type": "boolean" + }, + "ScaleInCooldown": { + "markdownDescription": "Specifies a `scale-in` cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleInCooldown", + "type": "number" + }, + "ScaleOutCooldown": { + "markdownDescription": "Specifies a scale out cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleOutCooldown", + "type": "number" + }, + "TargetValue": { + "markdownDescription": "Specifies the target value for the target tracking auto scaling policy.\n\nAmazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define `targetValue` as a percentage. 
An `integer` between 20 and 90.", + "title": "TargetValue", + "type": "number" + } + }, + "required": [ + "TargetValue" + ], + "type": "object" + }, "AWS::CertificateManager::Account": { "additionalProperties": false, "properties": { @@ -33288,7 +33850,7 @@ "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and .15.", + "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution.", "title": "Weight", "type": "number" } @@ -36246,7 +36808,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. 
You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- 
`AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN 
must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen 
`resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals 
`AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. 
To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` 
\ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` 
or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` 
equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen 
`resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` 
equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in 
the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36569,7 +37131,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- 
`AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN 
must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen 
`resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals 
`AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. 
To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` 
\ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` 
or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` 
equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen 
`resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` 
equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in 
the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen `resources.type` equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -37948,6 +38510,92 @@ ], "type": "object" }, + "AWS::CodeBuild::Fleet": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "BaseCapacity": { + "markdownDescription": "The initial number of machines allocated to the compute fleet, which defines the number of builds that can run in parallel.", + "title": "BaseCapacity", + "type": "number" + }, + "ComputeType": { + "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "title": "ComputeType", + "type": "string" + }, + "EnvironmentType": { + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "title": "EnvironmentType", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the compute fleet.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeBuild::Fleet" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::CodeBuild::Project": { "additionalProperties": false, "properties": { @@ -38282,6 +38930,9 @@ "title": "EnvironmentVariables", "type": "array" }, + "Fleet": { + "$ref": "#/definitions/AWS::CodeBuild::Project.ProjectFleet" + }, "Image": { "markdownDescription": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:\n\n- For an image tag: `/:` . 
For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be `aws/codebuild/standard:4.0` .\n- For an image digest: `/@` . For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use `/@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf` .\n\nFor more information, see [Docker images provided by CodeBuild](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-available.html) in the *AWS CodeBuild user guide* .", "title": "Image", @@ -38309,7 +38960,9 @@ } }, "required": [ - "Image" + "ComputeType", + "Image", + "Type" ], "type": "object" }, @@ -40530,6 +41183,11 @@ "title": "DisableInboundStageTransitions", "type": "array" }, + "ExecutionMode": { + "markdownDescription": "The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.", + "title": "ExecutionMode", + "type": "string" + }, "Name": { "markdownDescription": "The name of the pipeline.", "title": "Name", @@ -40788,9 +41446,39 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitBranchFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitConfiguration": { "additionalProperties": false, "properties": { + "PullRequest": { + "items": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPullRequestFilter" + }, + "markdownDescription": "The field where the 
repository event that will start the pipeline is specified as pull requests.", + "title": "PullRequest", + "type": "array" + }, "Push": { "items": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPushFilter" @@ -40810,9 +41498,65 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::CodePipeline::Pipeline.GitPullRequestFilter": { + "additionalProperties": false, + "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + "markdownDescription": "The field that specifies to filter on branches for the pull request trigger configuration.", + "title": "Branches" + }, + "Events": { + "items": { + "type": "string" + }, + "markdownDescription": "The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.", + "title": "Events", + "type": "array" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the pull request trigger configuration.", + "title": "FilePaths" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitPushFilter": { "additionalProperties": false, "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + 
"markdownDescription": "The field that specifies to filter on branches for the push trigger configuration.", + "title": "Branches" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the push trigger configuration.", + "title": "FilePaths" + }, "Tags": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitTagFilterCriteria", "markdownDescription": "The field that contains the details for the Git tags trigger configuration.", @@ -41819,6 +42563,10 @@ "type": "boolean" } }, + "required": [ + "ClientId", + "ProviderName" + ], "type": "object" }, "AWS::Cognito::IdentityPool.CognitoStreams": { @@ -41985,12 +42733,24 @@ "type": "string" }, "RoleMappings": { + "additionalProperties": false, "markdownDescription": "How users for a specific identity provider are mapped to roles. This is a string to the `RoleMapping` object map. The string identifies the identity provider. For example: `graph.facebook.com` or `cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id` .\n\nIf the `IdentityProvider` field isn't provided in this object, the string is used as the identity provider name.\n\nFor more information, see the [RoleMapping property](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-identitypoolroleattachment-rolemapping.html) .", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::Cognito::IdentityPoolRoleAttachment.RoleMapping" + } + }, "title": "RoleMappings", "type": "object" }, "Roles": { + "additionalProperties": true, "markdownDescription": "The map of the roles associated with this pool. For a given role, the key is either \"authenticated\" or \"unauthenticated\". 
The value is the role ARN.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, "title": "Roles", "type": "object" } @@ -48343,6 +49103,22 @@ "title": "AssignContactCategoryActions", "type": "array" }, + "CreateCaseActions": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.CreateCaseAction" + }, + "markdownDescription": "", + "title": "CreateCaseActions", + "type": "array" + }, + "EndAssociatedTasksActions": { + "items": { + "type": "object" + }, + "markdownDescription": "", + "title": "EndAssociatedTasksActions", + "type": "array" + }, "EventBridgeActions": { "items": { "$ref": "#/definitions/AWS::Connect::Rule.EventBridgeAction" @@ -48366,8 +49142,39 @@ "markdownDescription": "Information about the task action. This field is required if `TriggerEventSource` is one of the following values: `OnZendeskTicketCreate` | `OnZendeskTicketStatusUpdate` | `OnSalesforceCaseCreate`", "title": "TaskActions", "type": "array" + }, + "UpdateCaseActions": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.UpdateCaseAction" + }, + "markdownDescription": "", + "title": "UpdateCaseActions", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Connect::Rule.CreateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + }, + "TemplateId": { + "markdownDescription": "", + "title": "TemplateId", + "type": "string" } }, + "required": [ + "Fields", + "TemplateId" + ], "type": "object" }, "AWS::Connect::Rule.EventBridgeAction": { @@ -48384,6 +49191,52 @@ ], "type": "object" }, + "AWS::Connect::Rule.Field": { + "additionalProperties": false, + "properties": { + "Id": { + "markdownDescription": "", + "title": "Id", + "type": "string" + }, + "Value": { + "$ref": "#/definitions/AWS::Connect::Rule.FieldValue", + "markdownDescription": "", + "title": "Value" + } + }, + "required": [ + 
"Id", + "Value" + ], + "type": "object" + }, + "AWS::Connect::Rule.FieldValue": { + "additionalProperties": false, + "properties": { + "BooleanValue": { + "markdownDescription": "", + "title": "BooleanValue", + "type": "boolean" + }, + "DoubleValue": { + "markdownDescription": "", + "title": "DoubleValue", + "type": "number" + }, + "EmptyValue": { + "markdownDescription": "", + "title": "EmptyValue", + "type": "object" + }, + "StringValue": { + "markdownDescription": "", + "title": "StringValue", + "type": "string" + } + }, + "type": "object" + }, "AWS::Connect::Rule.NotificationRecipientType": { "additionalProperties": false, "properties": { @@ -48521,6 +49374,23 @@ ], "type": "object" }, + "AWS::Connect::Rule.UpdateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + } + }, + "required": [ + "Fields" + ], + "type": "object" + }, "AWS::Connect::SecurityKey": { "additionalProperties": false, "properties": { @@ -49736,7 +50606,7 @@ ], "type": "object" }, - "AWS::ControlTower::EnabledControl": { + "AWS::ControlTower::EnabledBaseline": { "additionalProperties": false, "properties": { "Condition": { @@ -49771,34 +50641,48 @@ "Properties": { "additionalProperties": false, "properties": { - "ControlIdentifier": { - "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. 
For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", - "title": "ControlIdentifier", + "BaselineIdentifier": { + "markdownDescription": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", + "title": "BaselineIdentifier", + "type": "string" + }, + "BaselineVersion": { + "markdownDescription": "The enabled version of the `Baseline` .", + "title": "BaselineVersion", "type": "string" }, "Parameters": { "items": { - "$ref": "#/definitions/AWS::ControlTower::EnabledControl.EnabledControlParameter" + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline.Parameter" }, - "markdownDescription": "Array of `EnabledControlParameter` objects.", + "markdownDescription": "Parameters that are applied when enabling this `Baseline` . These parameters configure the behavior of the baseline.", "title": "Parameters", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags associated with input to `EnableBaseline` .", + "title": "Tags", + "type": "array" + }, "TargetIdentifier": { - "markdownDescription": "The ARN of the organizational unit. 
For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "markdownDescription": "The target on which to enable the `Baseline` .", "title": "TargetIdentifier", "type": "string" } }, "required": [ - "ControlIdentifier", + "BaselineIdentifier", + "BaselineVersion", "TargetIdentifier" ], "type": "object" }, "Type": { "enum": [ - "AWS::ControlTower::EnabledControl" + "AWS::ControlTower::EnabledBaseline" ], "type": "string" }, @@ -49817,27 +50701,132 @@ ], "type": "object" }, - "AWS::ControlTower::EnabledControl.EnabledControlParameter": { + "AWS::ControlTower::EnabledBaseline.Parameter": { "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key of a key/value pair. It is of type `string` .", + "markdownDescription": "A string denoting the parameter key.", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The value of a key/value pair. It can be of type `array` , `string` , `number` , `object` , or `boolean` . 
[Note: The *Type* field that follows may show a single type such as Number, which is only one possible type.]", + "markdownDescription": "A low-level `Document` object of any type (for example, a Java Object).", "title": "Value", "type": "object" } }, - "required": [ - "Key", - "Value" - ], "type": "object" }, - "AWS::ControlTower::LandingZone": { + "AWS::ControlTower::EnabledControl": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ControlIdentifier": { + "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "title": "ControlIdentifier", + "type": "string" + }, + "Parameters": { + "items": { + "$ref": "#/definitions/AWS::ControlTower::EnabledControl.EnabledControlParameter" + }, + "markdownDescription": "Array of `EnabledControlParameter` objects.", + "title": "Parameters", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags to be applied to the enabled control.", + "title": "Tags", + "type": "array" + }, + "TargetIdentifier": { + "markdownDescription": "The ARN of the organizational unit. 
For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "title": "TargetIdentifier", + "type": "string" + } + }, + "required": [ + "ControlIdentifier", + "TargetIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::ControlTower::EnabledControl" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::ControlTower::EnabledControl.EnabledControlParameter": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key of a key/value pair. It is of type `string` .", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of a key/value pair. It can be of type `array` , `string` , `number` , `object` , or `boolean` . [Note: The *Type* field that follows may show a single type such as Number, which is only one possible type.]", + "title": "Value", + "type": "object" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, + "AWS::ControlTower::LandingZone": { "additionalProperties": false, "properties": { "Condition": { @@ -50199,6 +51188,7 @@ } }, "required": [ + "DefaultExpirationDays", "DomainName" ], "type": "object" @@ -51193,7 +52183,9 @@ } }, "required": [ - "DomainName" + "Description", + "DomainName", + "ObjectTypeName" ], "type": "object" }, @@ -59605,7 +60597,7 @@ }, "type": "object" }, - "AWS::Detective::Graph": { + "AWS::DataZone::DataSource": { "additionalProperties": false, "properties": { "Condition": { @@ -59640,25 +60632,82 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoEnableMembers": { - "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property 
is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", - "title": "AutoEnableMembers", - "type": "boolean" - }, - "Tags": { + "AssetFormsInput": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::DataZone::DataSource.FormInput" }, - "markdownDescription": "The tag values to assign to the new behavior graph.", - "title": "Tags", + "markdownDescription": "The metadata forms attached to the assets that the data source works with.", + "title": "AssetFormsInput", "type": "array" + }, + "Configuration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.DataSourceConfigurationInput", + "markdownDescription": "The configuration of the data source.", + "title": "Configuration" + }, + "Description": { + "markdownDescription": "The description of the data source.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain where the data source is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnableSetting": { + "markdownDescription": "Specifies whether the data source is enabled.", + "title": "EnableSetting", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The unique identifier of the Amazon DataZone environment to which the data source publishes assets.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the data source.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which you want to add this data source.", + "title": "ProjectIdentifier", + "type": "string" + }, + 
"PublishOnImport": { + "markdownDescription": "Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog.", + "title": "PublishOnImport", + "type": "boolean" + }, + "Recommendation": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RecommendationConfiguration", + "markdownDescription": "Specifies whether the business name generation is to be enabled for this data source.", + "title": "Recommendation" + }, + "Schedule": { + "$ref": "#/definitions/AWS::DataZone::DataSource.ScheduleConfiguration", + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule" + }, + "Type": { + "markdownDescription": "The type of the data source.", + "title": "Type", + "type": "string" } }, + "required": [ + "DomainIdentifier", + "EnvironmentIdentifier", + "Name", + "ProjectIdentifier", + "Type" + ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::Graph" + "AWS::DataZone::DataSource" ], "type": "string" }, @@ -59672,11 +60721,1027 @@ } }, "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.DataSourceConfigurationInput": { + "additionalProperties": false, + "properties": { + "GlueRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.GlueRunConfigurationInput", + "markdownDescription": "The configuration of the AWS Glue data source.", + "title": "GlueRunConfiguration" + }, + "RedshiftRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftRunConfigurationInput", + "markdownDescription": "The configuration of the Amazon Redshift data source.", + "title": "RedshiftRunConfiguration" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.FilterExpression": { + "additionalProperties": false, + "properties": { + "Expression": { + "markdownDescription": "The search filter expression.", + "title": "Expression", + "type": "string" + }, + "Type": { + "markdownDescription": "The search filter 
expression type.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Expression", "Type" ], "type": "object" }, - "AWS::Detective::MemberInvitation": { + "AWS::DataZone::DataSource.FormInput": { + "additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the metadata form.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The name of the metadata form.", + "title": "FormName", + "type": "string" + }, + "TypeIdentifier": { + "markdownDescription": "The ID of the metadata form type.", + "title": "TypeIdentifier", + "type": "string" + }, + "TypeRevision": { + "markdownDescription": "The revision of the metadata form type.", + "title": "TypeRevision", + "type": "string" + } + }, + "required": [ + "FormName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.GlueRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the AWS Glue data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RelationalFilterConfigurations" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RecommendationConfiguration": { + "additionalProperties": false, + "properties": { + "EnableBusinessNameGeneration": { + "markdownDescription": "Specifies whether automatic business name generation is to be enabled or not as part of the recommendation configuration.", + "title": "EnableBusinessNameGeneration", + "type": "boolean" + } + }, + "type": "object" + }, + 
"AWS::DataZone::DataSource.RedshiftClusterStorage": { + "additionalProperties": false, + "properties": { + "ClusterName": { + "markdownDescription": "The name of an Amazon Redshift cluster.", + "title": "ClusterName", + "type": "string" + } + }, + "required": [ + "ClusterName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftCredentialConfiguration": { + "additionalProperties": false, + "properties": { + "SecretManagerArn": { + "markdownDescription": "The ARN of a secret manager for an Amazon Redshift cluster.", + "title": "SecretManagerArn", + "type": "string" + } + }, + "required": [ + "SecretManagerArn" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the Amazon Redshift data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RedshiftCredentialConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftCredentialConfiguration", + "markdownDescription": "The details of the credentials required to access an Amazon Redshift cluster.", + "title": "RedshiftCredentialConfiguration" + }, + "RedshiftStorage": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftStorage", + "markdownDescription": "The details of the Amazon Redshift storage as part of the configuration of an Amazon Redshift data source run.", + "title": "RedshiftStorage" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RedshiftCredentialConfiguration", + "RedshiftStorage", + "RelationalFilterConfigurations" + ], + "type": "object" + }, + 
"AWS::DataZone::DataSource.RedshiftServerlessStorage": { + "additionalProperties": false, + "properties": { + "WorkgroupName": { + "markdownDescription": "The name of the Amazon Redshift Serverless workgroup.", + "title": "WorkgroupName", + "type": "string" + } + }, + "required": [ + "WorkgroupName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftStorage": { + "additionalProperties": false, + "properties": { + "RedshiftClusterSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftClusterStorage", + "markdownDescription": "The details of the Amazon Redshift cluster source.", + "title": "RedshiftClusterSource" + }, + "RedshiftServerlessSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftServerlessStorage", + "markdownDescription": "The details of the Amazon Redshift Serverless workgroup source.", + "title": "RedshiftServerlessSource" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.RelationalFilterConfiguration": { + "additionalProperties": false, + "properties": { + "DatabaseName": { + "markdownDescription": "The database name specified in the relational filter configuration for the data source.", + "title": "DatabaseName", + "type": "string" + }, + "FilterExpressions": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.FilterExpression" + }, + "markdownDescription": "The filter expressions specified in the relational filter configuration for the data source.", + "title": "FilterExpressions", + "type": "array" + }, + "SchemaName": { + "markdownDescription": "The schema name specified in the relational filter configuration for the data source.", + "title": "SchemaName", + "type": "string" + } + }, + "required": [ + "DatabaseName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.ScheduleConfiguration": { + "additionalProperties": false, + "properties": { + "Schedule": { + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule", + "type": "string" + }, 
+ "Timezone": { + "markdownDescription": "The timezone of the data source run.", + "title": "Timezone", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Domain": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the Amazon DataZone domain.", + "title": "Description", + "type": "string" + }, + "DomainExecutionRole": { + "markdownDescription": "The domain execution role that is created when an Amazon DataZone domain is created. The domain execution role is created in the AWS account that houses the Amazon DataZone domain.", + "title": "DomainExecutionRole", + "type": "string" + }, + "KmsKeyIdentifier": { + "markdownDescription": "The identifier of the AWS Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data.", + "title": "KmsKeyIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone domain.", + "title": "Name", + "type": "string" + }, + "SingleSignOn": { + "$ref": "#/definitions/AWS::DataZone::Domain.SingleSignOn", + "markdownDescription": "The single sign-on details in Amazon DataZone.", + "title": "SingleSignOn" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags specified for the Amazon DataZone domain.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DomainExecutionRole", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Domain" + ], + 
"type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Domain.SingleSignOn": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The type of single sign-on in Amazon DataZone.", + "title": "Type", + "type": "string" + }, + "UserAssignment": { + "markdownDescription": "The single sign-on user assignment in Amazon DataZone.", + "title": "UserAssignment", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Environment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the environment.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentProfileIdentifier": { + "markdownDescription": "The identifier of the environment profile that is used to create this Amazon DataZone environment.", + "title": "EnvironmentProfileIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone environment.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone environment.", + "title": 
"Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which this environment is created.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::Environment.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of this Amazon DataZone environment.", + "title": "UserParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnvironmentProfileIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Environment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Environment.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment parameter.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which an environment blueprint exists.", + "title": "DomainIdentifier", + "type": "string" + }, 
+ "EnabledRegions": { + "items": { + "type": "string" + }, + "markdownDescription": "The enabled AWS Regions specified in a blueprint configuration.", + "title": "EnabledRegions", + "type": "array" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of the environment blueprint.\n\nIn the current release, only the following values are supported: `DefaultDataLake` and `DefaultDataWarehouse` .", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "ManageAccessRoleArn": { + "markdownDescription": "The ARN of the manage access role.", + "title": "ManageAccessRoleArn", + "type": "string" + }, + "ProvisioningRoleArn": { + "markdownDescription": "The ARN of the provisioning role.", + "title": "ProvisioningRoleArn", + "type": "string" + }, + "RegionalParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter" + }, + "markdownDescription": "The regional parameters of the environment blueprint.", + "title": "RegionalParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnabledRegions", + "EnvironmentBlueprintIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentBlueprintConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter": { + "additionalProperties": false, + "properties": { + "Parameters": { + "additionalProperties": true, + "markdownDescription": "A string to string map containing parameters for the region.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Parameters", + "type": "object" + }, + "Region": { + "markdownDescription": "The region specified in the environment parameter.", + "title": "Region", + "type": "string" 
+ } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AwsAccountId": { + "markdownDescription": "The identifier of an AWS account in which an environment profile exists.", + "title": "AwsAccountId", + "type": "string" + }, + "AwsAccountRegion": { + "markdownDescription": "The AWS Region in which an environment profile exists.", + "title": "AwsAccountRegion", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the environment profile.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment profile exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of a blueprint with which an environment profile is created.", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the environment profile.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of a project in which an environment profile exists.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of this Amazon DataZone environment profile.", + "title": "UserParameters", + "type": 
"array" + } + }, + "required": [ + "AwsAccountId", + "AwsAccountRegion", + "DomainIdentifier", + "EnvironmentBlueprintIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentProfile" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name specified in the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment profile.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Project": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of a project.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of a Amazon DataZone domain where the project exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone project.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a project.", + "title": "Name", + "type": "string" + } + }, 
+ "required": [ + "DomainIdentifier", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Project" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicableAssetTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The asset types included in the subscription target.", + "title": "ApplicableAssetTypes", + "type": "array" + }, + "AuthorizedPrincipals": { + "items": { + "type": "string" + }, + "markdownDescription": "The authorized principals included in the subscription target.", + "title": "AuthorizedPrincipals", + "type": "array" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain in which subscription target is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The ID of the environment in which subscription target is created.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "ManageAccessRole": { + "markdownDescription": "The manage access role that is used to create the subscription target.", + "title": "ManageAccessRole", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the subscription target.", + "title": "Name", + "type": "string" + }, + "Provider": { + "markdownDescription": "The 
provider of the subscription target.", + "title": "Provider", + "type": "string" + }, + "SubscriptionTargetConfig": { + "items": { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm" + }, + "markdownDescription": "The configuration of the subscription target.", + "title": "SubscriptionTargetConfig", + "type": "array" + }, + "Type": { + "markdownDescription": "The type of the subscription target.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "ApplicableAssetTypes", + "AuthorizedPrincipals", + "DomainIdentifier", + "EnvironmentIdentifier", + "ManageAccessRole", + "Name", + "SubscriptionTargetConfig", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::SubscriptionTarget" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm": { + "additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the subscription target configuration.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The form name included in the subscription target configuration.", + "title": "FormName", + "type": "string" + } + }, + "required": [ + "Content", + "FormName" + ], + "type": "object" + }, + "AWS::Detective::Graph": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoEnableMembers": 
{ + "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", + "title": "AutoEnableMembers", + "type": "boolean" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tag values to assign to the new behavior graph.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::Graph" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Detective::MemberInvitation": { "additionalProperties": false, "properties": { "Condition": { @@ -61344,6 +63409,11 @@ "AWS::DynamoDB::GlobalTable.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis data stream.", "title": "StreamArn", @@ -61943,6 +64013,11 @@ "AWS::DynamoDB::Table.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis 
data stream.\n\nLength Constraints: Minimum length of 37. Maximum length of 1024.", "title": "StreamArn", @@ -63655,6 +65730,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::EC2Fleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -64341,7 +66421,7 @@ "type": "string" }, "DeliverLogsPermissionArn": { - "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nThis parameter is required if the destination type is `cloud-watch-logs` and unsupported otherwise.", + "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to the log destination.\n\nThis parameter is required if the destination type is `cloud-watch-logs` , or if the destination type is `kinesis-data-firehose` and the delivery stream and the resources to monitor are in different accounts.", "title": "DeliverLogsPermissionArn", "type": "string" }, @@ -66560,7 +68640,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The volume type. 
For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -66756,6 +68836,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.MemoryGiBPerVCpu", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -69688,6 +71773,11 @@ "title": "DeviceIndex", "type": "string" }, + "EnaSrdSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification", + "markdownDescription": "Configures ENA Express for the network interface that this action attaches to the instance.", + "title": "EnaSrdSpecification" + }, "InstanceId": { "markdownDescription": "The ID of the instance to which you will attach the ENI.", "title": "InstanceId", @@ -69727,6 +71817,33 @@ ], "type": "object" }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdEnabled": { + "markdownDescription": "Indicates whether ENA Express is enabled for the network interface.", + "title": "EnaSrdEnabled", + "type": "boolean" + }, + "EnaSrdUdpSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification", + "markdownDescription": "Configures ENA Express for UDP network traffic.", + "title": "EnaSrdUdpSpecification" + } + }, + "type": "object" + }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdUdpEnabled": { + "markdownDescription": "Indicates whether UDP traffic to and from the instance uses ENA Express. 
To specify this setting, you must first enable ENA Express.", + "title": "EnaSrdUdpEnabled", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::EC2::NetworkInterfacePermission": { "additionalProperties": false, "properties": { @@ -70041,7 +72158,6 @@ }, "required": [ "AddressFamily", - "MaxEntries", "PrefixListName" ], "type": "object" @@ -70403,12 +72519,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR 
format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70418,12 +72534,12 @@ "type": "string" }, "DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70452,12 +72568,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security 
group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70543,12 +72659,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR 
format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70558,12 +72674,12 @@ "type": "string" }, 
"DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for an AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for an AWS service. This is the AWS service to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70651,12 +72767,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor 
examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -71008,7 +73124,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The volume type. 
For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -71245,6 +73361,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::SpotFleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -75796,7 +77917,7 @@ "type": "string" }, "Encrypted": { - "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) in the *Amazon Elastic Compute Cloud User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .", + "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/ebs/latest/userguide/work-with-ebs-encr.html#encryption-by-default) in the *Amazon EBS User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. 
For more information, see [Supported instance types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances) .", "title": "Encrypted", "type": "boolean" }, @@ -75844,7 +77965,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nDefault: `gp2`", + "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) .\n\nDefault: `gp2`", "title": "VolumeType", "type": "string" } @@ -76967,7 +79088,7 @@ "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` will not be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that is run using *capacityProviderA* , four tasks would use *capacityProviderB* .", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. 
Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", "title": "Weight", "type": "number" } @@ -77501,7 +79622,7 @@ "additionalProperties": false, "properties": { "Field": { - "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `CPU` and `MEMORY` . For the `random` placement strategy, this field is not used.", + "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that's applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `cpu` and `memory` . 
For the `random` placement strategy, this field is not used.", "title": "Field", "type": "string" }, @@ -77612,6 +79733,16 @@ "markdownDescription": "The `portName` must match the name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service.", "title": "PortName", "type": "string" + }, + "Timeout": { + "$ref": "#/definitions/AWS::ECS::Service.TimeoutConfiguration", + "markdownDescription": "A reference to an object that represents the configured timeouts for Service Connect.", + "title": "Timeout" + }, + "Tls": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsConfiguration", + "markdownDescription": "A reference to an object that represents a Transport Layer Security (TLS) configuration.", + "title": "Tls" } }, "required": [ @@ -77619,6 +79750,41 @@ ], "type": "object" }, + "AWS::ECS::Service.ServiceConnectTlsCertificateAuthority": { + "additionalProperties": false, + "properties": { + "AwsPcaAuthorityArn": { + "markdownDescription": "The ARN of the AWS Private Certificate Authority certificate.", + "title": "AwsPcaAuthorityArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Service.ServiceConnectTlsConfiguration": { + "additionalProperties": false, + "properties": { + "IssuerCertificateAuthority": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsCertificateAuthority", + "markdownDescription": "The signer certificate authority.", + "title": "IssuerCertificateAuthority" + }, + "KmsKey": { + "markdownDescription": "The AWS Key Management Service key.", + "title": "KmsKey", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "IssuerCertificateAuthority" + ], + "type": "object" + }, "AWS::ECS::Service.ServiceManagedEBSVolumeConfiguration": { "additionalProperties": false, "properties": { @@ -77726,6 
+79892,22 @@ ], "type": "object" }, + "AWS::ECS::Service.TimeoutConfiguration": { + "additionalProperties": false, + "properties": { + "IdleTimeoutSeconds": { + "markdownDescription": "The amount of time in seconds a connection will stay active while idle. A value of `0` can be set to disable `idleTimeout` .\n\nThe `idleTimeout` default for `HTTP` / `HTTP2` / `GRPC` is 5 minutes.\n\nThe `idleTimeout` default for `TCP` is 1 hour.", + "title": "IdleTimeoutSeconds", + "type": "number" + }, + "PerRequestTimeoutSeconds": { + "markdownDescription": "The amount of time waiting for the upstream to respond with a complete response per request. A value of `0` can be set to disable `perRequestTimeout` . `perRequestTimeout` can only be set if Service Connect `appProtocol` isn't `TCP` . Only `idleTimeout` is allowed for `TCP` `appProtocol` .", + "title": "PerRequestTimeoutSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::ECS::TaskDefinition": { "additionalProperties": false, "properties": { @@ -77919,6 +80101,14 @@ "title": "Cpu", "type": "number" }, + "CredentialSpecs": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . 
You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide an ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", + "title": "CredentialSpecs", + "type": "array" + }, "DependsOn": { "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.ContainerDependency" @@ -78286,7 +80476,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The file type to use. The only supported value is `s3` .", + "markdownDescription": "The file type to use. Environment files are objects in Amazon S3. The only supported value is `s3` .", "title": "Type", "type": "string" }, @@ -78898,6 +81088,14 @@ "title": "ServiceRegistries", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The metadata that you apply to the task set to help you categorize and organize them. 
Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + }, "TaskDefinition": { "markdownDescription": "The task definition for the tasks in the task set to use. If a revision isn't specified, the latest `ACTIVE` revision is used.", "title": "TaskDefinition", @@ -80891,7 +83089,7 @@ "title": "ManagedScalingPolicy" }, "Name": { - "markdownDescription": "The name of the cluster.", + "markdownDescription": "The name of the cluster. 
This parameter can't contain the characters <, >, $, |, or ` (backtick).", "title": "Name", "type": "string" }, @@ -81534,6 +83732,11 @@ "markdownDescription": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", "title": "TerminationProtected", "type": "boolean" + }, + "UnhealthyNodeReplacement": { + "markdownDescription": "Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.", + "title": "UnhealthyNodeReplacement", + "type": "boolean" } }, "type": "object" @@ -85041,7 +87244,7 @@ "Port": { "markdownDescription": "The port number that the cache engine is listening on.", "title": "Port", - "type": "number" + "type": "string" } }, "type": "object" @@ -87434,6 +89637,11 @@ "Properties": { "additionalProperties": false, "properties": { + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": { + "markdownDescription": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink .", + "title": "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic", + "type": "string" + }, "IpAddressType": { "markdownDescription": "The IP address type. The possible values are `ipv4` (for IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). 
You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.", "title": "IpAddressType", @@ -93138,7 +95346,7 @@ "type": "number" }, "Mode": { - "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, , or if it using a `USER_PROVISIONED` value.", + "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it is using a `USER_PROVISIONED` value.", "title": "Mode", "type": "string" } @@ -93259,7 +95467,7 @@ "type": "string" }, "HAPairs": { - "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file system are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", + "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . 
For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 12.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "title": "HAPairs", "type": "number" }, @@ -93282,7 +95490,7 @@ "type": "number" }, "ThroughputCapacityPerHAPair": { - "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", + "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 
2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 12).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "title": "ThroughputCapacityPerHAPair", "type": "number" }, @@ -98659,6 +100867,11 @@ "title": "CatalogEncryptionMode", "type": "string" }, + "CatalogEncryptionServiceRole": { + "markdownDescription": "The role that AWS Glue assumes to encrypt and decrypt the Data Catalog objects on the caller's behalf.", + "title": "CatalogEncryptionServiceRole", + "type": "string" + }, "SseAwsKmsKeyId": { "markdownDescription": "The ID of the AWS KMS key to use for encryption at rest.", "title": "SseAwsKmsKeyId", @@ -100864,6 +103077,117 @@ }, "type": "object" }, + "AWS::Glue::TableOptimizer": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CatalogId": { + "markdownDescription": "The catalog ID of the table.", + "title": "CatalogId", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", + "title": "DatabaseName", + "type": "string" + }, + "TableName": { + "markdownDescription": "The table name. 
For Hive compatibility, this must be entirely lowercase.", + "title": "TableName", + "type": "string" + }, + "TableOptimizerConfiguration": { + "$ref": "#/definitions/AWS::Glue::TableOptimizer.TableOptimizerConfiguration", + "markdownDescription": "Specifies configuration details of a table optimizer.", + "title": "TableOptimizerConfiguration" + }, + "Type": { + "markdownDescription": "The type of table optimizer. Currently, the only valid value is compaction.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "CatalogId", + "DatabaseName", + "TableName", + "TableOptimizerConfiguration", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Glue::TableOptimizer" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Glue::TableOptimizer.TableOptimizerConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Whether the table optimization is enabled.", + "title": "Enabled", + "type": "boolean" + }, + "RoleArn": { + "markdownDescription": "A role passed by the caller which gives the service permission to update the resources associated with the optimizer on the caller's behalf.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "Enabled", + "RoleArn" + ], + "type": "object" + }, "AWS::Glue::Trigger": { "additionalProperties": false, "properties": { @@ -105772,7 +108096,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::GuardDuty::Filter.TagItem" }, "markdownDescription": "The tags to be added to a new filter resource. 
Each tag consists of a key and an optional value, both of which you define.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", @@ -105780,12 +108104,7 @@ } }, "required": [ - "Action", - "Description", - "DetectorId", - "FindingCriteria", - "Name", - "Rank" + "FindingCriteria" ], "type": "object" }, @@ -105892,14 +108211,37 @@ "additionalProperties": false, "properties": { "Criterion": { + "additionalProperties": false, "markdownDescription": "Represents a map of finding properties that match specified conditions and values when querying findings.\n\nFor information about JSON criterion mapping to their console equivalent, see [Finding criteria](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_filter-findings.html#filter_criteria) . The following are the available criterion:\n\n- accountId\n- id\n- region\n- severity\n\nTo filter on the basis of severity, API and CFN use the following input list for the condition:\n\n- *Low* : `[\"1\", \"2\", \"3\"]`\n- *Medium* : `[\"4\", \"5\", \"6\"]`\n- *High* : `[\"7\", \"8\", \"9\"]`\n\nFor more information, see [Severity levels for GuardDuty findings](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity) .\n- type\n- updatedAt\n\nType: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.\n- resource.accessKeyDetails.accessKeyId\n- resource.accessKeyDetails.principalId\n- resource.accessKeyDetails.userName\n- resource.accessKeyDetails.userType\n- resource.instanceDetails.iamInstanceProfile.id\n- resource.instanceDetails.imageId\n- resource.instanceDetails.instanceId\n- resource.instanceDetails.tags.key\n- resource.instanceDetails.tags.value\n- resource.instanceDetails.networkInterfaces.ipv6Addresses\n- resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress\n- 
resource.instanceDetails.networkInterfaces.publicDnsName\n- resource.instanceDetails.networkInterfaces.publicIp\n- resource.instanceDetails.networkInterfaces.securityGroups.groupId\n- resource.instanceDetails.networkInterfaces.securityGroups.groupName\n- resource.instanceDetails.networkInterfaces.subnetId\n- resource.instanceDetails.networkInterfaces.vpcId\n- resource.instanceDetails.outpostArn\n- resource.resourceType\n- resource.s3BucketDetails.publicAccess.effectivePermissions\n- resource.s3BucketDetails.name\n- resource.s3BucketDetails.tags.key\n- resource.s3BucketDetails.tags.value\n- resource.s3BucketDetails.type\n- service.action.actionType\n- service.action.awsApiCallAction.api\n- service.action.awsApiCallAction.callerType\n- service.action.awsApiCallAction.errorCode\n- service.action.awsApiCallAction.remoteIpDetails.city.cityName\n- service.action.awsApiCallAction.remoteIpDetails.country.countryName\n- service.action.awsApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.awsApiCallAction.remoteIpDetails.organization.asn\n- service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg\n- service.action.awsApiCallAction.serviceName\n- service.action.dnsRequestAction.domain\n- service.action.networkConnectionAction.blocked\n- service.action.networkConnectionAction.connectionDirection\n- service.action.networkConnectionAction.localPortDetails.port\n- service.action.networkConnectionAction.protocol\n- service.action.networkConnectionAction.remoteIpDetails.city.cityName\n- service.action.networkConnectionAction.remoteIpDetails.country.countryName\n- service.action.networkConnectionAction.remoteIpDetails.ipAddressV4\n- service.action.networkConnectionAction.remoteIpDetails.organization.asn\n- service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg\n- service.action.networkConnectionAction.remotePortDetails.port\n- service.action.awsApiCallAction.remoteAccountDetails.affiliated\n- 
service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.kubernetesApiCallAction.requestUri\n- service.action.networkConnectionAction.localIpDetails.ipAddressV4\n- service.action.networkConnectionAction.protocol\n- service.action.awsApiCallAction.serviceName\n- service.action.awsApiCallAction.remoteAccountDetails.accountId\n- service.additionalInfo.threatListName\n- service.resourceRole\n- resource.eksClusterDetails.name\n- resource.kubernetesDetails.kubernetesWorkloadDetails.name\n- resource.kubernetesDetails.kubernetesWorkloadDetails.namespace\n- resource.kubernetesDetails.kubernetesUserDetails.username\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix\n- service.ebsVolumeScanDetails.scanId\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash\n- resource.ecsClusterDetails.name\n- resource.ecsClusterDetails.taskDetails.containers.image\n- resource.ecsClusterDetails.taskDetails.definitionArn\n- resource.containerDetails.image\n- resource.rdsDbInstanceDetails.dbInstanceIdentifier\n- resource.rdsDbInstanceDetails.dbClusterIdentifier\n- resource.rdsDbInstanceDetails.engine\n- resource.rdsDbUserDetails.user\n- resource.rdsDbInstanceDetails.tags.key\n- resource.rdsDbInstanceDetails.tags.value\n- service.runtimeDetails.process.executableSha256\n- service.runtimeDetails.process.name\n- service.runtimeDetails.process.name\n- resource.lambdaDetails.functionName\n- resource.lambdaDetails.functionArn\n- resource.lambdaDetails.tags.key\n- resource.lambdaDetails.tags.value", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + } + }, "title": "Criterion", "type": "object" + } + }, + 
"type": "object" + }, + "AWS::GuardDuty::Filter.TagItem": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "", + "title": "Key", + "type": "string" }, - "ItemType": { - "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + "Value": { + "markdownDescription": "", + "title": "Value", + "type": "string" } }, + "required": [ + "Key", + "Value" + ], "type": "object" }, "AWS::GuardDuty::IPSet": { @@ -106163,9 +108505,7 @@ } }, "required": [ - "DetectorId", - "Email", - "MemberId" + "Email" ], "type": "object" }, @@ -108403,6 +110743,77 @@ }, "type": "object" }, + "AWS::IVS::Stage": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Stage name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-stage-tag.html) .", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::IVS::Stage" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::IVS::StreamKey": { "additionalProperties": false, "properties": { @@ -110911,7 +113322,8 @@ } }, "required": [ - "Name" + "Name", + "SemanticVersion" ], "type": "object" }, @@ -111253,23 
+113665,113 @@ "Properties": { "additionalProperties": false, "properties": { - "ResourceGroupTags": { - "items": { - "$ref": "#/definitions/Tag" + "ResourceGroupTags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "title": "ResourceGroupTags", + "type": "array" + } + }, + "required": [ + "ResourceGroupTags" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Inspector::ResourceGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ScanName": { + "markdownDescription": "The name of the CIS scan configuration.", + "title": "ScanName", + "type": "string" + }, + "Schedule": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Schedule", + "markdownDescription": "The CIS scan configuration's schedule.", + "title": "Schedule" + }, + "SecurityLevel": { + "markdownDescription": "The CIS scan configuration's CIS Benchmark level.", + "title": "SecurityLevel", + "type": "string" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "The CIS scan configuration's tags.", + "patternProperties": { + 
"^[a-zA-Z0-9]+$": { + "type": "string" + } }, - "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", - "title": "ResourceGroupTags", - "type": "array" + "title": "Tags", + "type": "object" + }, + "Targets": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.CisTargets", + "markdownDescription": "The CIS scan configuration's targets.", + "title": "Targets" } }, - "required": [ - "ResourceGroupTags" - ], "type": "object" }, "Type": { "enum": [ - "AWS::Inspector::ResourceGroup" + "AWS::InspectorV2::CisScanConfiguration" ], "type": "string" }, @@ -111283,8 +113785,132 @@ } }, "required": [ - "Type", - "Properties" + "Type" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.CisTargets": { + "additionalProperties": false, + "properties": { + "AccountIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The CIS target account ids.", + "title": "AccountIds", + "type": "array" + }, + "TargetResourceTags": { + "markdownDescription": "The CIS target resource tags.", + "title": "TargetResourceTags", + "type": "object" + } + }, + "required": [ + "AccountIds" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.DailySchedule": { + "additionalProperties": false, + "properties": { + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The schedule start time.", + "title": "StartTime" + } + }, + "required": [ + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.MonthlySchedule": { + "additionalProperties": false, + "properties": { + "Day": { + "markdownDescription": "The monthly schedule's day.", + "title": "Day", + "type": "string" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + 
"markdownDescription": "The monthly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Day", + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Schedule": { + "additionalProperties": false, + "properties": { + "Daily": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.DailySchedule", + "markdownDescription": "A daily schedule.", + "title": "Daily" + }, + "Monthly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.MonthlySchedule", + "markdownDescription": "A monthly schedule.", + "title": "Monthly" + }, + "OneTime": { + "markdownDescription": "A one time schedule.", + "title": "OneTime", + "type": "object" + }, + "Weekly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.WeeklySchedule", + "markdownDescription": "A weekly schedule.", + "title": "Weekly" + } + }, + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Time": { + "additionalProperties": false, + "properties": { + "TimeOfDay": { + "markdownDescription": "The time of day in 24-hour format (00:00).", + "title": "TimeOfDay", + "type": "string" + }, + "TimeZone": { + "markdownDescription": "The timezone.", + "title": "TimeZone", + "type": "string" + } + }, + "required": [ + "TimeOfDay", + "TimeZone" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.WeeklySchedule": { + "additionalProperties": false, + "properties": { + "Days": { + "items": { + "type": "string" + }, + "markdownDescription": "The weekly schedule's days.", + "title": "Days", + "type": "array" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The weekly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Days", + "StartTime" ], "type": "object" }, @@ -113168,6 +115794,11 @@ "title": "ServerCertificateArns", "type": "array" }, + "ServerCertificateConfig": { + "$ref": 
"#/definitions/AWS::IoT::DomainConfiguration.ServerCertificateConfig", + "markdownDescription": "The server certificate configuration.\n\nFor more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "ServerCertificateConfig" + }, "ServiceType": { "markdownDescription": "The type of service delivered by the endpoint.\n\n> AWS IoT Core currently supports only the `DATA` service type.", "title": "ServiceType", @@ -113230,6 +115861,17 @@ }, "type": "object" }, + "AWS::IoT::DomainConfiguration.ServerCertificateConfig": { + "additionalProperties": false, + "properties": { + "EnableOCSPCheck": { + "markdownDescription": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "EnableOCSPCheck", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::IoT::DomainConfiguration.ServerCertificateSummary": { "additionalProperties": false, "properties": { @@ -121182,7 +123824,7 @@ "title": "AccessPolicyIdentity" }, "AccessPolicyPermission": { - "markdownDescription": "The permission level for this access policy. Choose either a `ADMINISTRATOR` or `VIEWER` . Note that a project `ADMINISTRATOR` is also known as a project owner.", + "markdownDescription": "The permission level for this access policy. 
Note that a project `ADMINISTRATOR` is also known as a project owner.", "title": "AccessPolicyPermission", "type": "string" }, @@ -121235,7 +123877,7 @@ }, "User": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.User", - "markdownDescription": "The IAM Identity Center user to which this access policy maps.", + "markdownDescription": "An IAM Identity Center user identity.", "title": "User" } }, @@ -121246,12 +123888,12 @@ "properties": { "Portal": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Portal", - "markdownDescription": "The AWS IoT SiteWise Monitor portal for this access policy.", + "markdownDescription": "Identifies an AWS IoT SiteWise Monitor portal.", "title": "Portal" }, "Project": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Project", - "markdownDescription": "The AWS IoT SiteWise Monitor project for this access policy.", + "markdownDescription": "Identifies a specific AWS IoT SiteWise Monitor project.", "title": "Project" } }, @@ -121305,7 +123947,7 @@ "additionalProperties": false, "properties": { "id": { - "markdownDescription": "The ID of the user.", + "markdownDescription": "The IAM Identity Center ID of the user.", "title": "id", "type": "string" } @@ -121348,15 +123990,18 @@ "additionalProperties": false, "properties": { "AssetDescription": { - "markdownDescription": "A description for the asset.", + "markdownDescription": "The ID of the asset, in UUID format.", "title": "AssetDescription", "type": "string" }, + "AssetExternalId": { + "type": "string" + }, "AssetHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::Asset.AssetHierarchy" }, - "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyLogicalId` . A hierarchy specifies allowed parent/child asset relationships.", + "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyId` . 
A hierarchy specifies allowed parent/child asset relationships.", "title": "AssetHierarchies", "type": "array" }, @@ -121366,7 +124011,7 @@ "type": "string" }, "AssetName": { - "markdownDescription": "A unique, friendly name for the asset.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A friendly name for the asset.", "title": "AssetName", "type": "string" }, @@ -121422,15 +124067,20 @@ "title": "ChildAssetId", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The ID of the hierarchy. This ID is a `hierarchyId` .", "title": "LogicalId", "type": "string" } }, "required": [ - "ChildAssetId", - "LogicalId" + "ChildAssetId" ], "type": "object" }, @@ -121438,17 +124088,23 @@ "additionalProperties": false, "properties": { "Alias": { - "markdownDescription": "The property alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .\n\nThe property alias must have 1-1000 characters.", + "markdownDescription": "The alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). 
For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .", "title": "Alias", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset property.\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset property.", "title": "LogicalId", "type": "string" }, "NotificationState": { - "markdownDescription": "The MQTT notification state ( `ENABLED` or `DISABLED` ) for this asset property. When the notification state is `ENABLED` , AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .\n\n> You must use all caps for the NotificationState parameter. If you use lower case letters, you will receive a schema validation error.", + "markdownDescription": "The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. 
For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .", "title": "NotificationState", "type": "string" }, @@ -121458,9 +124114,6 @@ "type": "string" } }, - "required": [ - "LogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::AssetModel": { @@ -121502,7 +124155,7 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel" }, - "markdownDescription": "The composite asset models that are part of this asset model. Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. You can use composite asset models to define alarms on this asset model.", + "markdownDescription": "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.\n\n> When creating custom composite models, you need to use [CreateAssetModelCompositeModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModelCompositeModel.html) . 
For more information, see [Creating custom composite models (Components)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-custom-composite-models.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelCompositeModels", "type": "array" }, @@ -121511,16 +124164,19 @@ "title": "AssetModelDescription", "type": "string" }, + "AssetModelExternalId": { + "type": "string" + }, "AssetModelHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelHierarchy" }, - "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelHierarchies", "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -121528,10 +124184,13 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" }, - "markdownDescription": "The property definitions of the asset model. For more information, see [Defining data properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelProperties", "type": "array" }, + "AssetModelType": { + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -121570,6 +124229,9 @@ "AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel": { "additionalProperties": false, "properties": { + "ComposedAssetModelId": { + "type": "string" + }, "CompositeModelProperties": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" @@ -121583,11 +124245,26 @@ "title": "Description", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "Name": { "markdownDescription": "The name of the composite model.", "title": "Name", "type": "string" }, + "ParentAssetModelCompositeModelExternalId": { + "type": "string" + }, + "Path": { + "items": { + "type": "string" + }, + "type": "array" + }, "Type": { "markdownDescription": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` .", "title": "Type", @@ -121604,24 +124281,29 @@ "additionalProperties": false, "properties": { "ChildAssetModelId": { - "markdownDescription": "The Id of the asset model.", + "markdownDescription": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. 
For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "title": "ChildAssetModelId", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+`", + "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model hierarchy.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation.", "title": "Name", "type": "string" } }, "required": [ "ChildAssetModelId", - "LogicalId", "Name" ], "type": "object" @@ -121630,7 +124312,7 @@ "additionalProperties": false, "properties": { "DataType": { - "markdownDescription": "The data type of the asset model property. 
The value can be `STRING` , `INTEGER` , `DOUBLE` , `BOOLEAN` , or `STRUCT` .", + "markdownDescription": "The data type of the asset model property.", "title": "DataType", "type": "string" }, @@ -121639,19 +124321,25 @@ "title": "DataTypeSpec", "type": "string" }, + "ExternalId": { + "type": "string" + }, + "Id": { + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model property.\n\nThe maximum length is 256 characters, with the pattern `[^\\\\u0000-\\\\u001F\\\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset model property.", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model property.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model property.", "title": "Name", "type": "string" }, "Type": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyType", - "markdownDescription": "Contains a property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` .", + "markdownDescription": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "title": "Type" }, "Unit": { @@ -121662,7 +124350,6 @@ }, "required": [ "DataType", - "LogicalId", "Name", "Type" ], @@ -121683,7 +124370,7 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The friendly name of the variable to be used in the expression.\n\nThe maximum length is 64 characters with the pattern `^[a-z][a-z0-9_]*$` .", + "markdownDescription": "The friendly name of the variable to be used in the expression.", "title": "Name", "type": "string" }, @@ -121739,22 +124426,34 @@ }, "type": "object" }, + "AWS::IoTSiteWise::AssetModel.PropertyPathDefinition": { + "additionalProperties": false, + "properties": { + "Name": { + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, 
"AWS::IoTSiteWise::AssetModel.PropertyType": { "additionalProperties": false, "properties": { "Attribute": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Attribute", - "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [industrial IoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.\n\nThis is required if the `TypeName` is `Attribute` and has a `DefaultValue` .", + "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [IIoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.", "title": "Attribute" }, "Metric": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Metric", - "markdownDescription": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.\n\nThis is required if the `TypeName` is `Metric` .", + "markdownDescription": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.", "title": "Metric" }, "Transform": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Transform", - "markdownDescription": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.\n\nThis is required if the `TypeName` is `Transform` .", + "markdownDescription": "Specifies an asset transform property. 
A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.", "title": "Transform" }, "TypeName": { @@ -121813,20 +124512,35 @@ "AWS::IoTSiteWise::AssetModel.VariableValue": { "additionalProperties": false, "properties": { + "HierarchyExternalId": { + "type": "string" + }, + "HierarchyId": { + "type": "string" + }, "HierarchyLogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. 
For more information, see [Defining relationships between asset models (hierarchies)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", "title": "HierarchyLogicalId", "type": "string" }, + "PropertyExternalId": { + "type": "string" + }, + "PropertyId": { + "type": "string" + }, "PropertyLogicalId": { - "markdownDescription": "The `LogicalID` of the property to use as the variable.", + "markdownDescription": "The `LogicalID` of the property that is being referenced.", "title": "PropertyLogicalId", "type": "string" + }, + "PropertyPath": { + "items": { + "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyPathDefinition" + }, + "type": "array" } }, - "required": [ - "PropertyLogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::Dashboard": { @@ -121965,7 +124679,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -122019,7 +124733,7 @@ "type": "string" }, "CapabilityNamespace": { - "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .\n\nThe maximum length is 512 characters with the pattern `^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$` .", + "markdownDescription": "The namespace of the capability configuration. 
For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .", "title": "CapabilityNamespace", "type": "string" } @@ -122049,7 +124763,7 @@ "additionalProperties": false, "properties": { "GroupArn": { - "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/latest/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/latest/apireference/getgroup-get.html) in the *AWS IoT Greengrass API Reference* .", + "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* .", "title": "GroupArn", "type": "string" } @@ -122119,7 +124833,7 @@ "type": "string" }, "PortalAuthMode": { - "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center , you must enable IAM Identity Center . For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . 
This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management ( IAM ) to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", + "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center, you must enable IAM Identity Center. For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", "title": "PortalAuthMode", "type": "string" }, @@ -124806,6 +127520,11 @@ "title": "Name", "type": "string" }, + "Positioning": { + "markdownDescription": "FPort values for the GNSS, Stream, and ClockSync functions of the positioning information.", + "title": "Positioning", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -124892,6 +127611,41 @@ ], "type": "object" }, + "AWS::IoTWireless::WirelessDevice.Application": { + "additionalProperties": false, + "properties": { + "DestinationName": { + "markdownDescription": "The name of the position data destination that describes the IoT rule that processes the device's position data.", + "title": "DestinationName", + "type": "string" + }, + "FPort": { + "markdownDescription": "The name of the new destination for the device.", + "title": "FPort", + "type": "number" + }, + "Type": { + "markdownDescription": "Application type, which can be specified to 
obtain real-time position information of your LoRaWAN device.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::IoTWireless::WirelessDevice.FPorts": { + "additionalProperties": false, + "properties": { + "Applications": { + "items": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.Application" + }, + "markdownDescription": "LoRaWAN application configuration, which can be used to perform geolocation.", + "title": "Applications", + "type": "array" + } + }, + "type": "object" + }, "AWS::IoTWireless::WirelessDevice.LoRaWANDevice": { "additionalProperties": false, "properties": { @@ -124915,6 +127669,11 @@ "title": "DeviceProfileId", "type": "string" }, + "FPorts": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.FPorts", + "markdownDescription": "List of FPort assigned for different LoRaWAN application packages to use.", + "title": "FPorts" + }, "OtaaV10x": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.OtaaV10x", "markdownDescription": "OTAA device object for create APIs for v1.0.x", @@ -126996,7 +129755,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `**/*.png` - All .png files in all directories\n- `**/*.{png, ico, md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "ExclusionPatterns", "type": "array" }, @@ -127004,7 +129763,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `**/*.png` - All .png files in all directories\n- `**/*.{png, ico, md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "InclusionPatterns", "type": "array" }, @@ -130660,6 +133419,11 @@ "markdownDescription": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. 
You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "title": "S3DestinationConfiguration" }, + "SnowflakeDestinationConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration", + "markdownDescription": "Configure Snowflake destination", + "title": "SnowflakeDestinationConfiguration" + }, "SplunkDestinationConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SplunkDestinationConfiguration", "markdownDescription": "The configuration of a destination in Splunk for the delivery stream.", @@ -131211,6 +133975,11 @@ "title": "CompressionFormat", "type": "string" }, + "CustomTimeZone": { + "markdownDescription": "The time zone you prefer. UTC is the default.", + "title": "CustomTimeZone", + "type": "string" + }, "DataFormatConversionConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DataFormatConversionConfiguration", "markdownDescription": "The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.", @@ -131231,6 +134000,11 @@ "title": "ErrorOutputPrefix", "type": "string" }, + "FileExtension": { + "markdownDescription": "Specify a file extension. It will override the default file extension", + "title": "FileExtension", + "type": "string" + }, "Prefix": { "markdownDescription": "The `YYYY/MM/DD/HH` time format prefix is automatically used for delivered Amazon S3 files. 
For more information, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", "title": "Prefix", @@ -131853,6 +134627,153 @@ }, "type": "object" }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration": { + "additionalProperties": false, + "properties": { + "AccountUrl": { + "markdownDescription": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . Note that the protocol (https://) and port number are optional.", + "title": "AccountUrl", + "type": "string" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", + "markdownDescription": "", + "title": "CloudWatchLoggingOptions" + }, + "ContentColumnName": { + "markdownDescription": "The name of the record content column", + "title": "ContentColumnName", + "type": "string" + }, + "DataLoadingOption": { + "markdownDescription": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", + "title": "DataLoadingOption", + "type": "string" + }, + "Database": { + "markdownDescription": "All data in Snowflake is maintained in databases.", + "title": "Database", + "type": "string" + }, + "KeyPassphrase": { + "markdownDescription": "Passphrase to decrypt the private key when the key is encrypted. 
For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "KeyPassphrase", + "type": "string" + }, + "MetaDataColumnName": { + "markdownDescription": "The name of the record metadata column", + "title": "MetaDataColumnName", + "type": "string" + }, + "PrivateKey": { + "markdownDescription": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "PrivateKey", + "type": "string" + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", + "markdownDescription": "", + "title": "ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions", + "markdownDescription": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "RetryOptions" + }, + "RoleARN": { + "markdownDescription": "The Amazon Resource Name (ARN) of the Snowflake role", + "title": "RoleARN", + "type": "string" + }, + "S3BackupMode": { + "markdownDescription": "Choose an S3 backup mode", + "title": "S3BackupMode", + "type": "string" + }, + "S3Configuration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.S3DestinationConfiguration", + "markdownDescription": "", + "title": "S3Configuration" + }, + "Schema": { + "markdownDescription": "Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views", + "title": "Schema", + "type": "string" + }, + "SnowflakeRoleConfiguration": { + "$ref": 
"#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration", + "markdownDescription": "Optionally configure a Snowflake role. Otherwise the default user role will be used.", + "title": "SnowflakeRoleConfiguration" + }, + "SnowflakeVpcConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration", + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "SnowflakeVpcConfiguration" + }, + "Table": { + "markdownDescription": "All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.", + "title": "Table", + "type": "string" + }, + "User": { + "markdownDescription": "User login name for the Snowflake account.", + "title": "User", + "type": "string" + } + }, + "required": [ + "AccountUrl", + "Database", + "PrivateKey", + "RoleARN", + "S3Configuration", + "Schema", + "Table", + "User" + ], + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions": { + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "markdownDescription": "the time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "DurationInSeconds", + "type": "number" + } + }, + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Enable Snowflake role", + "title": "Enabled", + "type": "boolean" + }, + "SnowflakeRole": { + "markdownDescription": "The Snowflake role you wish to configure", + "title": "SnowflakeRole", + "type": "string" + } + }, + "type": "object" + }, + 
"AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration": { + "additionalProperties": false, + "properties": { + "PrivateLinkVpceId": { + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "PrivateLinkVpceId", + "type": "string" + } + }, + "required": [ + "PrivateLinkVpceId" + ], + "type": "object" + }, "AWS::KinesisFirehose::DeliveryStream.SplunkBufferingHints": { "additionalProperties": false, "properties": { @@ -133107,6 +136028,11 @@ "Properties": { "additionalProperties": false, "properties": { + "HybridAccessEnabled": { + "markdownDescription": "Indicates whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.", + "title": "HybridAccessEnabled", + "type": "boolean" + }, "ResourceArn": { "markdownDescription": "The Amazon Resource Name (ARN) of the resource.", "title": "ResourceArn", @@ -133493,7 +136419,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -133913,7 +136839,7 @@ "title": "FilterCriteria" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -134854,7 +137780,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -135094,7 +138020,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -140414,6 +143340,14 @@ "AWS::Location::Map.MapConfiguration": { "additionalProperties": false, "properties": { + "CustomLayers": { + "items": { + "type": "string" + }, + "markdownDescription": "Specifies the custom layers for the style. 
Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as the `POI` layer for the VectorEsriNavigation style.\n\n> Currenlty only `VectorEsriNavigation` supports CustomLayers. For more information, see [Custom Layers](https://docs.aws.amazon.com//location/latest/developerguide/map-concepts.html#map-custom-layers) .", + "title": "CustomLayers", + "type": "array" + }, "PoliticalView": { "markdownDescription": "Specifies the map political view selected from an available data provider.", "title": "PoliticalView", @@ -154648,6 +157582,10 @@ "type": "array" } }, + "required": [ + "ChannelGroupName", + "ChannelName" + ], "type": "object" }, "Type": { @@ -154666,7 +157604,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154740,6 +157679,9 @@ "type": "array" } }, + "required": [ + "ChannelGroupName" + ], "type": "object" }, "Type": { @@ -154758,7 +157700,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154814,6 +157757,8 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", "Policy" ], "type": "object" @@ -154935,7 +157880,9 @@ } }, "required": [ - "ContainerType" + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" ], "type": "object" }, @@ -155304,6 +158251,9 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", "Policy" ], "type": "object" @@ -186196,6 +189146,14 @@ "markdownDescription": "", "title": "Definition" }, + "LinkEntities": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of analysis Amazon Resource Names (ARNs) to be linked to the dashboard.", + "title": "LinkEntities", + "type": "array" + }, "LinkSharingConfiguration": { "$ref": "#/definitions/AWS::QuickSight::Dashboard.LinkSharingConfiguration", "markdownDescription": "A structure that contains the link sharing configurations that you want to apply overrides to.", @@ -214695,7 +217653,7 @@ "type": "boolean" 
}, "EnableHttpEndpoint": { - "markdownDescription": "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", + "markdownDescription": "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EnableHttpEndpoint", "type": "boolean" }, @@ -214850,7 +217808,7 @@ "type": "boolean" }, "StorageType": { - "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", + "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. 
The storage type isn't returned when you set it to `aurora` .", "title": "StorageType", "type": "string" }, @@ -215495,7 +218453,7 @@ "type": "number" }, "StorageType": { - "markdownDescription": "Specifies the storage type to be associated with the DB instance.\n\nValid values: `gp2 | gp3 | io1 | standard`\n\nThe `standard` value is also known as magnetic.\n\nIf you specify `io1` or `gp3` , you must also include a value for the `Iops` parameter.\n\nDefault: `io1` if the `Iops` parameter is specified, otherwise `gp2`\n\nFor more information, see [Amazon RDS DB Instance Storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs).", + "markdownDescription": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. 
Otherwise, `gp2` .", "title": "StorageType", "type": "string" }, @@ -216640,6 +219598,108 @@ ], "type": "object" }, + "AWS::RDS::Integration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AdditionalEncryptionContext": { + "additionalProperties": true, + "markdownDescription": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "AdditionalEncryptionContext", + "type": "object" + }, + "IntegrationName": { + "markdownDescription": "The name of the integration.", + "title": "IntegrationName", + "type": "string" + }, + "KMSKeyId": { + "markdownDescription": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.", + "title": "KMSKeyId", + "type": "string" + }, + "SourceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", + "title": "SourceArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tags. 
For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "title": "Tags", + "type": "array" + }, + "TargetArn": { + "markdownDescription": "The ARN of the Redshift data warehouse to use as the target for replication.", + "title": "TargetArn", + "type": "string" + } + }, + "required": [ + "SourceArn", + "TargetArn" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::RDS::Integration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::RDS::OptionGroup": { "additionalProperties": false, "properties": { @@ -217296,7 +220356,7 @@ "type": "string" }, "Port": { - "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values: `1150-65535`", + "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with ds2 or dc2 nodes - Select a port within the range `1150-65535` .", "title": "Port", "type": "number" }, @@ -218174,191 +221234,196 @@ "Properties": { "additionalProperties": false, "properties": { - "Enable": { - "markdownDescription": "If true, the schedule is enabled. 
If false, the scheduled action does not trigger. For more information about `state` of the scheduled action, see `ScheduledAction` .", - "title": "Enable", - "type": "boolean" - }, - "EndTime": { - "markdownDescription": "The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.", - "title": "EndTime", - "type": "string" - }, - "IamRole": { - "markdownDescription": "The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see [Using Identity-Based Policies for Amazon Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html) in the *Amazon Redshift Cluster Management Guide* .", - "title": "IamRole", - "type": "string" - }, - "Schedule": { - "markdownDescription": "The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.\n\nFormat of at expressions is \" `at(yyyy-mm-ddThh:mm:ss)` \". For example, \" `at(2016-03-04T17:27:00)` \".\n\nFormat of cron expressions is \" `cron(Minutes Hours Day-of-month Month Day-of-week Year)` \". For example, \" `cron(0 10 ? * MON *)` \". 
For more information, see [Cron Expressions](https://docs.aws.amazon.com//AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch Events User Guide* .", - "title": "Schedule", - "type": "string" - }, - "ScheduledActionDescription": { - "markdownDescription": "The description of the scheduled action.", - "title": "ScheduledActionDescription", - "type": "string" - }, - "ScheduledActionName": { - "markdownDescription": "The name of the scheduled action.", - "title": "ScheduledActionName", - "type": "string" - }, - "StartTime": { - "markdownDescription": "The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.", - "title": "StartTime", - "type": "string" - }, - "TargetAction": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ScheduledActionType", - "markdownDescription": "A JSON format string of the Amazon Redshift API operation with input parameters.\n\n\" `{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ds2.8xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}` \".", - "title": "TargetAction" - } - }, - "required": [ - "ScheduledActionName" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::Redshift::ScheduledAction" - ], - "type": "string" - }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - } - }, - "required": [ - "Type", - "Properties" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.PauseClusterMessage": { - "additionalProperties": false, - "properties": { - "ClusterIdentifier": { - "markdownDescription": "The identifier of the cluster to be paused.", - "title": "ClusterIdentifier", - "type": "string" - } - }, - "required": [ - "ClusterIdentifier" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.ResizeClusterMessage": { - "additionalProperties": false, - "properties": { - "Classic": { - "markdownDescription": "A boolean value indicating 
whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to `false` , the resize type is elastic.", - "title": "Classic", - "type": "boolean" - }, - "ClusterIdentifier": { - "markdownDescription": "The unique identifier for the cluster to resize.", - "title": "ClusterIdentifier", - "type": "string" - }, - "ClusterType": { - "markdownDescription": "The new cluster type for the specified cluster.", - "title": "ClusterType", - "type": "string" - }, - "NodeType": { - "markdownDescription": "The new node type for the nodes you are adding. If not specified, the cluster's current node type is used.", - "title": "NodeType", - "type": "string" - }, - "NumberOfNodes": { - "markdownDescription": "The new number of nodes for the cluster. If not specified, the cluster's current number of nodes is used.", - "title": "NumberOfNodes", - "type": "number" - } - }, - "required": [ - "ClusterIdentifier" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.ResumeClusterMessage": { - "additionalProperties": false, - "properties": { - "ClusterIdentifier": { - "markdownDescription": "The identifier of the cluster to be resumed.", - "title": "ClusterIdentifier", - "type": "string" - } - }, - "required": [ - "ClusterIdentifier" - ], - "type": "object" - }, - "AWS::Redshift::ScheduledAction.ScheduledActionType": { - "additionalProperties": false, - "properties": { - "PauseCluster": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.PauseClusterMessage", - "markdownDescription": "An action that runs a `PauseCluster` API operation.", - "title": "PauseCluster" - }, - "ResizeCluster": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResizeClusterMessage", - "markdownDescription": "An action that runs a `ResizeCluster` API operation.", - "title": "ResizeCluster" - }, - "ResumeCluster": { - "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResumeClusterMessage", - "markdownDescription": "An action that runs 
a `ResumeCluster` API operation.", - "title": "ResumeCluster" - } - }, - "type": "object" - }, - "AWS::RedshiftServerless::Namespace": { - "additionalProperties": false, - "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", + "Enable": { + "markdownDescription": "If true, the schedule is enabled. If false, the scheduled action does not trigger. For more information about `state` of the scheduled action, see `ScheduledAction` .", + "title": "Enable", + "type": "boolean" + }, + "EndTime": { + "markdownDescription": "The end time in UTC when the schedule is no longer active. After this time, the scheduled action does not trigger.", + "title": "EndTime", + "type": "string" + }, + "IamRole": { + "markdownDescription": "The IAM role to assume to run the scheduled action. This IAM role must have permission to run the Amazon Redshift API operation in the scheduled action. This IAM role must allow the Amazon Redshift scheduler (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, see [Using Identity-Based Policies for Amazon Redshift](https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html) in the *Amazon Redshift Cluster Management Guide* .", + "title": "IamRole", + "type": "string" + }, + "Schedule": { + "markdownDescription": "The schedule for a one-time (at format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour.\n\nFormat of at expressions is \" `at(yyyy-mm-ddThh:mm:ss)` \". For example, \" `at(2016-03-04T17:27:00)` \".\n\nFormat of cron expressions is \" `cron(Minutes Hours Day-of-month Month Day-of-week Year)` \". For example, \" `cron(0 10 ? * MON *)` \". 
For more information, see [Cron Expressions](https://docs.aws.amazon.com//AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch Events User Guide* .", + "title": "Schedule", + "type": "string" + }, + "ScheduledActionDescription": { + "markdownDescription": "The description of the scheduled action.", + "title": "ScheduledActionDescription", + "type": "string" + }, + "ScheduledActionName": { + "markdownDescription": "The name of the scheduled action.", + "title": "ScheduledActionName", + "type": "string" + }, + "StartTime": { + "markdownDescription": "The start time in UTC when the schedule is active. Before this time, the scheduled action does not trigger.", + "title": "StartTime", + "type": "string" + }, + "TargetAction": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ScheduledActionType", + "markdownDescription": "A JSON format string of the Amazon Redshift API operation with input parameters.\n\n\" `{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ds2.8xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}` \".", + "title": "TargetAction" + } + }, + "required": [ + "ScheduledActionName" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Redshift::ScheduledAction" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.PauseClusterMessage": { + "additionalProperties": false, + "properties": { + "ClusterIdentifier": { + "markdownDescription": "The identifier of the cluster to be paused.", + "title": "ClusterIdentifier", + "type": "string" + } + }, + "required": [ + "ClusterIdentifier" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.ResizeClusterMessage": { + "additionalProperties": false, + "properties": { + "Classic": { + "markdownDescription": "A boolean value indicating 
whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to `false` , the resize type is elastic.", + "title": "Classic", + "type": "boolean" + }, + "ClusterIdentifier": { + "markdownDescription": "The unique identifier for the cluster to resize.", + "title": "ClusterIdentifier", + "type": "string" + }, + "ClusterType": { + "markdownDescription": "The new cluster type for the specified cluster.", + "title": "ClusterType", + "type": "string" + }, + "NodeType": { + "markdownDescription": "The new node type for the nodes you are adding. If not specified, the cluster's current node type is used.", + "title": "NodeType", + "type": "string" + }, + "NumberOfNodes": { + "markdownDescription": "The new number of nodes for the cluster. If not specified, the cluster's current number of nodes is used.", + "title": "NumberOfNodes", + "type": "number" + } + }, + "required": [ + "ClusterIdentifier" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.ResumeClusterMessage": { + "additionalProperties": false, + "properties": { + "ClusterIdentifier": { + "markdownDescription": "The identifier of the cluster to be resumed.", + "title": "ClusterIdentifier", + "type": "string" + } + }, + "required": [ + "ClusterIdentifier" + ], + "type": "object" + }, + "AWS::Redshift::ScheduledAction.ScheduledActionType": { + "additionalProperties": false, + "properties": { + "PauseCluster": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.PauseClusterMessage", + "markdownDescription": "An action that runs a `PauseCluster` API operation.", + "title": "PauseCluster" + }, + "ResizeCluster": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResizeClusterMessage", + "markdownDescription": "An action that runs a `ResizeCluster` API operation.", + "title": "ResizeCluster" + }, + "ResumeCluster": { + "$ref": "#/definitions/AWS::Redshift::ScheduledAction.ResumeClusterMessage", + "markdownDescription": "An action that runs 
a `ResumeCluster` API operation.", + "title": "ResumeCluster" + } + }, + "type": "object" + }, + "AWS::RedshiftServerless::Namespace": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret. You can only use this parameter if `ManageAdminPassword` is `true` .", + "title": "AdminPasswordSecretKmsKeyId", "type": "string" }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { "AdminUserPassword": { "markdownDescription": "The password of the administrator for the primary database created in the namespace.", "title": "AdminUserPassword", @@ -218410,11 +221475,26 @@ "title": "LogExports", "type": "array" }, + "ManageAdminPassword": { + "markdownDescription": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials. You can't use `AdminUserPassword` if `ManageAdminPassword` is true. If `ManageAdminPassword` is `false` or not set, Amazon Redshift uses `AdminUserPassword` for the admin user account's password.", + "title": "ManageAdminPassword", + "type": "boolean" + }, "NamespaceName": { "markdownDescription": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. 
A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "title": "NamespaceName", "type": "string" }, + "NamespaceResourcePolicy": { + "markdownDescription": "The resource policy that will be attached to the namespace.", + "title": "NamespaceResourcePolicy", + "type": "object" + }, + "RedshiftIdcApplicationArn": { + "markdownDescription": "The ARN for the Redshift application that integrates with IAM Identity Center.", + "title": "RedshiftIdcApplicationArn", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -218453,6 +221533,16 @@ "AWS::RedshiftServerless::Namespace.Namespace": { "additionalProperties": false, "properties": { + "AdminPasswordSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for the namespace's admin user credentials secret.", + "title": "AdminPasswordSecretArn", + "type": "string" + }, + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", + "title": "AdminPasswordSecretKmsKeyId", + "type": "string" + }, "AdminUsername": { "markdownDescription": "The username of the administrator for the first database created in the namespace.", "title": "AdminUsername", @@ -218570,6 +221660,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. 
The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -218762,6 +221857,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -222266,6 +225366,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. 
However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSet.GeoProximityLocation", + "markdownDescription": "*GeoproximityLocation resource record sets only:* A complex type that lets you control how Route\u00a053 responds to DNS queries based on the geographic origin of the query and your resources.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, 
the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. 
In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -222397,6 +225502,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSet.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSet.GeoLocation": { "additionalProperties": false, "properties": { @@ -222418,6 +225543,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSet.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. 
Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSet.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup": { "additionalProperties": false, "properties": { @@ -222544,6 +225695,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSetGroup.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSetGroup.GeoLocation": { 
"additionalProperties": false, "properties": { @@ -222565,6 +225736,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSetGroup.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. 
For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup.RecordSet": { "additionalProperties": false, "properties": { @@ -222588,6 +225785,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. 
However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.GeoProximityLocation", + "markdownDescription": "A complex type that contains information about a geographic location.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. 
When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group 
of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -223705,6 +226907,11 @@ "markdownDescription": "The priority of the rule in the rule group. This value must be unique within the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting.", "title": "Priority", "type": "number" + }, + "Qtype": { + "markdownDescription": "The DNS query type you want the rule to evaluate. Allowed values are;\n\n- A: Returns an IPv4 address.\n- AAAA: Returns an Ipv6 address.\n- CAA: Restricts CAs that can create SSL/TLS certifications for the domain.\n- CNAME: Returns another domain name.\n- DS: Record that identifies the DNSSEC signing key of a delegated zone.\n- MX: Specifies mail servers.\n- NAPTR: Regular-expression-based rewriting of domain names.\n- NS: Authoritative name servers.\n- PTR: Maps an IP address to a domain name.\n- SOA: Start of authority record for the zone.\n- SPF: Lists the servers authorized to send emails from a domain.\n- SRV: Application specific values that identify servers.\n- TXT: Verifies email senders and application-specific values.\n- A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPE NUMBER , where the NUMBER can be 1-65334, for example, TYPE28. 
For more information, see [List of DNS record types](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/List_of_DNS_record_types) .", + "title": "Qtype", + "type": "string" } }, "required": [ @@ -231778,7 +234985,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *`BLOCK`* : Packages in the `RejectedPatches` list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as `InstalledRejected` .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. 
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", "title": "RejectedPatchesAction", "type": "string" }, @@ -233843,6 +237050,11 @@ "title": "AppImageConfigName", "type": "string" }, + "JupyterLabAppImageConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig", + "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.", + "title": "JupyterLabAppImageConfig" + }, "KernelGatewayImageConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig", "markdownDescription": "The configuration for the file system and kernels in the SageMaker image.", @@ -233883,6 +237095,56 @@ ], "type": "object" }, + "AWS::SageMaker::AppImageConfig.ContainerConfig": { + "additionalProperties": false, + "properties": { + "ContainerArguments": { + "items": { + "type": "string" + }, + "markdownDescription": "The arguments for the container when you're running the application.", + "title": "ContainerArguments", + "type": "array" + }, + "ContainerEntrypoint": { + "items": { + "type": "string" + }, + "markdownDescription": "The entrypoint used to run the application in the container.", + "title": "ContainerEntrypoint", + "type": "array" + }, + "ContainerEnvironmentVariables": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable" + }, + "markdownDescription": "The environment variables to set in the container", + "title": "ContainerEnvironmentVariables", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key that identifies a container environment 
variable.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the container environment variable.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, "AWS::SageMaker::AppImageConfig.FileSystemConfig": { "additionalProperties": false, "properties": { @@ -233904,6 +237166,17 @@ }, "type": "object" }, + "AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig": { + "additionalProperties": false, + "properties": { + "ContainerConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.ContainerConfig", + "markdownDescription": "The configuration used to run the application image container.", + "title": "ContainerConfig" + } + }, + "type": "object" + }, "AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig": { "additionalProperties": false, "properties": { @@ -235063,9 +238336,33 @@ }, "type": "object" }, + "AWS::SageMaker::Domain.DockerSettings": { + "additionalProperties": false, + "properties": { + "EnableDockerAccess": { + "markdownDescription": "Indicates whether the domain can access Docker.", + "title": "EnableDockerAccess", + "type": "string" + }, + "VpcOnlyTrustedAccounts": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of AWS accounts that are trusted when the domain is created in VPC-only mode.", + "title": "VpcOnlyTrustedAccounts", + "type": "array" + } + }, + "type": "object" + }, "AWS::SageMaker::Domain.DomainSettings": { "additionalProperties": false, "properties": { + "DockerSettings": { + "$ref": "#/definitions/AWS::SageMaker::Domain.DockerSettings", + "markdownDescription": "A collection of settings that configure the domain's Docker interaction.", + "title": "DockerSettings" + }, "RStudioServerProDomainSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.RStudioServerProDomainSettings", "markdownDescription": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", @@ -236209,7 
+239506,7 @@ "type": "number" }, "ProvisionedConcurrency": { - "markdownDescription": "", + "markdownDescription": "The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to `MaxConcurrency` .\n\n> This field is not supported for serverless endpoint recommendations for Inference Recommender jobs. For more information about creating an Inference Recommender job, see [CreateInferenceRecommendationsJobs](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateInferenceRecommendationsJob.html) .", "title": "ProvisionedConcurrency", "type": "number" } @@ -236455,6 +239752,11 @@ "markdownDescription": "Option for different tiers of low latency storage for real-time data retrieval.\n\n- `Standard` : A managed low latency data store for feature groups.\n- `InMemory` : A managed data store for feature groups that supports very low latency retrieval.", "title": "StorageType", "type": "string" + }, + "TtlDuration": { + "$ref": "#/definitions/AWS::SageMaker::FeatureGroup.TtlDuration", + "markdownDescription": "Time to live duration, where the record is hard deleted after the expiration time is reached; `ExpiresAt` = `EventTime` + `TtlDuration` . 
For information on HardDelete, see the [DeleteRecord](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_feature_store_DeleteRecord.html) API in the Amazon SageMaker API Reference guide.", + "title": "TtlDuration" } }, "type": "object" @@ -236513,6 +239815,22 @@ ], "type": "object" }, + "AWS::SageMaker::FeatureGroup.TtlDuration": { + "additionalProperties": false, + "properties": { + "Unit": { + "markdownDescription": "`TtlDuration` time unit.", + "title": "Unit", + "type": "string" + }, + "Value": { + "markdownDescription": "`TtlDuration` time value.", + "title": "Value", + "type": "number" + } + }, + "type": "object" + }, "AWS::SageMaker::Image": { "additionalProperties": false, "properties": { @@ -242278,6 +245596,16 @@ "title": "DomainId", "type": "string" }, + "OwnershipSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.OwnershipSettings", + "markdownDescription": "The collection of ownership settings for a space.", + "title": "OwnershipSettings" + }, + "SpaceDisplayName": { + "markdownDescription": "The name of the space that appears in the Studio UI.", + "title": "SpaceDisplayName", + "type": "string" + }, "SpaceName": { "markdownDescription": "The name of the space.", "title": "SpaceName", @@ -242288,6 +245616,11 @@ "markdownDescription": "A collection of space settings.", "title": "SpaceSettings" }, + "SpaceSharingSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceSharingSettings", + "markdownDescription": "A collection of space sharing settings.", + "title": "SpaceSharingSettings" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -242324,6 +245657,31 @@ ], "type": "object" }, + "AWS::SageMaker::Space.CodeRepository": { + "additionalProperties": false, + "properties": { + "RepositoryUrl": { + "markdownDescription": "The URL of the Git repository.", + "title": "RepositoryUrl", + "type": "string" + } + }, + "required": [ + "RepositoryUrl" + ], + "type": "object" + }, + "AWS::SageMaker::Space.CustomFileSystem": { + 
"additionalProperties": false, + "properties": { + "EFSFileSystem": { + "$ref": "#/definitions/AWS::SageMaker::Space.EFSFileSystem", + "markdownDescription": "A custom file system in Amazon EFS.", + "title": "EFSFileSystem" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.CustomImage": { "additionalProperties": false, "properties": { @@ -242349,6 +245707,34 @@ ], "type": "object" }, + "AWS::SageMaker::Space.EFSFileSystem": { + "additionalProperties": false, + "properties": { + "FileSystemId": { + "markdownDescription": "The ID of your Amazon EFS file system.", + "title": "FileSystemId", + "type": "string" + } + }, + "required": [ + "FileSystemId" + ], + "type": "object" + }, + "AWS::SageMaker::Space.EbsStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsVolumeSizeInGb": { + "markdownDescription": "The size of an EBS storage volume for a private space.", + "title": "EbsVolumeSizeInGb", + "type": "number" + } + }, + "required": [ + "EbsVolumeSizeInGb" + ], + "type": "object" + }, "AWS::SageMaker::Space.JupyterServerAppSettings": { "additionalProperties": false, "properties": { @@ -242379,6 +245765,20 @@ }, "type": "object" }, + "AWS::SageMaker::Space.OwnershipSettings": { + "additionalProperties": false, + "properties": { + "OwnerUserProfileName": { + "markdownDescription": "The user profile who is the owner of the private space.", + "title": "OwnerUserProfileName", + "type": "string" + } + }, + "required": [ + "OwnerUserProfileName" + ], + "type": "object" + }, "AWS::SageMaker::Space.ResourceSpec": { "additionalProperties": false, "properties": { @@ -242400,9 +245800,62 @@ }, "type": "object" }, + "AWS::SageMaker::Space.SpaceCodeEditorAppSettings": { + "additionalProperties": false, + "properties": { + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + 
"title": "DefaultResourceSpec" + } + }, + "type": "object" + }, + "AWS::SageMaker::Space.SpaceJupyterLabAppSettings": { + "additionalProperties": false, + "properties": { + "CodeRepositories": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CodeRepository" + }, + "markdownDescription": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", + "title": "CodeRepositories", + "type": "array" + }, + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + "title": "DefaultResourceSpec" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.SpaceSettings": { "additionalProperties": false, "properties": { + "AppType": { + "markdownDescription": "The type of app created within the space.", + "title": "AppType", + "type": "string" + }, + "CodeEditorAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceCodeEditorAppSettings", + "markdownDescription": "The Code Editor application settings.", + "title": "CodeEditorAppSettings" + }, + "CustomFileSystems": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CustomFileSystem" + }, + "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. 
Permitted users can access this file system in Amazon SageMaker Studio.", + "title": "CustomFileSystems", + "type": "array" + }, + "JupyterLabAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceJupyterLabAppSettings", + "markdownDescription": "The settings for the JupyterLab application.", + "title": "JupyterLabAppSettings" + }, "JupyterServerAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Space.JupyterServerAppSettings", "markdownDescription": "The JupyterServer app settings.", @@ -242412,6 +245865,36 @@ "$ref": "#/definitions/AWS::SageMaker::Space.KernelGatewayAppSettings", "markdownDescription": "The KernelGateway app settings.", "title": "KernelGatewayAppSettings" + }, + "SpaceStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceStorageSettings", + "markdownDescription": "The storage settings for a private space.", + "title": "SpaceStorageSettings" + } + }, + "type": "object" + }, + "AWS::SageMaker::Space.SpaceSharingSettings": { + "additionalProperties": false, + "properties": { + "SharingType": { + "markdownDescription": "Specifies the sharing type of the space.", + "title": "SharingType", + "type": "string" + } + }, + "required": [ + "SharingType" + ], + "type": "object" + }, + "AWS::SageMaker::Space.SpaceStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.EbsStorageSettings", + "markdownDescription": "A collection of EBS storage settings for a private space.", + "title": "EbsStorageSettings" } }, "type": "object" @@ -247530,7 +251013,7 @@ "type": "array" }, "RoleArn": { - "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. 
This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- You must have the `iam:PassRole` permission. For more information, see [Granting a user permissions to pass a role to an AWS service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.", + "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. 
This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.", "title": "RoleArn", "type": "string" } @@ -247827,7 +251310,7 @@ "additionalProperties": false, "properties": { "Aggregation": { - "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- Sum - Use the total traffic across the group. This is a good choice for most cases. 
Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", + "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- `Sum` - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- `Mean` - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- `Max` - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", "title": "Aggregation", "type": "string" }, @@ -251508,7 +254991,8 @@ } }, "required": [ - "Configuration" + "Configuration", + "PolicyStoreId" ], "type": "object" }, @@ -251904,6 +255388,7 @@ } }, "required": [ + "PolicyStoreId", "Statement" ], "type": "object" @@ -255503,9 +258988,6 @@ "AWS::WAFv2::LoggingConfiguration.FieldToMatch": { "additionalProperties": false, "properties": { - "JsonBody": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody" - }, "Method": { "markdownDescription": "Redact the indicated HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.", "title": "Method", @@ -255558,25 +259040,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.JsonBody": { - "additionalProperties": false, - "properties": { - "InvalidFallbackBehavior": { - "type": "string" - }, - "MatchPattern": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern" - }, - "MatchScope": { - "type": "string" - } - }, - "required": [ - "MatchPattern", - "MatchScope" - ], - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.LabelNameCondition": { "additionalProperties": false, "properties": { @@ -255614,21 +259077,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.MatchPattern": { - "additionalProperties": false, - "properties": { - "All": { - "type": "object" - }, - "IncludedPaths": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.SingleHeader": { "additionalProperties": false, "properties": { @@ -255912,7 +259360,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256160,7 +259608,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -256173,9 +259621,14 @@ "markdownDescription": "Inspect the request headers. 
You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -256357,6 +259810,20 @@ ], "type": "object" }, + "AWS::WAFv2::RuleGroup.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::RuleGroup.JsonBody": { "additionalProperties": false, "properties": { @@ -256376,7 +259843,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256498,6 +259965,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -256978,7 +260450,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). 
For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. 
For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -257100,7 +260572,7 @@ "properties": { "AssociationConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.AssociationConfig", - "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected resources forward to AWS WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "title": "AssociationConfig" }, "CaptchaConfig": { @@ -257319,7 +260791,7 @@ "properties": { "RequestBody": { "additionalProperties": false, - "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. 
For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access resources forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes). You can change the setting for any of the available resource types.\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nExample JSON: `{ \"API_GATEWAY\": \"KB_48\", \"APP_RUNNER_SERVICE\": \"KB_32\" }`\n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "patternProperties": { "^[a-zA-Z0-9]+$": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RequestBodyAssociatedResourceTypeConfig" @@ -257346,7 +260818,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -257638,7 +261110,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::WebACL.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -257651,9 +261123,14 @@ "markdownDescription": "Inspect the request headers. 
You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::WebACL.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::WebACL.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -257835,6 +261312,20 @@ ], "type": "object" }, + "AWS::WAFv2::WebACL.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::WebACL.JsonBody": { "additionalProperties": false, "properties": { @@ -257854,7 +261345,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -258076,6 +261567,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -258328,7 +261824,7 @@ "additionalProperties": false, "properties": { "DefaultSizeInspectionLimit": { - "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to AWS WAF for inspection. 
This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`", + "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resource should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`", "title": "DefaultSizeInspectionLimit", "type": "string" } @@ -258848,7 +262344,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::WebACL.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). 
For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -261874,6 +265370,9 @@ { "$ref": "#/definitions/AWS::CodeArtifact::Repository" }, + { + "$ref": "#/definitions/AWS::CodeBuild::Fleet" + }, { "$ref": "#/definitions/AWS::CodeBuild::Project" }, @@ -262075,6 +265574,9 @@ { "$ref": "#/definitions/AWS::ConnectCampaigns::Campaign" }, + { + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline" + }, { "$ref": "#/definitions/AWS::ControlTower::EnabledControl" }, @@ -262201,6 +265703,27 @@ { "$ref": "#/definitions/AWS::DataSync::Task" }, + { + "$ref": "#/definitions/AWS::DataZone::DataSource" + }, + { + "$ref": "#/definitions/AWS::DataZone::Domain" + }, + { + "$ref": "#/definitions/AWS::DataZone::Environment" + }, + { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration" + }, + { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile" + }, + { + "$ref": "#/definitions/AWS::DataZone::Project" + }, + { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget" + }, { "$ref": "#/definitions/AWS::Detective::Graph" }, @@ -262897,6 +266420,9 @@ { "$ref": "#/definitions/AWS::Glue::Table" }, + { + "$ref": "#/definitions/AWS::Glue::TableOptimizer" + }, { "$ref": 
"#/definitions/AWS::Glue::Trigger" }, @@ -263050,6 +266576,9 @@ { "$ref": "#/definitions/AWS::IVS::RecordingConfiguration" }, + { + "$ref": "#/definitions/AWS::IVS::Stage" + }, { "$ref": "#/definitions/AWS::IVS::StreamKey" }, @@ -263101,6 +266630,9 @@ { "$ref": "#/definitions/AWS::Inspector::ResourceGroup" }, + { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration" + }, { "$ref": "#/definitions/AWS::InspectorV2::Filter" }, @@ -264088,6 +267620,9 @@ { "$ref": "#/definitions/AWS::RDS::GlobalCluster" }, + { + "$ref": "#/definitions/AWS::RDS::Integration" + }, { "$ref": "#/definitions/AWS::RDS::OptionGroup" },