diff --git a/bin/_file_formatter.py b/bin/_file_formatter.py
index 33bdea4d7..7e092b234 100644
--- a/bin/_file_formatter.py
+++ b/bin/_file_formatter.py
@@ -1,4 +1,5 @@
 """Formatter base class for JSONFormatter and YamlFormatter."""
+
 import argparse
 import os
 import sys
diff --git a/bin/public_interface.py b/bin/public_interface.py
index 8fd6c9928..7c4788939 100755
--- a/bin/public_interface.py
+++ b/bin/public_interface.py
@@ -101,13 +101,15 @@ def _print(signature: Dict[str, inspect.Signature], variables: Set[str]) -> None
     result: Dict[str, Any] = {"routines": {}, "variables": sorted(variables)}
     for key, value in signature.items():
         result["routines"][key] = [
-            {
-                "name": parameter.name,
-                "kind": parameter.kind.name,
-                "default": parameter.default,
-            }
-            if parameter.default != inspect.Parameter.empty
-            else {"name": parameter.name, "kind": parameter.kind.name}
+            (
+                {
+                    "name": parameter.name,
+                    "kind": parameter.kind.name,
+                    "default": parameter.default,
+                }
+                if parameter.default != inspect.Parameter.empty
+                else {"name": parameter.name, "kind": parameter.kind.name}
+            )
             for parameter in value.parameters.values()
         ]
     print(json.dumps(result, indent=2, sort_keys=True))
diff --git a/integration/combination/test_function_with_all_event_types.py b/integration/combination/test_function_with_all_event_types.py
index 1a2913bff..f593579aa 100644
--- a/integration/combination/test_function_with_all_event_types.py
+++ b/integration/combination/test_function_with_all_event_types.py
@@ -1,13 +1,13 @@
 from unittest.case import skipIf
 
-from integration.config.service_names import IOT, SCHEDULE_EVENT
+from integration.config.service_names import IOT, LOGS, SCHEDULE_EVENT
 from integration.helpers.base_test import BaseTest
 from integration.helpers.resource import current_region_does_not_support, generate_suffix
 
 
 @skipIf(
-    current_region_does_not_support([IOT, SCHEDULE_EVENT]),
-    "IoT, ScheduleEvent is not supported in this testing region",
+    current_region_does_not_support([IOT, SCHEDULE_EVENT, LOGS]),
+    "IoT, ScheduleEvent or a Logs resource is not supported in this testing region",
 )
 class TestFunctionWithAllEventTypes(BaseTest):
     def test_function_with_all_event_types(self):
diff --git a/integration/combination/test_function_with_cloudwatch_log.py b/integration/combination/test_function_with_cloudwatch_log.py
index e00974ba9..34641fc44 100644
--- a/integration/combination/test_function_with_cloudwatch_log.py
+++ b/integration/combination/test_function_with_cloudwatch_log.py
@@ -1,6 +1,14 @@
+from unittest.case import skipIf
+
+from integration.config.service_names import LOGS
 from integration.helpers.base_test import BaseTest
+from integration.helpers.resource import current_region_does_not_support
 
 
+@skipIf(
+    current_region_does_not_support([LOGS]),
+    "A Logs resource that is a part of this test is not supported in this testing region",
+)
 class TestFunctionWithCloudWatchLog(BaseTest):
     def test_function_with_cloudwatch_log(self):
         self.create_and_verify_stack("combination/function_with_cloudwatch_log")
diff --git a/integration/config/service_names.py b/integration/config/service_names.py
index 0f6847c13..14f96b3e1 100644
--- a/integration/config/service_names.py
+++ b/integration/config/service_names.py
@@ -35,3 +35,4 @@
 API_KEY = "ApiKey"
 APP_SYNC = "AppSync"
 SNS_FILTER_POLICY_SCOPE = "SnsFilterPolicyScope"
+LOGS = "Logs"
diff --git a/integration/helpers/deployer/deployer.py b/integration/helpers/deployer/deployer.py
index 7e7c60ec9..c8073f512 100644
--- a/integration/helpers/deployer/deployer.py
+++ b/integration/helpers/deployer/deployer.py
@@ -208,9 +208,9 @@ def describe_changeset(self, change_set_id, stack_name, **kwargs):
                 {
                     "LogicalResourceId": resource_props.get("LogicalResourceId"),
                     "ResourceType": resource_props.get("ResourceType"),
-                    "Replacement": "N/A"
-                    if resource_props.get("Replacement") is None
-                    else resource_props.get("Replacement"),
+                    "Replacement": (
+                        "N/A" if resource_props.get("Replacement") is None else resource_props.get("Replacement")
+                    ),
                 }
             )
diff --git a/integration/helpers/deployer/utils/colors.py b/integration/helpers/deployer/utils/colors.py
index 3e1f90d0e..e97e395b3 100644
--- a/integration/helpers/deployer/utils/colors.py
+++ b/integration/helpers/deployer/utils/colors.py
@@ -2,6 +2,7 @@
 Wrapper to generated colored messages for printing in Terminal
 This was ported over from the sam-cli repo
 """
+
 from typing import Dict, Literal
 
 SupportedColor = Literal["red", "green", "yellow"]
diff --git a/integration/helpers/deployer/utils/retry.py b/integration/helpers/deployer/utils/retry.py
index 6e3fe357c..d0cd85bdf 100644
--- a/integration/helpers/deployer/utils/retry.py
+++ b/integration/helpers/deployer/utils/retry.py
@@ -1,6 +1,7 @@
 """
 Retry decorator to retry decorated function based on Exception with exponential backoff and number of attempts built-in.
 """
+
 import math
 import random
 import time
diff --git a/integration/helpers/deployer/utils/table_print.py b/integration/helpers/deployer/utils/table_print.py
index 8c6b23110..7fc20b33c 100644
--- a/integration/helpers/deployer/utils/table_print.py
+++ b/integration/helpers/deployer/utils/table_print.py
@@ -2,6 +2,7 @@
 Utilities for table pretty printing
 This was ported over from the sam-cli repo
 """
+
 import shutil
 import textwrap
 from functools import wraps
diff --git a/integration/helpers/s3_uploader.py b/integration/helpers/s3_uploader.py
index 125d1fc75..4b5de217b 100644
--- a/integration/helpers/s3_uploader.py
+++ b/integration/helpers/s3_uploader.py
@@ -1,6 +1,7 @@
 """
 Client for uploading files to s3
 """
+
 import logging
 from typing import Any
diff --git a/requirements/dev.txt b/requirements/dev.txt
index de81b0cf7..79952c65e 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -19,7 +19,7 @@ tenacity~=8.0
 requests~=2.28
 
 # formatter
-black==23.10.1
+black==24.3.0
 ruamel.yaml==0.17.21 # It can parse yaml while perserving comments
 
 # type check
diff --git a/samtranslator/__init__.py b/samtranslator/__init__.py
index ca2330064..9e9fe4198 100644
--- a/samtranslator/__init__.py
+++ b/samtranslator/__init__.py
@@ -1 +1 @@
-__version__ = "1.87.0"
+__version__ = "1.88.0"
diff --git a/samtranslator/internal/deprecation_control.py b/samtranslator/internal/deprecation_control.py
index b75522ad9..dfb7702f7 100644
--- a/samtranslator/internal/deprecation_control.py
+++ b/samtranslator/internal/deprecation_control.py
@@ -9,6 +9,7 @@
 If external packages import deprecated interfaces,
 it is their responsibility to detect and remove them.
""" + import warnings from functools import wraps from typing import Callable, Optional, TypeVar diff --git a/samtranslator/metrics/method_decorator.py b/samtranslator/metrics/method_decorator.py index 6ce388fa8..fa152b1b7 100644 --- a/samtranslator/metrics/method_decorator.py +++ b/samtranslator/metrics/method_decorator.py @@ -1,6 +1,7 @@ """ Method decorator for execution latency collection """ + import functools import logging from datetime import datetime @@ -84,13 +85,13 @@ def _send_cw_metric(prefix, name, execution_time_ms, func, args): # type: ignor @overload def cw_timer( *, name: Optional[str] = None, prefix: Optional[str] = None -) -> Callable[[Callable[_PT, _RT]], Callable[_PT, _RT]]: - ... +) -> Callable[[Callable[_PT, _RT]], Callable[_PT, _RT]]: ... @overload -def cw_timer(_func: Callable[_PT, _RT], name: Optional[str] = None, prefix: Optional[str] = None) -> Callable[_PT, _RT]: - ... +def cw_timer( + _func: Callable[_PT, _RT], name: Optional[str] = None, prefix: Optional[str] = None +) -> Callable[_PT, _RT]: ... def cw_timer( diff --git a/samtranslator/model/__init__.py b/samtranslator/model/__init__.py index 179f6c72b..e47131793 100644 --- a/samtranslator/model/__init__.py +++ b/samtranslator/model/__init__.py @@ -1,4 +1,5 @@ """ CloudFormation Resource serialization, deserialization, and validation """ + import inspect import re from abc import ABC, ABCMeta, abstractmethod diff --git a/samtranslator/model/api/http_api_generator.py b/samtranslator/model/api/http_api_generator.py index b2e9f2ec5..f97199037 100644 --- a/samtranslator/model/api/http_api_generator.py +++ b/samtranslator/model/api/http_api_generator.py @@ -777,9 +777,7 @@ def _add_title(self) -> None: self.definition_body = open_api_editor.openapi @cw_timer(prefix="Generator", name="HttpApi") - def to_cloudformation( - self, route53_record_set_groups: Dict[str, Route53RecordSetGroup] - ) -> Tuple[ + def to_cloudformation(self, route53_record_set_groups: Dict[str, Route53RecordSetGroup]) -> Tuple[ ApiGatewayV2HttpApi, Optional[ApiGatewayV2Stage], Optional[ApiGatewayV2DomainName], diff --git a/samtranslator/model/apigatewayv2.py b/samtranslator/model/apigatewayv2.py index c5cae3ea7..abed87dbe 100644 --- a/samtranslator/model/apigatewayv2.py +++ b/samtranslator/model/apigatewayv2.py @@ -129,7 +129,9 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 self.api_logical_id = api_logical_id self.name = name self.authorization_scopes = authorization_scopes - self.jwt_configuration: Optional[JwtConfiguration] = self._get_jwt_configuration(jwt_configuration) + self.jwt_configuration: Optional[JwtConfiguration] = self._get_jwt_configuration( + jwt_configuration, api_logical_id + ) self.id_source = id_source self.function_arn = function_arn self.function_invoke_role = function_invoke_role @@ -344,7 +346,9 @@ def _get_identity_source(self, auth_identity: Dict[str, Any]) -> List[str]: return identity_source @staticmethod - def _get_jwt_configuration(props: Optional[Dict[str, Union[str, List[str]]]]) -> Optional[JwtConfiguration]: + def _get_jwt_configuration( + props: Optional[Dict[str, Union[str, List[str]]]], api_logical_id: str + ) -> Optional[JwtConfiguration]: """Make sure that JWT configuration dict keys are lower case. 
ApiGatewayV2Authorizer doesn't create `AWS::ApiGatewayV2::Authorizer` but generates @@ -359,8 +363,8 @@ def _get_jwt_configuration(props: Optional[Dict[str, Union[str, List[str]]]]) -> Parameters ---------- - props - jwt configuration dict with the keys either lower case or capitalized + props: jwt configuration dict with the keys either lower case or capitalized + api_logical_id: logical id of the Serverless Api resource with the jwt configuration Returns ------- @@ -368,4 +372,5 @@ def _get_jwt_configuration(props: Optional[Dict[str, Union[str, List[str]]]]) -> """ if not props: return None + sam_expect(props, api_logical_id, "JwtConfiguration").to_be_a_map() return {k.lower(): v for k, v in props.items()} diff --git a/samtranslator/model/architecture.py b/samtranslator/model/architecture.py index 12a4c4be9..1a4946ddc 100644 --- a/samtranslator/model/architecture.py +++ b/samtranslator/model/architecture.py @@ -1,5 +1,6 @@ """ Enum for determining type of architectures for Lambda Function. """ + ARM64 = "arm64" X86_64 = "x86_64" diff --git a/samtranslator/model/sam_resources.py b/samtranslator/model/sam_resources.py index 4aeed875c..e7b32a88b 100644 --- a/samtranslator/model/sam_resources.py +++ b/samtranslator/model/sam_resources.py @@ -1,4 +1,5 @@ """ SAM macro definitions """ + import copy from contextlib import suppress from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union, cast diff --git a/samtranslator/model/types.py b/samtranslator/model/types.py index 32b3c7c4a..5c86b8b15 100644 --- a/samtranslator/model/types.py +++ b/samtranslator/model/types.py @@ -8,6 +8,7 @@ the Permissions property is an ARN or list of ARNs. In this situation, we validate that the Permissions property is either a string or a list of strings, but do not validate whether the string(s) are valid IAM policy ARNs. """ + from typing import Any, Callable, Type, Union import samtranslator.model.exceptions diff --git a/samtranslator/open_api/base_editor.py b/samtranslator/open_api/base_editor.py index 70d523d00..e5a9ef26e 100644 --- a/samtranslator/open_api/base_editor.py +++ b/samtranslator/open_api/base_editor.py @@ -1,4 +1,5 @@ """Base class for OpenApiEditor and SwaggerEditor.""" + import re from typing import Any, Dict, Iterator, List, Optional, Union diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 2432b810c..902ef3826 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -2055,6 +2055,11 @@ "title": "Password", "type": "string" }, + "ReplicationUser": { + "markdownDescription": "Defines if this user is intended for CRDR replication purposes.", + "title": "ReplicationUser", + "type": "boolean" + }, "Username": { "markdownDescription": "The username of the broker user. For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibts using guest as a valid usename. This value must be 2-100 characters long.\n\n> Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other AWS services, including CloudWatch Logs . 
Broker usernames are not intended to be used for private or sensitive data.", "title": "Username", @@ -2365,7 +2370,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", + "markdownDescription": "The environment variables for the Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", "title": "EnvironmentVariables", "type": "array" }, @@ -2474,7 +2479,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "Environment variables for the auto created branch.", + "markdownDescription": "The environment variables for the autocreated branch.", "title": "EnvironmentVariables", "type": "array" }, @@ -2551,12 +2556,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "", + "markdownDescription": "The environment variable name.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The environment variable value.", "title": "Value", "type": "string" } @@ -2815,6 +2820,16 @@ "title": "AutoSubDomainIAMRole", "type": "string" }, + "Certificate": { + "$ref": "#/definitions/AWS::Amplify::Domain.Certificate", + "markdownDescription": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", + "title": "Certificate" + }, + "CertificateSettings": { + "$ref": "#/definitions/AWS::Amplify::Domain.CertificateSettings", + "markdownDescription": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", + "title": "CertificateSettings" + }, "DomainName": { "markdownDescription": "The domain name for the domain association.", "title": "DomainName", @@ -2832,6 +2847,11 @@ "markdownDescription": "The setting for the subdomain.", "title": "SubDomainSettings", "type": "array" + }, + "UpdateStatus": { + "markdownDescription": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. 
This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.", + "title": "UpdateStatus", + "type": "string" } }, "required": [ @@ -2862,6 +2882,43 @@ ], "type": "object" }, + "AWS::Amplify::Domain.Certificate": { + "additionalProperties": false, + "properties": { + "CertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for a custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CertificateArn", + "type": "string" + }, + "CertificateType": { + "markdownDescription": "The type of SSL/TLS certificate that you want to use.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CertificateVerificationDNSRecord": { + "markdownDescription": "The DNS record for certificate verification.", + "title": "CertificateVerificationDNSRecord", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Amplify::Domain.CertificateSettings": { + "additionalProperties": false, + "properties": { + "CertificateType": { + "markdownDescription": "The certificate type.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). 
For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CustomCertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for the custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CustomCertificateArn", + "type": "string" + } + }, + "type": "object" + }, "AWS::Amplify::Domain.SubDomainSetting": { "additionalProperties": false, "properties": { @@ -3024,14 +3081,6 @@ "type": "array" } }, - "required": [ - "BindingProperties", - "ComponentType", - "Name", - "Overrides", - "Properties", - "Variants" - ], "type": "object" }, "Type": { @@ -3050,8 +3099,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3169,6 +3217,11 @@ "title": "Predicates", "type": "array" }, + "SlotName": { + "markdownDescription": "The name of a component slot.", + "title": "SlotName", + "type": "string" + }, "UserAttribute": { "markdownDescription": "An authenticated user attribute.", "title": "UserAttribute", @@ -3219,6 +3272,11 @@ }, "title": "Properties", "type": "object" + }, + "SourceId": { + "markdownDescription": "The unique ID of the child component in its original source system, such as Figma.", + "title": "SourceId", + "type": "string" } }, "required": [ @@ -3312,6 +3370,11 @@ "title": "Action", "type": "string" }, + "BindingEvent": { + "markdownDescription": "Binds an event to an action on a component. When you specify a `bindingEvent` , the event is called when the action is performed.", + "title": "BindingEvent", + "type": "string" + }, "Parameters": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Component.ActionParameters", "markdownDescription": "Describes information about the action.", @@ -3518,6 +3581,11 @@ "title": "Operand", "type": "string" }, + "OperandType": { + "markdownDescription": "The type of value to use when performing the evaluation.", + "title": "OperandType", + "type": "string" + }, "Operator": { "markdownDescription": "The operator to use to perform the evaluation.", "title": "Operator", @@ -3668,15 +3736,6 @@ "type": "object" } }, - "required": [ - "DataType", - "Fields", - "FormActionType", - "Name", - "SchemaVersion", - "SectionalElements", - "Style" - ], "type": "object" }, "Type": { @@ -3695,8 +3754,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3989,9 +4047,49 @@ ], "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue": { + "additionalProperties": false, + "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties", + "markdownDescription": "Describes the properties to customize with data at runtime.", + "title": "BindingProperties" + }, + "Type": { + "markdownDescription": "The property type.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties": { + "additionalProperties": false, + "properties": { + "Model": { + "markdownDescription": "An Amplify DataStore model.", + "title": "Model", + "type": "string" + } + }, + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormInputValueProperty": { "additionalProperties": false, "properties": { + "BindingProperties": { + "$ref": 
"#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties", + "markdownDescription": "The information to bind fields to data at runtime.", + "title": "BindingProperties" + }, + "Concat": { + "items": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValueProperty" + }, + "markdownDescription": "A list of form properties to concatenate to create the value to assign to this field property.", + "title": "Concat", + "type": "array" + }, "Value": { "markdownDescription": "The value to assign to the input field.", "title": "Value", @@ -4000,6 +4098,25 @@ }, "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties": { + "additionalProperties": false, + "properties": { + "Field": { + "markdownDescription": "The data field to bind the property to.", + "title": "Field", + "type": "string" + }, + "Property": { + "markdownDescription": "The form property to bind to the data field.", + "title": "Property", + "type": "string" + } + }, + "required": [ + "Property" + ], + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormStyle": { "additionalProperties": false, "properties": { @@ -4098,6 +4215,17 @@ "AWS::AmplifyUIBuilder::Form.ValueMappings": { "additionalProperties": false, "properties": { + "BindingProperties": { + "additionalProperties": false, + "markdownDescription": "The information to bind fields to data at runtime.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue" + } + }, + "title": "BindingProperties", + "type": "object" + }, "Values": { "items": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.ValueMapping" @@ -4190,10 +4318,6 @@ "type": "array" } }, - "required": [ - "Name", - "Values" - ], "type": "object" }, "Type": { @@ -4212,8 +4336,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -8888,6 +9011,14 @@ "title": "Description", "type": "string" }, + "DynamicExtensionParameters": { + "items": { + "$ref": "#/definitions/AWS::AppConfig::Deployment.DynamicExtensionParameters" + }, + "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "title": "DynamicExtensionParameters", + "type": "array" + }, "EnvironmentId": { "markdownDescription": "The environment ID.", "title": "EnvironmentId", @@ -8937,131 +9068,28 @@ ], "type": "object" }, - "AWS::AppConfig::Deployment.Tags": { + "AWS::AppConfig::Deployment.DynamicExtensionParameters": { "additionalProperties": false, "properties": { - "Key": { - "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . 
The tag key can be up to 128 characters and must not start with `aws:` .", - "title": "Key", - "type": "string" - }, - "Value": { - "markdownDescription": "The tag value can be up to 256 characters.", - "title": "Value", - "type": "string" - } - }, - "type": "object" - }, - "AWS::AppConfig::DeploymentStrategy": { - "additionalProperties": false, - "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ExtensionReference": { + "markdownDescription": "", + "title": "ExtensionReference", "type": "string" }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { - "DeploymentDurationInMinutes": { - "markdownDescription": "Total amount of time for a deployment to last.", - "title": "DeploymentDurationInMinutes", - "type": "number" - }, - "Description": { - "markdownDescription": "A description of the deployment strategy.", - "title": "Description", - "type": "string" - }, - "FinalBakeTimeInMinutes": { - "markdownDescription": "Specifies the amount of time AWS AppConfig monitors for Amazon CloudWatch alarms after the configuration has been deployed to 100% of its targets, before considering the deployment to be complete. If an alarm is triggered during this time, AWS AppConfig rolls back the deployment. You must configure permissions for AWS AppConfig to roll back based on CloudWatch alarms. For more information, see [Configuring permissions for rollback based on Amazon CloudWatch alarms](https://docs.aws.amazon.com/appconfig/latest/userguide/getting-started-with-appconfig-cloudwatch-alarms-permissions.html) in the *AWS AppConfig User Guide* .", - "title": "FinalBakeTimeInMinutes", - "type": "number" - }, - "GrowthFactor": { - "markdownDescription": "The percentage of targets to receive a deployed configuration during each interval.", - "title": "GrowthFactor", - "type": "number" - }, - "GrowthType": { - "markdownDescription": "The algorithm used to define how percentage grows over time. AWS AppConfig supports the following growth types:\n\n*Linear* : For this type, AWS AppConfig processes the deployment by dividing the total number of targets by the value specified for `Step percentage` . For example, a linear deployment that uses a `Step percentage` of 10 deploys the configuration to 10 percent of the hosts. After those deployments are complete, the system deploys the configuration to the next 10 percent. This continues until 100% of the targets have successfully received the configuration.\n\n*Exponential* : For this type, AWS AppConfig processes the deployment exponentially using the following formula: `G*(2^N)` . In this formula, `G` is the growth factor specified by the user and `N` is the number of steps until the configuration is deployed to all targets. 
For example, if you specify a growth factor of 2, then the system rolls out the configuration as follows:\n\n`2*(2^0)`\n\n`2*(2^1)`\n\n`2*(2^2)`\n\nExpressed numerically, the deployment rolls out as follows: 2% of the targets, 4% of the targets, 8% of the targets, and continues until the configuration has been deployed to all targets.", - "title": "GrowthType", - "type": "string" - }, - "Name": { - "markdownDescription": "A name for the deployment strategy.", - "title": "Name", - "type": "string" - }, - "ReplicateTo": { - "markdownDescription": "Save the deployment strategy to a Systems Manager (SSM) document.", - "title": "ReplicateTo", - "type": "string" - }, - "Tags": { - "items": { - "$ref": "#/definitions/AWS::AppConfig::DeploymentStrategy.Tags" - }, - "markdownDescription": "Assigns metadata to an AWS AppConfig resource. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define. You can specify a maximum of 50 tags for a resource.", - "title": "Tags", - "type": "array" - } - }, - "required": [ - "DeploymentDurationInMinutes", - "GrowthFactor", - "Name", - "ReplicateTo" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::AppConfig::DeploymentStrategy" - ], + "ParameterName": { + "markdownDescription": "", + "title": "ParameterName", "type": "string" }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ParameterValue": { + "markdownDescription": "", + "title": "ParameterValue", "type": "string" } }, - "required": [ - "Type", - "Properties" - ], "type": "object" }, - "AWS::AppConfig::DeploymentStrategy.Tags": { + "AWS::AppConfig::Deployment.Tags": { "additionalProperties": false, "properties": { "Key": { @@ -9077,7 +9105,131 @@ }, "type": "object" }, - "AWS::AppConfig::Environment": { + "AWS::AppConfig::DeploymentStrategy": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DeploymentDurationInMinutes": { + "markdownDescription": "Total amount of time for a deployment to last.", + "title": "DeploymentDurationInMinutes", + "type": "number" + }, + "Description": { + "markdownDescription": "A description of the deployment strategy.", + "title": "Description", + "type": "string" + }, + "FinalBakeTimeInMinutes": { + "markdownDescription": "Specifies the amount of time AWS AppConfig monitors for Amazon CloudWatch alarms after the configuration has been deployed to 100% of its targets, before considering the deployment to be complete. If an alarm is triggered during this time, AWS AppConfig rolls back the deployment. You must configure permissions for AWS AppConfig to roll back based on CloudWatch alarms. 
For more information, see [Configuring permissions for rollback based on Amazon CloudWatch alarms](https://docs.aws.amazon.com/appconfig/latest/userguide/getting-started-with-appconfig-cloudwatch-alarms-permissions.html) in the *AWS AppConfig User Guide* .", + "title": "FinalBakeTimeInMinutes", + "type": "number" + }, + "GrowthFactor": { + "markdownDescription": "The percentage of targets to receive a deployed configuration during each interval.", + "title": "GrowthFactor", + "type": "number" + }, + "GrowthType": { + "markdownDescription": "The algorithm used to define how percentage grows over time. AWS AppConfig supports the following growth types:\n\n*Linear* : For this type, AWS AppConfig processes the deployment by dividing the total number of targets by the value specified for `Step percentage` . For example, a linear deployment that uses a `Step percentage` of 10 deploys the configuration to 10 percent of the hosts. After those deployments are complete, the system deploys the configuration to the next 10 percent. This continues until 100% of the targets have successfully received the configuration.\n\n*Exponential* : For this type, AWS AppConfig processes the deployment exponentially using the following formula: `G*(2^N)` . In this formula, `G` is the growth factor specified by the user and `N` is the number of steps until the configuration is deployed to all targets. For example, if you specify a growth factor of 2, then the system rolls out the configuration as follows:\n\n`2*(2^0)`\n\n`2*(2^1)`\n\n`2*(2^2)`\n\nExpressed numerically, the deployment rolls out as follows: 2% of the targets, 4% of the targets, 8% of the targets, and continues until the configuration has been deployed to all targets.", + "title": "GrowthType", + "type": "string" + }, + "Name": { + "markdownDescription": "A name for the deployment strategy.", + "title": "Name", + "type": "string" + }, + "ReplicateTo": { + "markdownDescription": "Save the deployment strategy to a Systems Manager (SSM) document.", + "title": "ReplicateTo", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/AWS::AppConfig::DeploymentStrategy.Tags" + }, + "markdownDescription": "Assigns metadata to an AWS AppConfig resource. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define. You can specify a maximum of 50 tags for a resource.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DeploymentDurationInMinutes", + "GrowthFactor", + "Name", + "ReplicateTo" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::AppConfig::DeploymentStrategy" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::AppConfig::DeploymentStrategy.Tags": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . 
The tag key can be up to 128 characters and must not start with `aws:` .", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The tag value can be up to 256 characters.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::AppConfig::Environment": { "additionalProperties": false, "properties": { "Condition": { @@ -9124,7 +9276,7 @@ }, "Monitors": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Monitors" + "$ref": "#/definitions/AWS::AppConfig::Environment.Monitor" }, "markdownDescription": "Amazon CloudWatch alarms to monitor during the deployment process.", "title": "Monitors", @@ -9137,7 +9289,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Tags" + "$ref": "#/definitions/Tag" }, "markdownDescription": "Metadata to assign to the environment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define.", "title": "Tags", @@ -9171,28 +9323,23 @@ ], "type": "object" }, - "AWS::AppConfig::Environment.Monitors": { + "AWS::AppConfig::Environment.Monitor": { "additionalProperties": false, "properties": { "AlarmArn": { + "markdownDescription": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", + "title": "AlarmArn", "type": "string" }, "AlarmRoleArn": { + "markdownDescription": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` .", + "title": "AlarmRoleArn", "type": "string" } }, - "type": "object" - }, - "AWS::AppConfig::Environment.Tags": { - "additionalProperties": false, - "properties": { - "Key": { - "type": "string" - }, - "Value": { - "type": "string" - } - }, + "required": [ + "AlarmArn" + ], "type": "object" }, "AWS::AppConfig::Extension": { @@ -9305,6 +9452,11 @@ "title": "Description", "type": "string" }, + "Dynamic": { + "markdownDescription": "Indicates whether this parameter's value can be supplied at the extension's action point instead of during extension association. 
Dynamic parameters can't be marked `Required` .", + "title": "Dynamic", + "type": "boolean" + }, "Required": { "markdownDescription": "A parameter value must be specified in the extension association.", "title": "Required", @@ -12258,6 +12410,135 @@ ], "type": "object" }, + "AWS::AppIntegrations::Application": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicationSourceConfig": { + "$ref": "#/definitions/AWS::AppIntegrations::Application.ApplicationSourceConfig", + "markdownDescription": "The configuration for where the application should be loaded from.", + "title": "ApplicationSourceConfig" + }, + "Description": { + "markdownDescription": "The description of the application.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the application.", + "title": "Name", + "type": "string" + }, + "Namespace": { + "markdownDescription": "The namespace of the application.", + "title": "Namespace", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ApplicationSourceConfig", + "Description", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::AppIntegrations::Application" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::AppIntegrations::Application.ApplicationSourceConfig": { + "additionalProperties": false, + "properties": { + "ExternalUrlConfig": { + "$ref": "#/definitions/AWS::AppIntegrations::Application.ExternalUrlConfig", + "markdownDescription": "The external URL source for the application.", + "title": "ExternalUrlConfig" + } + }, + "required": [ + "ExternalUrlConfig" + ], + "type": "object" + }, + "AWS::AppIntegrations::Application.ExternalUrlConfig": { + "additionalProperties": false, + "properties": { + "AccessUrl": { + "markdownDescription": "The URL to access the application.", + "title": "AccessUrl", + "type": "string" + }, + "ApprovedOrigins": { + "items": { + "type": "string" + }, + "markdownDescription": "Additional URLs to allow list if different than the access URL.", + "title": "ApprovedOrigins", + "type": "array" + } + }, + "required": [ + "AccessUrl", + "ApprovedOrigins" + ], + "type": "object" + }, "AWS::AppIntegrations::DataIntegration": { "additionalProperties": false, "properties": { @@ -12300,7 +12581,7 @@ }, "FileConfiguration": { "$ref": "#/definitions/AWS::AppIntegrations::DataIntegration.FileConfiguration", - "markdownDescription": "", + "markdownDescription": "The configuration for what files should be pulled from the source.", "title": "FileConfiguration" }, "KmsKey": { @@ -12314,7 +12595,7 @@ "type": "string" }, "ObjectConfiguration": { - "markdownDescription": "", + "markdownDescription": "The configuration for what data 
should be pulled from the source.", "title": "ObjectConfiguration", "type": "object" }, @@ -12369,7 +12650,7 @@ "additionalProperties": false, "properties": { "Filters": { - "markdownDescription": "", + "markdownDescription": "Restrictions for what files should be pulled from the source.", "title": "Filters", "type": "object" }, @@ -12377,7 +12658,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Identifiers for the source folders to pull all files from recursively.", "title": "Folders", "type": "array" } @@ -18540,6 +18821,11 @@ "title": "AtRestEncryptionEnabled", "type": "boolean" }, + "HealthMetricsConfig": { + "markdownDescription": "Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:\n\n- *NetworkBandwidthOutAllowanceExceeded* : The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.\n- *EngineCPUUtilization* : The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.\n\nMetrics will be recorded by API ID. You can set the value to `ENABLED` or `DISABLED` .", + "title": "HealthMetricsConfig", + "type": "string" + }, "TransitEncryptionEnabled": { "markdownDescription": "Transit encryption flag when connecting to cache. You cannot update this setting after creation.", "title": "TransitEncryptionEnabled", @@ -18742,6 +19028,11 @@ "markdownDescription": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", "title": "LambdaConfig" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` .", + "title": "MetricsConfig", + "type": "string" + }, "Name": { "markdownDescription": "Friendly name for you to identify your AppSync data source after creation.", "title": "Name", @@ -19434,6 +19725,16 @@ "title": "AuthenticationType", "type": "string" }, + "EnhancedMetricsConfig": { + "$ref": "#/definitions/AWS::AppSync::GraphQLApi.EnhancedMetricsConfig", + "markdownDescription": "Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.\n\nEnhanced metrics can be configured at the resolver, data source, and operation levels. For more information, see [Monitoring and logging](https://docs.aws.amazon.com//appsync/latest/devguide/monitoring.html#cw-metrics) in the *AWS AppSync User Guide* .", + "title": "EnhancedMetricsConfig" + }, + "EnvironmentVariables": { + "markdownDescription": "A map containing the list of resources with their properties and environment variables. 
For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .\n\n*Pattern* : `^[A-Za-z]+\\\\w*$\\\\`\n\n*Minimum* : 2\n\n*Maximum* : 64", + "title": "EnvironmentVariables", + "type": "object" + }, "IntrospectionConfig": { "markdownDescription": "Sets the value of the GraphQL API to enable ( `ENABLED` ) or disable ( `DISABLED` ) introspection. If no value is provided, the introspection configuration will be set to `ENABLED` by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled.\n\nFor more information about introspection, see [GraphQL introspection](https://docs.aws.amazon.com/https://graphql.org/learn/introspection/) .", "title": "IntrospectionConfig", @@ -19580,6 +19881,32 @@ }, "type": "object" }, + "AWS::AppSync::GraphQLApi.EnhancedMetricsConfig": { + "additionalProperties": false, + "properties": { + "DataSourceLevelMetricsBehavior": { + "markdownDescription": "Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:\n\n- *Requests* : The number of invocations that occured during a request.\n- *Latency* : The time to complete a data source invocation.\n- *Errors* : The number of errors that occurred during a data source invocation.\n\nThese metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. `dataSourceLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_DATA_SOURCE_METRICS` : Records and emits metric data for all data sources in the request.\n- `PER_DATA_SOURCE_METRICS` : Records and emits metric data for data sources that have the `MetricsConfig` value set to `ENABLED` .", + "title": "DataSourceLevelMetricsBehavior", + "type": "string" + }, + "OperationLevelMetricsConfig": { + "markdownDescription": "Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:\n\n- *Requests* : The number of times a specified GraphQL operation was called.\n- *GraphQL errors* : The number of GraphQL errors that occurred during a specified GraphQL operation.\n\nMetrics will be recorded by API ID and operation name. You can set the value to `ENABLED` or `DISABLED` .", + "title": "OperationLevelMetricsConfig", + "type": "string" + }, + "ResolverLevelMetricsBehavior": { + "markdownDescription": "Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:\n\n- *GraphQL errors* : The number of GraphQL errors that occurred.\n- *Requests* : The number of invocations that occurred during a request.\n- *Latency* : The time to complete a resolver invocation.\n- *Cache hits* : The number of cache hits during a request.\n- *Cache misses* : The number of cache misses during a request.\n\nThese metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. 
`resolverLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_RESOLVER_METRICS` : Records and emits metric data for all resolvers in the request.\n- `PER_RESOLVER_METRICS` : Records and emits metric data for resolvers that have the `MetricsConfig` value set to `ENABLED` .", + "title": "ResolverLevelMetricsBehavior", + "type": "string" + } + }, + "required": [ + "DataSourceLevelMetricsBehavior", + "OperationLevelMetricsConfig", + "ResolverLevelMetricsBehavior" + ], + "type": "object" + }, "AWS::AppSync::GraphQLApi.LambdaAuthorizerConfig": { "additionalProperties": false, "properties": { @@ -19826,6 +20153,11 @@ "title": "MaxBatchSize", "type": "number" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced resolver metrics for specified resolvers. Note that `MetricsConfig` won't be used unless the `resolverLevelMetricsBehavior` value is set to `PER_RESOLVER_METRICS` . If the `resolverLevelMetricsBehavior` is set to `FULL_REQUEST_RESOLVER_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.", + "title": "MetricsConfig", + "type": "string" + }, "PipelineConfig": { "$ref": "#/definitions/AWS::AppSync::Resolver.PipelineConfig", "markdownDescription": "Functions linked with the pipeline resolver.", @@ -20121,7 +20453,7 @@ "type": "number" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -20307,7 +20639,7 @@ "type": "string" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -20669,6 +21001,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AttachMissingPermission": { + "markdownDescription": "If set to true, the managed policies for SSM and CW will be attached to the instance roles if they are missing.", + "title": "AttachMissingPermission", + "type": "boolean" + }, "AutoConfigurationEnabled": { "markdownDescription": "If set to `true` , the application components will be configured with the monitoring configuration recommended by Application Insights.", "title": "AutoConfigurationEnabled", @@ -20892,6 +21229,24 @@ "title": "Logs", "type": "array" }, + "NetWeaverPrometheusExporter": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.NetWeaverPrometheusExporter", + "markdownDescription": "", + "title": "NetWeaverPrometheusExporter" + }, + "Processes": { + "items": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.Process" + }, + "markdownDescription": "", + "title": "Processes", + "type": "array" + }, + "SQLServerPrometheusExporter": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.SQLServerPrometheusExporter", + "markdownDescription": "", + "title": "SQLServerPrometheusExporter" + }, "WindowsEvents": { "items": { "$ref": "#/definitions/AWS::ApplicationInsights::Application.WindowsEvent" @@ -21078,6 +21433,77 @@ ], "type": "object" }, + "AWS::ApplicationInsights::Application.NetWeaverPrometheusExporter": { + "additionalProperties": false, + "properties": { + "InstanceNumbers": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "InstanceNumbers", + "type": "array" + }, + "PrometheusPort": { + "markdownDescription": "", + "title": "PrometheusPort", + "type": "string" + }, + "SAPSID": { + "markdownDescription": "", + "title": "SAPSID", + "type": "string" + } + }, + "required": [ + "InstanceNumbers", + "SAPSID" + ], + "type": "object" + }, + 
"AWS::ApplicationInsights::Application.Process": { + "additionalProperties": false, + "properties": { + "AlarmMetrics": { + "items": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.AlarmMetric" + }, + "markdownDescription": "", + "title": "AlarmMetrics", + "type": "array" + }, + "ProcessName": { + "markdownDescription": "", + "title": "ProcessName", + "type": "string" + } + }, + "required": [ + "AlarmMetrics", + "ProcessName" + ], + "type": "object" + }, + "AWS::ApplicationInsights::Application.SQLServerPrometheusExporter": { + "additionalProperties": false, + "properties": { + "PrometheusPort": { + "markdownDescription": "", + "title": "PrometheusPort", + "type": "string" + }, + "SQLSecretName": { + "markdownDescription": "", + "title": "SQLSecretName", + "type": "string" + } + }, + "required": [ + "PrometheusPort", + "SQLSecretName" + ], + "type": "object" + }, "AWS::ApplicationInsights::Application.SubComponentConfigurationDetails": { "additionalProperties": false, "properties": { @@ -21097,6 +21523,14 @@ "title": "Logs", "type": "array" }, + "Processes": { + "items": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.Process" + }, + "markdownDescription": "", + "title": "Processes", + "type": "array" + }, "WindowsEvents": { "items": { "$ref": "#/definitions/AWS::ApplicationInsights::Application.WindowsEvent" @@ -22452,6 +22886,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::AutoScaling::AutoScalingGroup.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU for an instance type, in GiB.\n\nDefault: No minimum or maximum limits", @@ -24752,9 +25191,6 @@ "title": "MappingTemplate", "type": "string" }, - "ModifiedAt": { - "type": "string" - }, "Name": { "markdownDescription": "Returns the descriptive name for the transformer.", "title": "Name", @@ -25068,6 +25504,11 @@ "markdownDescription": "Specifies the number of days after creation that a recovery point is moved to cold storage.", "title": "MoveToColdStorageAfterDays", "type": "number" + }, + "OptInToArchiveForSupportedResources": { + "markdownDescription": "Optional Boolean. If this is true, this setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings.", + "title": "OptInToArchiveForSupportedResources", + "type": "boolean" } }, "type": "object" @@ -25709,7 +26150,7 @@ "items": { "type": "string" }, - "markdownDescription": "These are the accounts to be included in the report.", + "markdownDescription": "These are the accounts to be included in the report.\n\nUse string value of `ROOT` to include all organizational units.", "title": "Accounts", "type": "array" }, @@ -25733,7 +26174,7 @@ "items": { "type": "string" }, - "markdownDescription": "These are the Regions to be included in the report.", + "markdownDescription": "These are the Regions to be included in the report.\n\nUse the wildcard as the string value to include all Regions.", "title": "Regions", "type": "array" }, @@ -25789,7 +26230,7 @@ "title": "RecoveryPointSelection" }, "RestoreTestingPlanName": { - "markdownDescription": "This is the restore testing plan name.", + "markdownDescription": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan. This cannot be changed after creation, and it must consist of only alphanumeric characters and underscores.", "title": "RestoreTestingPlanName", "type": "string" }, @@ -25960,7 +26401,7 @@ "type": "object" }, "RestoreTestingPlanName": { - "markdownDescription": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan.", + "markdownDescription": "Unique string that is the name of the restore testing plan.\n\nThe name cannot be changed after creation. The name must consist of only alphanumeric characters and underscores. Maximum length is 50.", "title": "RestoreTestingPlanName", "type": "string" }, @@ -26500,6 +26941,11 @@ "markdownDescription": "An object with properties specific to Amazon ECS-based jobs. 
When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", "title": "ContainerProperties" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "An object that contains the properties for the Amazon ECS resources of a job. When `ecsProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `eksProperties` , or `nodeProperties` .", + "title": "EcsProperties" + }, "EksProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksProperties", "markdownDescription": "An object with properties that are specific to Amazon EKS-based jobs. When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", @@ -26516,13 +26962,7 @@ "title": "NodeProperties" }, "Parameters": { - "additionalProperties": true, "markdownDescription": "Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a `SubmitJob` request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see [Job definition parameters](https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) in the *AWS Batch User Guide* .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Parameters", "type": "object" }, @@ -26550,18 +26990,12 @@ "type": "number" }, "Tags": { - "additionalProperties": true, "markdownDescription": "The tags that are applied to the job definition.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Tags", "type": "object" }, "Timeout": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.JobTimeout", + "$ref": "#/definitions/AWS::Batch::JobDefinition.Timeout", "markdownDescription": "The timeout time for jobs that are submitted with this job definition. After the amount of time you specify passes, AWS Batch terminates your jobs if they aren't finished.", "title": "Timeout" }, @@ -26597,6 +27031,22 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.AuthorizationConfig": { + "additionalProperties": false, + "properties": { + "AccessPointId": { + "markdownDescription": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", + "title": "AccessPointId", + "type": "string" + }, + "Iam": { + "markdownDescription": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . 
EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified.", + "title": "Iam", + "type": "string" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.ContainerProperties": { "additionalProperties": false, "properties": { @@ -26663,7 +27113,7 @@ }, "MountPoints": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoint" + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" }, "markdownDescription": "The mount points for data volumes in your container. This parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .", "title": "MountPoints", @@ -26684,6 +27134,11 @@ "title": "ReadonlyRootFilesystem", "type": "boolean" }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, "ResourceRequirements": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" @@ -26725,7 +27180,7 @@ }, "Volumes": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Volume" + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" }, "markdownDescription": "A list of data volumes used in a job.", "title": "Volumes", @@ -26761,34 +27216,111 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSAuthorizationConfig": { + "AWS::Batch::JobDefinition.EcsProperties": { "additionalProperties": false, "properties": { - "AccessPointId": { + "TaskProperties": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsTaskProperties" + }, + "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element.", + "title": "TaskProperties", + "type": "array" + } + }, + "required": [ + "TaskProperties" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.EcsTaskProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerProperties" + }, + "markdownDescription": "This object is a list of containers.", + "title": "Containers", + "type": "array" + }, + "EphemeralStorage": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EphemeralStorage", + "markdownDescription": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", + "title": "EphemeralStorage" + }, + "ExecutionRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", + "title": "ExecutionRoleArn", "type": "string" }, - "Iam": { + "IpcMode": { + "markdownDescription": "The IPC resource namespace to use for the containers in the task. 
The valid values are `host` , `task` , or `none` .\n\nIf `host` is specified, all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified `task` share the same IPC resources.\n\nIf `none` is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.\n\nIf no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the Docker run reference.", + "title": "IpcMode", + "type": "string" + }, + "NetworkConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.NetworkConfiguration", + "markdownDescription": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", + "title": "NetworkConfiguration" + }, + "PidMode": { + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the Docker run reference.", + "title": "PidMode", "type": "string" + }, + "PlatformVersion": { + "markdownDescription": "The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. 
For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "PlatformVersion", + "type": "string" + }, + "RuntimePlatform": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RuntimePlatform", + "markdownDescription": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.", + "title": "RuntimePlatform" + }, + "TaskRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .", + "title": "TaskRoleArn", + "type": "string" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" + }, + "markdownDescription": "A list of volumes that are associated with the job.", + "title": "Volumes", + "type": "array" + } }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSVolumeConfiguration": { + "AWS::Batch::JobDefinition.EfsVolumeConfiguration": { "additionalProperties": false, "properties": { "AuthorizationConfig": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSAuthorizationConfig" + "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig", + "markdownDescription": "The authorization configuration details for the Amazon EFS file system.", + "title": "AuthorizationConfig" }, "FileSystemId": { + "markdownDescription": "The Amazon EFS file system ID to use.", + "title": "FileSystemId", "type": "string" }, "RootDirectory": { + "markdownDescription": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", + "title": "RootDirectory", "type": "string" }, "TransitEncryption": { + "markdownDescription": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryption", "type": "string" }, "TransitEncryptionPort": { + "markdownDescription": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryptionPort", "type": "number" } }, @@ -26886,24 +27418,12 @@ "additionalProperties": false, "properties": { "Limits": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to reserve for the container. 
The values vary based on the `name` that's specified. Resources can be requested using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> To maximize your resource utilization, provide your jobs with as much memory as possible for the specific instance type that you are using. To learn how, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that's reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both places, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that's reserved for the container. Values must be a whole integer. `nvidia.com/gpu` can be specified in `limits` , `requests` , or both. If `nvidia.com/gpu` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Limits", "type": "object" }, "Requests": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to request for the container. The values vary based on the `name` that's specified. Resources can be requested by using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that are reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that are reserved for the container. Values must be a whole integer. `nvidia.com/gpu` can be specified in `limits` , `requests` , or both. 
If `nvidia.com/gpu` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Requests", "type": "object" } @@ -26989,56 +27509,11 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EksMetadata": { - "additionalProperties": false, - "properties": { - "Labels": { - "additionalProperties": true, - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.EksPodProperties": { - "additionalProperties": false, - "properties": { - "Containers": { - "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" - }, - "type": "array" - }, - "DnsPolicy": { - "type": "string" - }, - "HostNetwork": { - "type": "boolean" - }, - "Metadata": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksMetadata" - }, - "ServiceAccountName": { - "type": "string" - }, - "Volumes": { - "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.EksProperties": { "additionalProperties": false, "properties": { "PodProperties": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksPodProperties", + "$ref": "#/definitions/AWS::Batch::JobDefinition.PodProperties", "markdownDescription": "The properties for the Kubernetes pod resources of a job.", "title": "PodProperties" } @@ -27163,24 +27638,6 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.Host": { - "additionalProperties": false, - "properties": { - "SourcePath": { - "type": "string" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.JobTimeout": { - "additionalProperties": false, - "properties": { - "AttemptDurationSeconds": { - "type": "number" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.LinuxParameters": { "additionalProperties": false, "properties": { @@ -27232,13 +27689,7 @@ "type": "string" }, "Options": { - "additionalProperties": true, "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Options", "type": "object" }, @@ -27256,16 +27707,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.MountPoint": { + "AWS::Batch::JobDefinition.Metadata": { + "additionalProperties": false, + "properties": { + "Labels": { + "markdownDescription": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object.", + "title": "Labels", + "type": "object" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.MountPoints": { "additionalProperties": false, "properties": { "ContainerPath": { + "markdownDescription": "The path on the container where the host volume is mounted.", + "title": "ContainerPath", "type": "string" }, "ReadOnly": { + "markdownDescription": "If this value is `true` , the container has read-only access to the volume. 
Otherwise, the container can write to the volume. The default value is `false` .", + "title": "ReadOnly", "type": "boolean" }, "SourceVolume": { + "markdownDescription": "The name of the volume to mount.", + "title": "SourceVolume", "type": "string" } }, @@ -27319,6 +27787,19 @@ "markdownDescription": "The container details for the node range.", "title": "Container" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "title": "EcsProperties" + }, + "InstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", + "title": "InstanceTypes", + "type": "array" + }, "TargetNodes": { "markdownDescription": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties.", "title": "TargetNodes", @@ -27330,6 +27811,75 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.PodProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.", + "title": "Containers", + "type": "array" + }, + "DnsPolicy": { + "markdownDescription": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", + "title": "DnsPolicy", + "type": "string" + }, + "HostNetwork": { + "markdownDescription": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. 
For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "title": "HostNetwork", + "type": "boolean" + }, + "InitContainers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", + "title": "InitContainers", + "type": "array" + }, + "Metadata": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata", + "markdownDescription": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", + "title": "Metadata" + }, + "ServiceAccountName": { + "markdownDescription": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "title": "ServiceAccountName", + "type": "string" + }, + "ShareProcessNamespace": { + "markdownDescription": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", + "title": "ShareProcessNamespace", + "type": "boolean" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" + }, + "markdownDescription": "Specifies the volumes for a job definition that uses Amazon EKS resources.", + "title": "Volumes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.RepositoryCredentials": { + "additionalProperties": false, + "properties": { + "CredentialsParameter": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret containing the private repository credentials.", + "title": "CredentialsParameter", + "type": "string" + } + }, + "required": [ + "CredentialsParameter" + ], + "type": "object" + }, "AWS::Batch::JobDefinition.ResourceRequirement": { "additionalProperties": false, "properties": { @@ -27401,6 +27951,147 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.TaskContainerDependency": { + "additionalProperties": false, + "properties": { + "Condition": { + "markdownDescription": "The dependency condition of the container. 
The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a zero status. This condition can't be set on an essential container.", + "title": "Condition", + "type": "string" + }, + "ContainerName": { + "markdownDescription": "A unique identifier for the container.", + "title": "ContainerName", + "type": "string" + } + }, + "required": [ + "Condition", + "ContainerName" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.TaskContainerProperties": { + "additionalProperties": false, + "properties": { + "Command": { + "items": { + "type": "string" + }, + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .", + "title": "Command", + "type": "array" + }, + "DependsOn": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerDependency" + }, + "markdownDescription": "A list of containers that this container depends on.", + "title": "DependsOn", + "type": "array" + }, + "Environment": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Environment" + }, + "markdownDescription": "The environment variables to pass to a container. This parameter maps to Env in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with `AWS_BATCH` . This naming convention is reserved for variables that AWS Batch sets.", + "title": "Environment", + "type": "array" + }, + "Essential": { + "markdownDescription": "If the essential parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. 
For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "Essential", + "type": "boolean" + }, + "Image": { + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of the [*docker run*](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "title": "Image", + "type": "string" + }, + "LinuxParameters": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LinuxParameters", + "markdownDescription": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .", + "title": "LinuxParameters" + }, + "LogConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LogConfiguration", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the *Docker documentation* .\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "LogConfiguration" + }, + "MountPoints": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" + }, + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the [--volume](https://docs.aws.amazon.com/) option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "title": "MountPoints", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a container. The name can be used as a unique identifier to target your `dependsOn` and `Overrides` objects.", + "title": "Name", + "type": "string" + }, + "Privileged": { + "markdownDescription": "When this parameter is `true` , the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on Fargate.", + "title": "Privileged", + "type": "boolean" + }, + "ReadonlyRootFilesystem": { + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. 
This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "title": "ReadonlyRootFilesystem", + "type": "boolean" + }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, + "ResourceRequirements": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" + }, + "markdownDescription": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", + "title": "ResourceRequirements", + "type": "array" + }, + "Secrets": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Secret" + }, + "markdownDescription": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the Amazon Elastic Container Service Developer Guide.", + "title": "Secrets", + "type": "array" + }, + "Ulimits": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Ulimit" + }, + "markdownDescription": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", + "title": "Ulimits", + "type": "array" + }, + "User": { + "markdownDescription": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. 
If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "title": "User", + "type": "string" + } + }, + "required": [ + "Image" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.Timeout": { + "additionalProperties": false, + "properties": { + "AttemptDurationSeconds": { + "markdownDescription": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.", + "title": "AttemptDurationSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.Tmpfs": { "additionalProperties": false, "properties": { @@ -27455,16 +28146,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.Volume": { + "AWS::Batch::JobDefinition.Volumes": { "additionalProperties": false, "properties": { "EfsVolumeConfiguration": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSVolumeConfiguration" + "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration", + "markdownDescription": "This is used when you're using an Amazon Elastic File System file system for job storage. For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", + "title": "EfsVolumeConfiguration" }, "Host": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Host" + "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost", + "markdownDescription": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", + "title": "Host" }, "Name": { + "markdownDescription": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` .", + "title": "Name", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.VolumesHost": { + "additionalProperties": false, + "properties": { + "SourcePath": { + "markdownDescription": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. 
Don't provide this for these jobs.", + "title": "SourcePath", "type": "string" } }, @@ -27518,6 +28226,14 @@ "title": "JobQueueName", "type": "string" }, + "JobStateTimeLimitActions": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobQueue.JobStateTimeLimitAction" + }, + "markdownDescription": "The set of actions that AWS Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. AWS Batch will perform each action after `maxTimeSeconds` has passed.", + "title": "JobStateTimeLimitActions", + "type": "array" + }, "Priority": { "markdownDescription": "The priority of the job queue. Job queues with a higher priority (or a higher integer value for the `priority` parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of `10` is given scheduling preference over a job queue with a priority value of `1` . All of the compute environments must be either EC2 ( `EC2` or `SPOT` ) or Fargate ( `FARGATE` or `FARGATE_SPOT` ); EC2 and Fargate compute environments can't be mixed.", "title": "Priority", @@ -27592,6 +28308,38 @@ ], "type": "object" }, + "AWS::Batch::JobQueue.JobStateTimeLimitAction": { + "additionalProperties": false, + "properties": { + "Action": { + "markdownDescription": "The action to take when a job is at the head of the job queue in the specified state for the specified period of time. The only supported value is `CANCEL` , which will cancel the job.", + "title": "Action", + "type": "string" + }, + "MaxTimeSeconds": { + "markdownDescription": "The approximate amount of time, in seconds, that must pass with the job in the specified state before the action is taken. The minimum value is 600 (10 minutes) and the maximum value is 86,400 (24 hours).", + "title": "MaxTimeSeconds", + "type": "number" + }, + "Reason": { + "markdownDescription": "The reason to log for the action being taken.", + "title": "Reason", + "type": "string" + }, + "State": { + "markdownDescription": "The state of the job needed to trigger the action. 
The only supported value is `RUNNABLE` .", + "title": "State", + "type": "string" + } + }, + "required": [ + "Action", + "MaxTimeSeconds", + "Reason", + "State" + ], + "type": "object" + }, "AWS::Batch::SchedulingPolicy": { "additionalProperties": false, "properties": { @@ -29510,6 +30258,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AutoScalingSpecifications": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSpecification", + "markdownDescription": "The optional auto scaling capacity settings for a table in provisioned capacity mode.", + "title": "AutoScalingSpecifications" + }, "BillingMode": { "$ref": "#/definitions/AWS::Cassandra::Table.BillingMode", "markdownDescription": "The billing mode for the table, which determines how you'll be charged for reads and writes:\n\n- *On-demand mode* (default) - You pay based on the actual reads and writes your application performs.\n- *Provisioned mode* - Lets you specify the number of reads and writes per second that you need for your application.\n\nIf you don't specify a value for this property, then the table will use on-demand mode.", @@ -29564,6 +30317,14 @@ "title": "RegularColumns", "type": "array" }, + "ReplicaSpecifications": { + "items": { + "$ref": "#/definitions/AWS::Cassandra::Table.ReplicaSpecification" + }, + "markdownDescription": "The AWS Region specific settings of a multi-Region table.\n\nFor a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters.\n\n- `region` : The Region where these settings are applied. (Required)\n- `readCapacityUnits` : The provisioned read capacity units. (Optional)\n- `readCapacityAutoScaling` : The read capacity auto scaling settings for the table. (Optional)", + "title": "ReplicaSpecifications", + "type": "array" + }, "TableName": { "markdownDescription": "The name of the table to be created. The table name is case sensitive. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the table name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you can't perform updates that require replacing this resource. You can perform updates that require no interruption or some interruption. If you must replace the resource, specify a new name. \n\n*Length constraints:* Minimum length of 3. Maximum length of 255.\n\n*Pattern:* `^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$`", "title": "TableName", @@ -29605,6 +30366,48 @@ ], "type": "object" }, + "AWS::Cassandra::Table.AutoScalingSetting": { + "additionalProperties": false, + "properties": { + "AutoScalingDisabled": { + "markdownDescription": "This optional parameter enables auto scaling for the table if set to `false` .", + "title": "AutoScalingDisabled", + "type": "boolean" + }, + "MaximumUnits": { + "markdownDescription": "Manage costs by specifying the maximum amount of throughput to provision. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MaximumUnits", + "type": "number" + }, + "MinimumUnits": { + "markdownDescription": "The minimum level of throughput the table should always be ready to support. 
The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MinimumUnits", + "type": "number" + }, + "ScalingPolicy": { + "$ref": "#/definitions/AWS::Cassandra::Table.ScalingPolicy", + "markdownDescription": "Amazon Keyspaces supports the `target tracking` auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. You define the target value as a percentage between 20 and 90.", + "title": "ScalingPolicy" + } + }, + "type": "object" + }, + "AWS::Cassandra::Table.AutoScalingSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's read capacity.", + "title": "ReadCapacityAutoScaling" + }, + "WriteCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's write capacity.", + "title": "WriteCapacityAutoScaling" + } + }, + "type": "object" + }, "AWS::Cassandra::Table.BillingMode": { "additionalProperties": false, "properties": { @@ -29702,6 +30505,70 @@ ], "type": "object" }, + "AWS::Cassandra::Table.ReplicaSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The read capacity auto scaling settings for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityAutoScaling" + }, + "ReadCapacityUnits": { + "markdownDescription": "The provisioned read capacity units for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityUnits", + "type": "number" + }, + "Region": { + "markdownDescription": "The AWS Region.", + "title": "Region", + "type": "string" + } + }, + "required": [ + "Region" + ], + "type": "object" + }, + "AWS::Cassandra::Table.ScalingPolicy": { + "additionalProperties": false, + "properties": { + "TargetTrackingScalingPolicyConfiguration": { + "$ref": "#/definitions/AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration", + "markdownDescription": "The auto scaling policy that scales a table based on the ratio of consumed to provisioned capacity.", + "title": "TargetTrackingScalingPolicyConfiguration" + } + }, + "type": "object" + }, + "AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration": { + "additionalProperties": false, + "properties": { + "DisableScaleIn": { + "markdownDescription": "Specifies if `scale-in` is enabled.\n\nWhen auto scaling automatically decreases capacity for a table, the table *scales in* . 
When scaling policies are set, they can't scale in the table lower than its minimum capacity.", + "title": "DisableScaleIn", + "type": "boolean" + }, + "ScaleInCooldown": { + "markdownDescription": "Specifies a `scale-in` cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleInCooldown", + "type": "number" + }, + "ScaleOutCooldown": { + "markdownDescription": "Specifies a scale out cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleOutCooldown", + "type": "number" + }, + "TargetValue": { + "markdownDescription": "Specifies the target value for the target tracking auto scaling policy.\n\nAmazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define `targetValue` as a percentage. An `integer` between 20 and 90.", + "title": "TargetValue", + "type": "number" + } + }, + "required": [ + "TargetValue" + ], + "type": "object" + }, "AWS::CertificateManager::Account": { "additionalProperties": false, "properties": { @@ -33316,7 +34183,7 @@ "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and .15.", + "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution.", "title": "Weight", "type": "number" } @@ -36274,7 +37141,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen 
`resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36597,7 +37464,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen 
`resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -37286,6 +38153,9 @@ "AWS::CloudWatch::AnomalyDetector.SingleMetricAnomalyDetector": { "additionalProperties": false, "properties": { + "AccountId": { + "type": "string" + }, "Dimensions": { "items": { "$ref": "#/definitions/AWS::CloudWatch::AnomalyDetector.Dimension" @@ -37866,6 +38736,164 @@ ], "type": "object" }, + "AWS::CodeArtifact::PackageGroup": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ContactInfo": { + "markdownDescription": "The contact information of the package group.", + "title": "ContactInfo", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the package group.", + "title": "Description", + "type": "string" + }, + "DomainName": { + "markdownDescription": "The domain that contains the package group.", + "title": "DomainName", + "type": "string" + }, + "DomainOwner": { + "markdownDescription": "The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.", + "title": "DomainOwner", + "type": "string" + }, + "OriginConfiguration": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.OriginConfiguration", + "markdownDescription": "Details about the package origin configuration of a package group.", + "title": "OriginConfiguration" + }, + "Pattern": { + "markdownDescription": "The pattern of the package group. 
The pattern determines which packages are associated with the package group.", + "title": "Pattern", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tags to be applied to the package group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DomainName", + "Pattern" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeArtifact::PackageGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::CodeArtifact::PackageGroup.OriginConfiguration": { + "additionalProperties": false, + "properties": { + "Restrictions": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.Restrictions", + "markdownDescription": "The origin configuration settings that determine how package versions can enter repositories.", + "title": "Restrictions" + } + }, + "required": [ + "Restrictions" + ], + "type": "object" + }, + "AWS::CodeArtifact::PackageGroup.RestrictionType": { + "additionalProperties": false, + "properties": { + "Repositories": { + "items": { + "type": "string" + }, + "markdownDescription": "The repositories to add to the allowed repositories list. The allowed repositories list is used when the `RestrictionMode` is set to `ALLOW_SPECIFIC_REPOSITORIES` .", + "title": "Repositories", + "type": "array" + }, + "RestrictionMode": { + "markdownDescription": "The package group origin restriction setting. When the value is `INHERIT` , the value is set to the value of the first parent package group which does not have a value of `INHERIT` .", + "title": "RestrictionMode", + "type": "string" + } + }, + "required": [ + "RestrictionMode" + ], + "type": "object" + }, + "AWS::CodeArtifact::PackageGroup.Restrictions": { + "additionalProperties": false, + "properties": { + "ExternalUpstream": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.RestrictionType", + "markdownDescription": "The package group origin restriction setting for external, upstream repositories.", + "title": "ExternalUpstream" + }, + "InternalUpstream": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.RestrictionType", + "markdownDescription": "The package group origin restriction setting for internal, upstream repositories.", + "title": "InternalUpstream" + }, + "Publish": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.RestrictionType", + "markdownDescription": "The package group origin restriction setting for publishing packages.", + "title": "Publish" + } + }, + "type": "object" + }, "AWS::CodeArtifact::Repository": { "additionalProperties": false, "properties": { @@ -37918,7 +38946,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of external connections associated with the repository.", + "markdownDescription": "An array of external connections associated with the repository. 
For more information, see [Supported external connection repositories](https://docs.aws.amazon.com/codeartifact/latest/ug/external-connection.html#supported-public-repositories) in the *CodeArtifact user guide* .", "title": "ExternalConnections", "type": "array" }, @@ -37976,6 +39004,92 @@ ], "type": "object" }, + "AWS::CodeBuild::Fleet": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "BaseCapacity": { + "markdownDescription": "The initial number of machines allocated to the compute \ufb02eet, which de\ufb01nes the number of builds that can run in parallel.", + "title": "BaseCapacity", + "type": "number" + }, + "ComputeType": { + "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "title": "ComputeType", + "type": "string" + }, + "EnvironmentType": { + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "title": "EnvironmentType", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the compute fleet.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeBuild::Fleet" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::CodeBuild::Project": { "additionalProperties": false, "properties": { @@ -38310,6 +39424,9 @@ "title": "EnvironmentVariables", "type": "array" }, + "Fleet": { + "$ref": "#/definitions/AWS::CodeBuild::Project.ProjectFleet" + }, "Image": { "markdownDescription": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:\n\n- For an image tag: `/:` . For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be `aws/codebuild/standard:4.0` .\n- For an image digest: `/@` . For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use `/@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf` .\n\nFor more information, see [Docker images provided by CodeBuild](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-available.html) in the *AWS CodeBuild user guide* .", "title": "Image", @@ -38337,7 +39454,9 @@ } }, "required": [ - "Image" + "ComputeType", + "Image", + "Type" ], "type": "object" }, @@ -38713,7 +39832,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of six event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. 
For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.", + "markdownDescription": "The type of webhook filter. There are eight webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , and `RELEASE_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of eight event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , and `PRERELEASED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` and `PRERELEASED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. 
Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.", "title": "Type", "type": "string" } @@ -40558,6 +41677,11 @@ "title": "DisableInboundStageTransitions", "type": "array" }, + "ExecutionMode": { + "markdownDescription": "The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.", + "title": "ExecutionMode", + "type": "string" + }, "Name": { "markdownDescription": "The name of the pipeline.", "title": "Name", @@ -40691,6 +41815,11 @@ "markdownDescription": "The order in which actions are run.", "title": "RunOrder", "type": "number" + }, + "TimeoutInMinutes": { + "markdownDescription": "A timeout duration in minutes that can be applied against the ActionType\u2019s default timeout value specified in [Quotas for AWS CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/limits.html) . This attribute is available only to the manual approval ActionType.", + "title": "TimeoutInMinutes", + "type": "number" } }, "required": [ @@ -40816,9 +41945,39 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitBranchFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitConfiguration": { "additionalProperties": false, "properties": { + "PullRequest": { + "items": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPullRequestFilter" + }, + "markdownDescription": "The field where the repository event that will start the pipeline is specified as pull requests.", + "title": "PullRequest", + "type": "array" + }, "Push": { "items": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPushFilter" @@ -40838,9 +41997,65 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::CodePipeline::Pipeline.GitPullRequestFilter": { + "additionalProperties": false, + "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + "markdownDescription": "The field that specifies to filter on branches for the pull request 
trigger configuration.", + "title": "Branches" + }, + "Events": { + "items": { + "type": "string" + }, + "markdownDescription": "The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.", + "title": "Events", + "type": "array" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the pull request trigger configuration.", + "title": "FilePaths" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitPushFilter": { "additionalProperties": false, "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + "markdownDescription": "The field that specifies to filter on branches for the push trigger configuration.", + "title": "Branches" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the push trigger configuration.", + "title": "FilePaths" + }, "Tags": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitTagFilterCriteria", "markdownDescription": "The field that contains the details for the Git tags trigger configuration.", @@ -41847,6 +43062,10 @@ "type": "boolean" } }, + "required": [ + "ClientId", + "ProviderName" + ], "type": "object" }, "AWS::Cognito::IdentityPool.CognitoStreams": { @@ -42013,12 +43232,24 @@ "type": "string" }, "RoleMappings": { + "additionalProperties": false, "markdownDescription": "How users for a specific identity provider are mapped to roles. This is a string to the `RoleMapping` object map. The string identifies the identity provider. For example: `graph.facebook.com` or `cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id` .\n\nIf the `IdentityProvider` field isn't provided in this object, the string is used as the identity provider name.\n\nFor more information, see the [RoleMapping property](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-identitypoolroleattachment-rolemapping.html) .", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::Cognito::IdentityPoolRoleAttachment.RoleMapping" + } + }, "title": "RoleMappings", "type": "object" }, "Roles": { + "additionalProperties": true, "markdownDescription": "The map of the roles associated with this pool. For a given role, the key is either \"authenticated\" or \"unauthenticated\". The value is the role ARN.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, "title": "Roles", "type": "object" } @@ -42296,7 +43527,7 @@ "type": "array" }, "DeletionProtection": { - "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. 
Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "title": "DeletionProtection", "type": "string" }, @@ -43365,7 +44596,7 @@ "type": "array" }, "ProviderDetails": { - "markdownDescription": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", + "markdownDescription": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata 
document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", "title": "ProviderDetails", "type": "object" }, @@ -48371,6 +49602,22 @@ "title": "AssignContactCategoryActions", "type": "array" }, + "CreateCaseActions": { + 
"items": { + "$ref": "#/definitions/AWS::Connect::Rule.CreateCaseAction" + }, + "markdownDescription": "", + "title": "CreateCaseActions", + "type": "array" + }, + "EndAssociatedTasksActions": { + "items": { + "type": "object" + }, + "markdownDescription": "", + "title": "EndAssociatedTasksActions", + "type": "array" + }, "EventBridgeActions": { "items": { "$ref": "#/definitions/AWS::Connect::Rule.EventBridgeAction" @@ -48394,8 +49641,39 @@ "markdownDescription": "Information about the task action. This field is required if `TriggerEventSource` is one of the following values: `OnZendeskTicketCreate` | `OnZendeskTicketStatusUpdate` | `OnSalesforceCaseCreate`", "title": "TaskActions", "type": "array" + }, + "UpdateCaseActions": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.UpdateCaseAction" + }, + "markdownDescription": "", + "title": "UpdateCaseActions", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Connect::Rule.CreateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + }, + "TemplateId": { + "markdownDescription": "", + "title": "TemplateId", + "type": "string" } }, + "required": [ + "Fields", + "TemplateId" + ], "type": "object" }, "AWS::Connect::Rule.EventBridgeAction": { @@ -48412,6 +49690,52 @@ ], "type": "object" }, + "AWS::Connect::Rule.Field": { + "additionalProperties": false, + "properties": { + "Id": { + "markdownDescription": "", + "title": "Id", + "type": "string" + }, + "Value": { + "$ref": "#/definitions/AWS::Connect::Rule.FieldValue", + "markdownDescription": "", + "title": "Value" + } + }, + "required": [ + "Id", + "Value" + ], + "type": "object" + }, + "AWS::Connect::Rule.FieldValue": { + "additionalProperties": false, + "properties": { + "BooleanValue": { + "markdownDescription": "", + "title": "BooleanValue", + "type": "boolean" + }, + "DoubleValue": { + "markdownDescription": "", + "title": "DoubleValue", + "type": "number" + }, + "EmptyValue": { + "markdownDescription": "", + "title": "EmptyValue", + "type": "object" + }, + "StringValue": { + "markdownDescription": "", + "title": "StringValue", + "type": "string" + } + }, + "type": "object" + }, "AWS::Connect::Rule.NotificationRecipientType": { "additionalProperties": false, "properties": { @@ -48549,6 +49873,23 @@ ], "type": "object" }, + "AWS::Connect::Rule.UpdateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + } + }, + "required": [ + "Fields" + ], + "type": "object" + }, "AWS::Connect::SecurityKey": { "additionalProperties": false, "properties": { @@ -48657,6 +49998,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AllowedAccessControlHierarchyGroupId": { + "markdownDescription": "The identifier of the hierarchy group that a security profile uses to restrict access to resources in Amazon Connect.", + "title": "AllowedAccessControlHierarchyGroupId", + "type": "string" + }, "AllowedAccessControlTags": { "items": { "$ref": "#/definitions/Tag" @@ -48665,11 +50011,27 @@ "title": "AllowedAccessControlTags", "type": "array" }, + "Applications": { + "items": { + "$ref": "#/definitions/AWS::Connect::SecurityProfile.Application" + }, + "markdownDescription": "", + "title": "Applications", + "type": "array" + }, "Description": { 
"markdownDescription": "The description of the security profile.", "title": "Description", "type": "string" }, + "HierarchyRestrictedResources": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of resources that a security profile applies hierarchy restrictions to in Amazon Connect. Following are acceptable ResourceNames: `User` .", + "title": "HierarchyRestrictedResources", + "type": "array" + }, "InstanceArn": { "markdownDescription": "The identifier of the Amazon Connect instance.", "title": "InstanceArn", @@ -48732,6 +50094,29 @@ ], "type": "object" }, + "AWS::Connect::SecurityProfile.Application": { + "additionalProperties": false, + "properties": { + "ApplicationPermissions": { + "items": { + "type": "string" + }, + "markdownDescription": "The permissions that the agent is granted on the application. Only the `ACCESS` permission is supported.", + "title": "ApplicationPermissions", + "type": "array" + }, + "Namespace": { + "markdownDescription": "Namespace of the application that you want to give access to.", + "title": "Namespace", + "type": "string" + } + }, + "required": [ + "ApplicationPermissions", + "Namespace" + ], + "type": "object" + }, "AWS::Connect::TaskTemplate": { "additionalProperties": false, "properties": { @@ -49764,6 +51149,117 @@ ], "type": "object" }, + "AWS::ControlTower::EnabledBaseline": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "BaselineIdentifier": { + "markdownDescription": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", + "title": "BaselineIdentifier", + "type": "string" + }, + "BaselineVersion": { + "markdownDescription": "The enabled version of the `Baseline` .", + "title": "BaselineVersion", + "type": "string" + }, + "Parameters": { + "items": { + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline.Parameter" + }, + "markdownDescription": "Parameters that are applied when enabling this `Baseline` . 
These parameters configure the behavior of the baseline.", + "title": "Parameters", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags associated with input to `EnableBaseline` .", + "title": "Tags", + "type": "array" + }, + "TargetIdentifier": { + "markdownDescription": "The target on which to enable the `Baseline` .", + "title": "TargetIdentifier", + "type": "string" + } + }, + "required": [ + "BaselineIdentifier", + "BaselineVersion", + "TargetIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::ControlTower::EnabledBaseline" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::ControlTower::EnabledBaseline.Parameter": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "A string denoting the parameter key.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "A low-level `Document` object of any type (for example, a Java Object).", + "title": "Value", + "type": "object" + } + }, + "type": "object" + }, "AWS::ControlTower::EnabledControl": { "additionalProperties": false, "properties": { @@ -49812,6 +51308,14 @@ "title": "Parameters", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags to be applied to the enabled control.", + "title": "Tags", + "type": "array" + }, "TargetIdentifier": { "markdownDescription": "The ARN of the organizational unit. For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", "title": "TargetIdentifier", @@ -50227,6 +51731,7 @@ } }, "required": [ + "DefaultExpirationDays", "DomainName" ], "type": "object" @@ -51221,7 +52726,9 @@ } }, "required": [ - "DomainName" + "Description", + "DomainName", + "ObjectTypeName" ], "type": "object" }, @@ -51804,7 +53311,7 @@ "additionalProperties": false, "properties": { "CronExpression": { - "markdownDescription": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see [Cron expressions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch User Guide* .", + "markdownDescription": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see the [Cron expressions reference](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) in the *Amazon EventBridge User Guide* .", "title": "CronExpression", "type": "string" }, @@ -52597,28 +54104,18 @@ "additionalProperties": false, "properties": { "CertificateArn": { - "markdownDescription": "", - "title": "CertificateArn", "type": "string" }, "DatabaseName": { - "markdownDescription": "Database name for the endpoint.", - "title": "DatabaseName", "type": "string" }, "Port": { - "markdownDescription": "Endpoint TCP port.", - "title": "Port", "type": "number" }, "ServerName": { - "markdownDescription": "Fully qualified domain name of the endpoint. 
For an Amazon RDS SQL Server instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.", - "title": "ServerName", "type": "string" }, "SslMode": { - "markdownDescription": "", - "title": "SslMode", "type": "string" } }, @@ -52628,23 +54125,15 @@ "additionalProperties": false, "properties": { "CertificateArn": { - "markdownDescription": "", - "title": "CertificateArn", "type": "string" }, "Port": { - "markdownDescription": "Endpoint TCP port.", - "title": "Port", "type": "number" }, "ServerName": { - "markdownDescription": "The host name of the endpoint database.\n\nFor an Amazon RDS MySQL instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.\n\nFor an Aurora MySQL instance, this is the output of [DescribeDBClusters](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html) , in the `Endpoint` field.", - "title": "ServerName", "type": "string" }, "SslMode": { - "markdownDescription": "", - "title": "SslMode", "type": "string" } }, @@ -52654,53 +54143,33 @@ "additionalProperties": false, "properties": { "AsmServer": { - "markdownDescription": "For an Oracle source endpoint, your ASM server address. You can set this value from the `asm_server` value. You set `asm_server` as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see [Configuration for change data capture (CDC) on an Oracle source database](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration) .", - "title": "AsmServer", "type": "string" }, "CertificateArn": { - "markdownDescription": "", - "title": "CertificateArn", "type": "string" }, "DatabaseName": { - "markdownDescription": "Database name for the endpoint.", - "title": "DatabaseName", "type": "string" }, "Port": { - "markdownDescription": "Endpoint TCP port.", - "title": "Port", "type": "number" }, "SecretsManagerOracleAsmAccessRoleArn": { - "markdownDescription": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the `SecretsManagerOracleAsmSecret` . This `SecretsManagerOracleAsmSecret` has the secret value that allows access to the Oracle ASM of the endpoint.\n\n> You can specify one of two sets of values for these permissions. You can specify the values for this setting and `SecretsManagerOracleAsmSecretId` . Or you can specify clear-text values for `AsmUser` , `AsmPassword` , and `AsmServerName` . You can't specify both. 
For more information on creating this `SecretsManagerOracleAsmSecret` and the `SecretsManagerOracleAsmAccessRoleArn` and `SecretsManagerOracleAsmSecretId` required to access it, see [Using secrets to access AWS Database Migration Service resources](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) in the *AWS Database Migration Service User Guide* .", - "title": "SecretsManagerOracleAsmAccessRoleArn", "type": "string" }, "SecretsManagerOracleAsmSecretId": { - "markdownDescription": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN, partial ARN, or friendly name of the `SecretsManagerOracleAsmSecret` that contains the Oracle ASM connection details for the Oracle endpoint.", - "title": "SecretsManagerOracleAsmSecretId", "type": "string" }, "SecretsManagerSecurityDbEncryptionAccessRoleArn": { - "markdownDescription": "", - "title": "SecretsManagerSecurityDbEncryptionAccessRoleArn", "type": "string" }, "SecretsManagerSecurityDbEncryptionSecretId": { - "markdownDescription": "", - "title": "SecretsManagerSecurityDbEncryptionSecretId", "type": "string" }, "ServerName": { - "markdownDescription": "Fully qualified domain name of the endpoint.\n\nFor an Amazon RDS Oracle instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.", - "title": "ServerName", "type": "string" }, "SslMode": { - "markdownDescription": "", - "title": "SslMode", "type": "string" } }, @@ -52741,19 +54210,13 @@ "additionalProperties": false, "properties": { "MicrosoftSqlServerSettings": { - "$ref": "#/definitions/AWS::DMS::DataProvider.MicrosoftSqlServerSettings", - "markdownDescription": "", - "title": "MicrosoftSqlServerSettings" + "$ref": "#/definitions/AWS::DMS::DataProvider.MicrosoftSqlServerSettings" }, "MySqlSettings": { - "$ref": "#/definitions/AWS::DMS::DataProvider.MySqlSettings", - "markdownDescription": "", - "title": "MySqlSettings" + "$ref": "#/definitions/AWS::DMS::DataProvider.MySqlSettings" }, "OracleSettings": { - "$ref": "#/definitions/AWS::DMS::DataProvider.OracleSettings", - "markdownDescription": "", - "title": "OracleSettings" + "$ref": "#/definitions/AWS::DMS::DataProvider.OracleSettings" }, "PostgreSqlSettings": { "$ref": "#/definitions/AWS::DMS::DataProvider.PostgreSqlSettings", @@ -59324,6 +60787,11 @@ "title": "Includes", "type": "array" }, + "ManifestConfig": { + "$ref": "#/definitions/AWS::DataSync::Task.ManifestConfig", + "markdownDescription": "The configuration of the manifest that lists the files or objects to transfer. For more information, see [Specifying what DataSync transfers by using a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html) .", + "title": "ManifestConfig" + }, "Name": { "markdownDescription": "The name of a task. 
This value is a text reference that is used to identify the task in the console.", "title": "Name", @@ -59400,7 +60868,7 @@ "additionalProperties": false, "properties": { "S3": { - "$ref": "#/definitions/AWS::DataSync::Task.S3", + "$ref": "#/definitions/AWS::DataSync::Task.TaskReportConfigDestinationS3", "markdownDescription": "Specifies the Amazon S3 bucket where DataSync uploads your task report.", "title": "S3" } @@ -59423,6 +60891,56 @@ }, "type": "object" }, + "AWS::DataSync::Task.ManifestConfig": { + "additionalProperties": false, + "properties": { + "Action": { + "markdownDescription": "Specifies what DataSync uses the manifest for.", + "title": "Action", + "type": "string" + }, + "Format": { + "markdownDescription": "Specifies the file format of your manifest. For more information, see [Creating a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-create) .", + "title": "Format", + "type": "string" + }, + "Source": { + "$ref": "#/definitions/AWS::DataSync::Task.Source", + "markdownDescription": "Specifies the manifest that you want DataSync to use and where it's hosted.\n\n> You must specify this parameter if you're configuring a new manifest on or after February 7, 2024.\n> \n> If you don't, you'll get a 400 status code and `ValidationException` error stating that you're missing the IAM role for DataSync to access the S3 bucket where you're hosting your manifest. For more information, see [Providing DataSync access to your manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-access) .", + "title": "Source" + } + }, + "required": [ + "Source" + ], + "type": "object" + }, + "AWS::DataSync::Task.ManifestConfigSourceS3": { + "additionalProperties": false, + "properties": { + "BucketAccessRoleArn": { + "markdownDescription": "Specifies the AWS Identity and Access Management (IAM) role that allows DataSync to access your manifest. For more information, see [Providing DataSync access to your manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-access) .", + "title": "BucketAccessRoleArn", + "type": "string" + }, + "ManifestObjectPath": { + "markdownDescription": "Specifies the Amazon S3 object key of your manifest. This can include a prefix (for example, `prefix/my-manifest.csv` ).", + "title": "ManifestObjectPath", + "type": "string" + }, + "ManifestObjectVersionId": { + "markdownDescription": "Specifies the object version ID of the manifest that you want DataSync to use. If you don't set this, DataSync uses the latest version of the object.", + "title": "ManifestObjectVersionId", + "type": "string" + }, + "S3BucketArn": { + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the S3 bucket where you're hosting your manifest.", + "title": "S3BucketArn", + "type": "string" + } + }, + "type": "object" + }, "AWS::DataSync::Task.Options": { "additionalProperties": false, "properties": { @@ -59534,18 +61052,12 @@ "additionalProperties": false, "properties": { "BucketAccessRoleArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. 
For more information, see [Allowing DataSync to upload a task report to an Amazon S3 bucket](https://docs.aws.amazon.com/datasync/latest/userguide/creating-task-reports.html) .", - "title": "BucketAccessRoleArn", "type": "string" }, "S3BucketArn": { - "markdownDescription": "Specifies the ARN of the S3 bucket where DataSync uploads your report.", - "title": "S3BucketArn", "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a bucket prefix for your report.", - "title": "Subdirectory", "type": "string" } }, @@ -59562,6 +61074,17 @@ }, "type": "object" }, + "AWS::DataSync::Task.Source": { + "additionalProperties": false, + "properties": { + "S3": { + "$ref": "#/definitions/AWS::DataSync::Task.ManifestConfigSourceS3", + "markdownDescription": "Specifies the S3 bucket where you're hosting your manifest.", + "title": "S3" + } + }, + "type": "object" + }, "AWS::DataSync::Task.TaskReportConfig": { "additionalProperties": false, "properties": { @@ -59597,6 +61120,27 @@ ], "type": "object" }, + "AWS::DataSync::Task.TaskReportConfigDestinationS3": { + "additionalProperties": false, + "properties": { + "BucketAccessRoleArn": { + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see [Allowing DataSync to upload a task report to an Amazon S3 bucket](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .", + "title": "BucketAccessRoleArn", + "type": "string" + }, + "S3BucketArn": { + "markdownDescription": "Specifies the ARN of the S3 bucket where DataSync uploads your report.", + "title": "S3BucketArn", + "type": "string" + }, + "Subdirectory": { + "markdownDescription": "Specifies a bucket prefix for your report.", + "title": "Subdirectory", + "type": "string" + } + }, + "type": "object" + }, "AWS::DataSync::Task.TaskSchedule": { "additionalProperties": false, "properties": { @@ -59633,7 +61177,7 @@ }, "type": "object" }, - "AWS::Detective::Graph": { + "AWS::DataZone::DataSource": { "additionalProperties": false, "properties": { "Condition": { @@ -59668,25 +61212,82 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoEnableMembers": { - "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. 
For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", - "title": "AutoEnableMembers", - "type": "boolean" - }, - "Tags": { + "AssetFormsInput": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::DataZone::DataSource.FormInput" }, - "markdownDescription": "The tag values to assign to the new behavior graph.", - "title": "Tags", + "markdownDescription": "The metadata forms attached to the assets that the data source works with.", + "title": "AssetFormsInput", "type": "array" + }, + "Configuration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.DataSourceConfigurationInput", + "markdownDescription": "The configuration of the data source.", + "title": "Configuration" + }, + "Description": { + "markdownDescription": "The description of the data source.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain where the data source is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnableSetting": { + "markdownDescription": "Specifies whether the data source is enabled.", + "title": "EnableSetting", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The unique identifier of the Amazon DataZone environment to which the data source publishes assets.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the data source.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which you want to add this data source.", + "title": "ProjectIdentifier", + "type": "string" + }, + "PublishOnImport": { + "markdownDescription": "Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog.", + "title": "PublishOnImport", + "type": "boolean" + }, + "Recommendation": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RecommendationConfiguration", + "markdownDescription": "Specifies whether the business name generation is to be enabled for this data source.", + "title": "Recommendation" + }, + "Schedule": { + "$ref": "#/definitions/AWS::DataZone::DataSource.ScheduleConfiguration", + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule" + }, + "Type": { + "markdownDescription": "The type of the data source.", + "title": "Type", + "type": "string" } }, + "required": [ + "DomainIdentifier", + "EnvironmentIdentifier", + "Name", + "ProjectIdentifier", + "Type" + ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::Graph" + "AWS::DataZone::DataSource" ], "type": "string" }, @@ -59700,11 +61301,1027 @@ } }, "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.DataSourceConfigurationInput": { + "additionalProperties": false, + "properties": { + "GlueRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.GlueRunConfigurationInput", + "markdownDescription": "The configuration of the AWS Glue data source.", + "title": "GlueRunConfiguration" + }, + "RedshiftRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftRunConfigurationInput", + "markdownDescription": "The configuration of the Amazon Redshift data source.", + "title": "RedshiftRunConfiguration" + } + }, + 
"type": "object" + }, + "AWS::DataZone::DataSource.FilterExpression": { + "additionalProperties": false, + "properties": { + "Expression": { + "markdownDescription": "The search filter expression.", + "title": "Expression", + "type": "string" + }, + "Type": { + "markdownDescription": "The search filter explresison type.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Expression", "Type" ], "type": "object" }, - "AWS::Detective::MemberInvitation": { + "AWS::DataZone::DataSource.FormInput": { + "additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the metadata form.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The name of the metadata form.", + "title": "FormName", + "type": "string" + }, + "TypeIdentifier": { + "markdownDescription": "The ID of the metadata form type.", + "title": "TypeIdentifier", + "type": "string" + }, + "TypeRevision": { + "markdownDescription": "The revision of the metadata form type.", + "title": "TypeRevision", + "type": "string" + } + }, + "required": [ + "FormName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.GlueRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the AWS Glue data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RelationalFilterConfigurations" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RecommendationConfiguration": { + "additionalProperties": false, + "properties": { + "EnableBusinessNameGeneration": { + "markdownDescription": "Specifies whether automatic business name generation is to be enabled or not as part of the recommendation configuration.", + "title": "EnableBusinessNameGeneration", + "type": "boolean" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftClusterStorage": { + "additionalProperties": false, + "properties": { + "ClusterName": { + "markdownDescription": "The name of an Amazon Redshift cluster.", + "title": "ClusterName", + "type": "string" + } + }, + "required": [ + "ClusterName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftCredentialConfiguration": { + "additionalProperties": false, + "properties": { + "SecretManagerArn": { + "markdownDescription": "The ARN of a secret manager for an Amazon Redshift cluster.", + "title": "SecretManagerArn", + "type": "string" + } + }, + "required": [ + "SecretManagerArn" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the Amazon Redshift data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RedshiftCredentialConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftCredentialConfiguration", + "markdownDescription": "The details of the credentials required to access an Amazon Redshift cluster.", + "title": "RedshiftCredentialConfiguration" + }, + "RedshiftStorage": { + "$ref": 
"#/definitions/AWS::DataZone::DataSource.RedshiftStorage", + "markdownDescription": "The details of the Amazon Redshift storage as part of the configuration of an Amazon Redshift data source run.", + "title": "RedshiftStorage" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RedshiftCredentialConfiguration", + "RedshiftStorage", + "RelationalFilterConfigurations" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftServerlessStorage": { + "additionalProperties": false, + "properties": { + "WorkgroupName": { + "markdownDescription": "The name of the Amazon Redshift Serverless workgroup.", + "title": "WorkgroupName", + "type": "string" + } + }, + "required": [ + "WorkgroupName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftStorage": { + "additionalProperties": false, + "properties": { + "RedshiftClusterSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftClusterStorage", + "markdownDescription": "The details of the Amazon Redshift cluster source.", + "title": "RedshiftClusterSource" + }, + "RedshiftServerlessSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftServerlessStorage", + "markdownDescription": "The details of the Amazon Redshift Serverless workgroup source.", + "title": "RedshiftServerlessSource" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.RelationalFilterConfiguration": { + "additionalProperties": false, + "properties": { + "DatabaseName": { + "markdownDescription": "The database name specified in the relational filter configuration for the data source.", + "title": "DatabaseName", + "type": "string" + }, + "FilterExpressions": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.FilterExpression" + }, + "markdownDescription": "The filter expressions specified in the relational filter configuration for the data source.", + "title": "FilterExpressions", + "type": "array" + }, + "SchemaName": { + "markdownDescription": "The schema name specified in the relational filter configuration for the data source.", + "title": "SchemaName", + "type": "string" + } + }, + "required": [ + "DatabaseName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.ScheduleConfiguration": { + "additionalProperties": false, + "properties": { + "Schedule": { + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule", + "type": "string" + }, + "Timezone": { + "markdownDescription": "The timezone of the data source run.", + "title": "Timezone", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Domain": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the Amazon DataZone domain.", + "title": "Description", + "type": "string" + }, + 
"DomainExecutionRole": { + "markdownDescription": "The domain execution role that is created when an Amazon DataZone domain is created. The domain execution role is created in the AWS account that houses the Amazon DataZone domain.", + "title": "DomainExecutionRole", + "type": "string" + }, + "KmsKeyIdentifier": { + "markdownDescription": "The identifier of the AWS Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data.", + "title": "KmsKeyIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone domain.", + "title": "Name", + "type": "string" + }, + "SingleSignOn": { + "$ref": "#/definitions/AWS::DataZone::Domain.SingleSignOn", + "markdownDescription": "The single sign-on details in Amazon DataZone.", + "title": "SingleSignOn" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags specified for the Amazon DataZone domain.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DomainExecutionRole", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Domain" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Domain.SingleSignOn": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The type of single sign-on in Amazon DataZone.", + "title": "Type", + "type": "string" + }, + "UserAssignment": { + "markdownDescription": "The single sign-on user assignment in Amazon DataZone.", + "title": "UserAssignment", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Environment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the environment.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentProfileIdentifier": { + "markdownDescription": "The identifier of the environment profile that is used to create this Amazon DataZone environment.", + "title": "EnvironmentProfileIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone environment.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone environment.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which this environment is created.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::Environment.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of 
this Amazon DataZone environment.", + "title": "UserParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnvironmentProfileIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Environment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Environment.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment parameter.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which an environment blueprint exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnabledRegions": { + "items": { + "type": "string" + }, + "markdownDescription": "The enabled AWS Regions specified in a blueprint configuration.", + "title": "EnabledRegions", + "type": "array" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of the environment blueprint.\n\nIn the current release, only the following values are supported: `DefaultDataLake` and `DefaultDataWarehouse` .", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "ManageAccessRoleArn": { + "markdownDescription": "The ARN of the manage access role.", + "title": "ManageAccessRoleArn", + "type": "string" + }, + "ProvisioningRoleArn": { + "markdownDescription": "The ARN of the provisioning role.", + "title": "ProvisioningRoleArn", + "type": "string" + }, + "RegionalParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter" + }, + "markdownDescription": "The regional parameters of the environment blueprint.", + "title": "RegionalParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnabledRegions", + "EnvironmentBlueprintIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentBlueprintConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter": { + "additionalProperties": false, + "properties": { + "Parameters": { + "additionalProperties": true, + "markdownDescription": "A string to string map containing parameters for the region.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Parameters", + "type": "object" + }, + "Region": { + "markdownDescription": "The region specified in the 
environment parameter.", + "title": "Region", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AwsAccountId": { + "markdownDescription": "The identifier of an AWS account in which an environment profile exists.", + "title": "AwsAccountId", + "type": "string" + }, + "AwsAccountRegion": { + "markdownDescription": "The AWS Region in which an environment profile exists.", + "title": "AwsAccountRegion", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the environment profile.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment profile exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of a blueprint with which an environment profile is created.", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the environment profile.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of a project in which an environment profile exists.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of this Amazon DataZone environment profile.", + "title": "UserParameters", + "type": "array" + } + }, + "required": [ + "AwsAccountId", + "AwsAccountRegion", + "DomainIdentifier", + "EnvironmentBlueprintIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentProfile" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name specified in the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment profile.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Project": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of a project.", + "title": "Description", + "type": 
"string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of a Amazon DataZone domain where the project exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone project.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a project.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "DomainIdentifier", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Project" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicableAssetTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The asset types included in the subscription target.", + "title": "ApplicableAssetTypes", + "type": "array" + }, + "AuthorizedPrincipals": { + "items": { + "type": "string" + }, + "markdownDescription": "The authorized principals included in the subscription target.", + "title": "AuthorizedPrincipals", + "type": "array" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain in which subscription target is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The ID of the environment in which subscription target is created.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "ManageAccessRole": { + "markdownDescription": "The manage access role that is used to create the subscription target.", + "title": "ManageAccessRole", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the subscription target.", + "title": "Name", + "type": "string" + }, + "Provider": { + "markdownDescription": "The provider of the subscription target.", + "title": "Provider", + "type": "string" + }, + "SubscriptionTargetConfig": { + "items": { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm" + }, + "markdownDescription": "The configuration of the subscription target.", + "title": "SubscriptionTargetConfig", + "type": "array" + }, + "Type": { + "markdownDescription": "The type of the subscription target.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "ApplicableAssetTypes", + "AuthorizedPrincipals", + "DomainIdentifier", + "EnvironmentIdentifier", + "ManageAccessRole", + "Name", + "SubscriptionTargetConfig", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::SubscriptionTarget" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm": { + 
"additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the subscription target configuration.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The form name included in the subscription target configuration.", + "title": "FormName", + "type": "string" + } + }, + "required": [ + "Content", + "FormName" + ], + "type": "object" + }, + "AWS::Detective::Graph": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoEnableMembers": { + "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", + "title": "AutoEnableMembers", + "type": "boolean" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tag values to assign to the new behavior graph.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::Graph" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Detective::MemberInvitation": { "additionalProperties": false, "properties": { "Condition": { @@ -61372,6 +63989,11 @@ "AWS::DynamoDB::GlobalTable.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis data stream.", "title": "StreamArn", @@ -61537,6 +64159,16 @@ "title": "Region", "type": "string" }, + "ReplicaStreamSpecification": { + "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ReplicaStreamSpecification", + "markdownDescription": "Represents the DynamoDB Streams configuration for a global table replica.", + "title": "ReplicaStreamSpecification" + }, + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ResourcePolicy", + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified replica of a DynamoDB global table. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . 
For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "ResourcePolicy" + }, "SSESpecification": { "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ReplicaSSESpecification", "markdownDescription": "Allows you to specify a customer-managed key for the replica. When using customer-managed keys for server-side encryption, this property must have a value in all replicas.", @@ -61561,6 +64193,34 @@ ], "type": "object" }, + "AWS::DynamoDB::GlobalTable.ReplicaStreamSpecification": { + "additionalProperties": false, + "properties": { + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ResourcePolicy", + "markdownDescription": "A resource-based policy document that contains the permissions for the specified stream of a DynamoDB global table replica. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .\n\nYou can update the `ResourcePolicy` property if you've specified more than one table using the [AWS ::DynamoDB::GlobalTable](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-globaltable.html) resource.", + "title": "ResourcePolicy" + } + }, + "required": [ + "ResourcePolicy" + ], + "type": "object" + }, + "AWS::DynamoDB::GlobalTable.ResourcePolicy": { + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified DynamoDB table, its indexes, and stream. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "PolicyDocument", + "type": "object" + } + }, + "required": [ + "PolicyDocument" + ], + "type": "object" + }, "AWS::DynamoDB::GlobalTable.SSESpecification": { "additionalProperties": false, "properties": { @@ -61762,6 +64422,11 @@ "markdownDescription": "Throughput for the specified table, which consists of values for `ReadCapacityUnits` and `WriteCapacityUnits` . For more information about the contents of a provisioned throughput structure, see [Amazon DynamoDB Table ProvisionedThroughput](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ProvisionedThroughput.html) .\n\nIf you set `BillingMode` as `PROVISIONED` , you must specify this property. 
If you set `BillingMode` as `PAY_PER_REQUEST` , you cannot specify this property.", "title": "ProvisionedThroughput" }, + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::Table.ResourcePolicy", + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified table. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .\n\nWhen you attach a resource-based policy while creating a table, the policy creation is *strongly consistent* . For information about the considerations that you should keep in mind while attaching a resource-based policy, see [Resource-based policy considerations](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) .", + "title": "ResourcePolicy" + }, "SSESpecification": { "$ref": "#/definitions/AWS::DynamoDB::Table.SSESpecification", "markdownDescription": "Specifies the settings to enable server-side encryption.", @@ -61978,6 +64643,11 @@ "AWS::DynamoDB::Table.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis data stream.\n\nLength Constraints: Minimum length of 37. Maximum length of 1024.", "title": "StreamArn", @@ -62068,6 +64738,20 @@ ], "type": "object" }, + "AWS::DynamoDB::Table.ResourcePolicy": { + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified DynamoDB table, index, or both. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "PolicyDocument", + "type": "object" + } + }, + "required": [ + "PolicyDocument" + ], + "type": "object" + }, "AWS::DynamoDB::Table.S3BucketSource": { "additionalProperties": false, "properties": { @@ -62119,6 +64803,11 @@ "AWS::DynamoDB::Table.StreamSpecification": { "additionalProperties": false, "properties": { + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::Table.ResourcePolicy", + "markdownDescription": "Creates or updates a resource-based policy document that contains the permissions for DynamoDB resources, such as a table's streams. 
Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "ResourcePolicy" + }, "StreamViewType": { "markdownDescription": "When an item in the table is modified, `StreamViewType` determines what information is written to the stream for this table. Valid values for `StreamViewType` are:\n\n- `KEYS_ONLY` - Only the key attributes of the modified item are written to the stream.\n- `NEW_IMAGE` - The entire item, as it appears after it was modified, is written to the stream.\n- `OLD_IMAGE` - The entire item, as it appeared before it was modified, is written to the stream.\n- `NEW_AND_OLD_IMAGES` - Both the new and the old item images of the item are written to the stream.", "title": "StreamViewType", @@ -63248,6 +65937,11 @@ "title": "DomainNameServers", "type": "array" }, + "Ipv6AddressPreferredLeaseTime": { + "markdownDescription": "A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed.", + "title": "Ipv6AddressPreferredLeaseTime", + "type": "number" + }, "NetbiosNameServers": { "items": { "type": "string" @@ -63690,6 +66384,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::EC2Fleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -64376,13 +67075,13 @@ "type": "string" }, "DeliverLogsPermissionArn": { - "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nThis parameter is required if the destination type is `cloud-watch-logs` and unsupported otherwise.", + "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to the log destination.\n\nThis parameter is required if the destination type is `cloud-watch-logs` , or if the destination type is `kinesis-data-firehose` and the delivery stream and the resources to monitor are in different accounts.", "title": "DeliverLogsPermissionArn", "type": "string" }, "DestinationOptions": { "$ref": "#/definitions/AWS::EC2::FlowLog.DestinationOptions", - "markdownDescription": "The destination options. The following options are supported:\n\n- `FileFormat` - The format for the flow log ( `plain-text` | `parquet` ). The default is `plain-text` .\n- `HiveCompatiblePartitions` - Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3 ( `true` | `false` ). The default is `false` .\n- `PerHourPartition` - Indicates whether to partition the flow log per hour ( `true` | `false` ). The default is `false` .", + "markdownDescription": "The destination options.", "title": "DestinationOptions" }, "LogDestination": { @@ -65702,9 +68401,9 @@ "title": "Ebs" }, "NoDevice": { - "$ref": "#/definitions/AWS::EC2::Instance.NoDevice", "markdownDescription": "To omit the device from the block device mapping, specify an empty string.\n\n> After the instance is running, modifying this parameter results in instance [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", - "title": "NoDevice" + "title": "NoDevice", + "type": "object" }, "VirtualName": { "markdownDescription": "The virtual device name ( `ephemeral` N). The name must be in the form `ephemeral` *X* where *X* is a number starting from zero (0). For example, an instance type with 2 available instance store volumes can specify mappings for `ephemeral0` and `ephemeral1` . The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.\n\nNVMe instance store volumes are automatically enumerated and assigned a device name. Including them in your block device mapping has no effect.\n\n*Constraints* : For M3 instances, you must specify instance store volumes in the block device mapping for the instance. 
When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.\n\n> After the instance is running, modifying this parameter results in instance [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", @@ -65975,11 +68674,6 @@ ], "type": "object" }, - "AWS::EC2::Instance.NoDevice": { - "additionalProperties": false, - "properties": {}, - "type": "object" - }, "AWS::EC2::Instance.PrivateDnsNameOptions": { "additionalProperties": false, "properties": { @@ -66595,7 +69289,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -66791,6 +69485,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.MemoryGiBPerVCpu", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -69484,7 +72183,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security group IDs associated with this network interface.", + "markdownDescription": "The IDs of the security groups associated with this network interface.", "title": "GroupSet", "type": "array" }, @@ -69507,7 +72206,7 @@ "type": "array" }, "Ipv6AddressCount": { - "markdownDescription": "The number of IPv6 addresses to assign to a network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. 
To specify specific IPv6 addresses, use the `Ipv6Addresses` property and don't specify this property.\n\nWhen creating a network interface, you can't specify a count of IPv6 addresses if you've specified one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", + "markdownDescription": "The number of IPv6 addresses to assign to the network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. To specify specific IPv6 addresses, use the `Ipv6Addresses` property and don't specify this property.\n\nWhen creating a network interface, you can't specify a count of IPv6 addresses if you've specified one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", "title": "Ipv6AddressCount", "type": "number" }, @@ -69515,7 +72214,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::NetworkInterface.InstanceIpv6Address" }, - "markdownDescription": "One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet to associate with the network interface. If you're specifying a number of IPv6 addresses, use the `Ipv6AddressCount` property and don't specify this property.\n\nWhen creating a network interface, you can't specify IPv6 addresses if you've specified one of the following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", + "markdownDescription": "The IPv6 addresses from the IPv6 CIDR block range of your subnet to assign to the network interface. If you're specifying a number of IPv6 addresses, use the `Ipv6AddressCount` property and don't specify this property.\n\nWhen creating a network interface, you can't specify IPv6 addresses if you've specified one of the following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", "title": "Ipv6Addresses", "type": "array" }, @@ -69533,7 +72232,7 @@ "type": "array" }, "PrivateIpAddress": { - "markdownDescription": "Assigns a single private IP address to the network interface, which is used as the primary private IP address. If you want to specify multiple private IP address, use the `PrivateIpAddresses` property.", + "markdownDescription": "The private IPv4 address to assign to the network interface as the primary private IP address. If you want to specify multiple private IP addresses, use the `PrivateIpAddresses` property.", "title": "PrivateIpAddress", "type": "string" }, @@ -69541,7 +72240,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::NetworkInterface.PrivateIpAddressSpecification" }, - "markdownDescription": "Assigns private IP addresses to the network interface. You can specify a primary private IP address by setting the value of the `Primary` property to `true` in the `PrivateIpAddressSpecification` property. If you want EC2 to automatically assign private IP addresses, use the `SecondaryPrivateIpAddressCount` property and do not specify this property.\n\nWhen creating a network interface, you can't specify private IPv4 addresses if you've specified one of the following: a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", + "markdownDescription": "The private IPv4 addresses to assign to the network interface. You can specify a primary private IP address by setting the value of the `Primary` property to `true` in the `PrivateIpAddressSpecification` property. 
If you want EC2 to automatically assign private IP addresses, use the `SecondaryPrivateIpAddressCount` property and do not specify this property.\n\nWhen creating a network interface, you can't specify private IPv4 addresses if you've specified one of the following: a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", "title": "PrivateIpAddresses", "type": "array" }, @@ -69564,7 +72263,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An arbitrary set of tags (key-value pairs) for this network interface.", + "markdownDescription": "The tags to apply to the network interface.", "title": "Tags", "type": "array" } @@ -69723,6 +72422,11 @@ "title": "DeviceIndex", "type": "string" }, + "EnaSrdSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification", + "markdownDescription": "Configures ENA Express for the network interface that this action attaches to the instance.", + "title": "EnaSrdSpecification" + }, "InstanceId": { "markdownDescription": "The ID of the instance to which you will attach the ENI.", "title": "InstanceId", @@ -69762,6 +72466,33 @@ ], "type": "object" }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdEnabled": { + "markdownDescription": "Indicates whether ENA Express is enabled for the network interface.", + "title": "EnaSrdEnabled", + "type": "boolean" + }, + "EnaSrdUdpSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification", + "markdownDescription": "Configures ENA Express for UDP network traffic.", + "title": "EnaSrdUdpSpecification" + } + }, + "type": "object" + }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdUdpEnabled": { + "markdownDescription": "Indicates whether UDP traffic to and from the instance uses ENA Express. 
To specify this setting, you must first enable ENA Express.", + "title": "EnaSrdUdpEnabled", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::EC2::NetworkInterfacePermission": { "additionalProperties": false, "properties": { @@ -70076,7 +72807,6 @@ }, "required": [ "AddressFamily", - "MaxEntries", "PrefixListName" ], "type": "object" @@ -70438,12 +73168,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70453,12 +73183,12 @@ "type": "string" }, "DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for the destination AWS service. 
This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70472,6 +73202,11 @@ "title": "IpProtocol", "type": "string" }, + "SourceSecurityGroupId": { + "markdownDescription": "", + "title": "SourceSecurityGroupId", + "type": "string" + }, "ToPort": { "markdownDescription": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).", "title": "ToPort", @@ -70487,12 +73222,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70578,12 +73313,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination 
security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70593,12 +73328,12 @@ "type": "string" }, "DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for an AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for an AWS service. 
This is the AWS service to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70686,12 +73421,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -71043,7 +73778,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The volume type. 
For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -71280,6 +74015,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::SpotFleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -75831,7 +78571,7 @@ "type": "string" }, "Encrypted": { - "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) in the *Amazon Elastic Compute Cloud User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .", + "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/ebs/latest/userguide/work-with-ebs-encr.html#encryption-by-default) in the *Amazon EBS User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. 
For more information, see [Supported instance types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances) .", "title": "Encrypted", "type": "boolean" }, @@ -75879,7 +78619,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nDefault: `gp2`", + "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) .\n\nDefault: `gp2`", "title": "VolumeType", "type": "string" } @@ -77002,7 +79742,7 @@ "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` will not be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that is run using *capacityProviderA* , four tasks would use *capacityProviderB* .", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. 
Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", "title": "Weight", "type": "number" } @@ -77209,7 +79949,7 @@ "type": "string" }, "PropagateTags": { - "markdownDescription": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n\nThe default is `NONE` .", + "markdownDescription": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n\nYou must set this to a value other than `NONE` when you use Cost Explorer. For more information, see [Amazon ECS usage reports](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/usage-reports.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe default is `NONE` .", "title": "PropagateTags", "type": "string" }, @@ -77536,7 +80276,7 @@ "additionalProperties": false, "properties": { "Field": { - "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `CPU` and `MEMORY` . For the `random` placement strategy, this field is not used.", + "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that's applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `cpu` and `memory` . 
For the `random` placement strategy, this field is not used.", "title": "Field", "type": "string" }, @@ -77647,6 +80387,16 @@ "markdownDescription": "The `portName` must match the name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service.", "title": "PortName", "type": "string" + }, + "Timeout": { + "$ref": "#/definitions/AWS::ECS::Service.TimeoutConfiguration", + "markdownDescription": "A reference to an object that represents the configured timeouts for Service Connect.", + "title": "Timeout" + }, + "Tls": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsConfiguration", + "markdownDescription": "A reference to an object that represents a Transport Layer Security (TLS) configuration.", + "title": "Tls" } }, "required": [ @@ -77654,6 +80404,41 @@ ], "type": "object" }, + "AWS::ECS::Service.ServiceConnectTlsCertificateAuthority": { + "additionalProperties": false, + "properties": { + "AwsPcaAuthorityArn": { + "markdownDescription": "The ARN of the AWS Private Certificate Authority certificate.", + "title": "AwsPcaAuthorityArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Service.ServiceConnectTlsConfiguration": { + "additionalProperties": false, + "properties": { + "IssuerCertificateAuthority": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsCertificateAuthority", + "markdownDescription": "The signer certificate authority.", + "title": "IssuerCertificateAuthority" + }, + "KmsKey": { + "markdownDescription": "The AWS Key Management Service key.", + "title": "KmsKey", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "IssuerCertificateAuthority" + ], + "type": "object" + }, "AWS::ECS::Service.ServiceManagedEBSVolumeConfiguration": { "additionalProperties": false, "properties": { @@ -77761,6 +80546,22 @@ ], "type": "object" }, + "AWS::ECS::Service.TimeoutConfiguration": { + "additionalProperties": false, + "properties": { + "IdleTimeoutSeconds": { + "markdownDescription": "The amount of time in seconds a connection will stay active while idle. A value of `0` can be set to disable `idleTimeout` .\n\nThe `idleTimeout` default for `HTTP` / `HTTP2` / `GRPC` is 5 minutes.\n\nThe `idleTimeout` default for `TCP` is 1 hour.", + "title": "IdleTimeoutSeconds", + "type": "number" + }, + "PerRequestTimeoutSeconds": { + "markdownDescription": "The amount of time waiting for the upstream to respond with a complete response per request. A value of `0` can be set to disable `perRequestTimeout` . `perRequestTimeout` can only be set if Service Connect `appProtocol` isn't `TCP` . Only `idleTimeout` is allowed for `TCP` `appProtocol` .", + "title": "PerRequestTimeoutSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::ECS::TaskDefinition": { "additionalProperties": false, "properties": { @@ -77954,6 +80755,14 @@ "title": "Cpu", "type": "number" }, + "CredentialSpecs": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . 
The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide a ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", + "title": "CredentialSpecs", + "type": "array" + }, "DependsOn": { "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.ContainerDependency" @@ -78321,7 +81130,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The file type to use. The only supported value is `s3` .", + "markdownDescription": "The file type to use. Environment files are objects in Amazon S3. The only supported value is `s3` .", "title": "Type", "type": "string" }, @@ -78337,7 +81146,7 @@ "additionalProperties": false, "properties": { "SizeInGiB": { - "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB.", + "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB.", "title": "SizeInGiB", "type": "number" } @@ -78933,6 +81742,14 @@ "title": "ServiceRegistries", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. 
You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + }, "TaskDefinition": { "markdownDescription": "The task definition for the tasks in the task set to use. If a revision isn't specified, the latest `ACTIVE` revision is used.", "title": "TaskDefinition", @@ -80468,7 +83285,7 @@ "additionalProperties": false, "properties": { "AmiType": { - "markdownDescription": "The AMI type for your node group. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add `eks:kube-proxy-windows` to your Windows nodes `rolearn` in the `aws-auth` `ConfigMap` . For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The AMI type for your node group. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add `eks:kube-proxy-windows` to your Windows nodes `rolearn` in the `aws-auth` `ConfigMap` . For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "AmiType", "type": "string" }, @@ -80483,7 +83300,7 @@ "type": "string" }, "DiskSize": { - "markdownDescription": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "DiskSize", "type": "number" }, @@ -80496,7 +83313,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. 
If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "InstanceTypes", "type": "array" }, @@ -80513,11 +83330,11 @@ }, "LaunchTemplate": { "$ref": "#/definitions/AWS::EKS::Nodegroup.LaunchTemplateSpecification", - "markdownDescription": "An object representing a node group's launch template specification. If specified, then do not specify `instanceTypes` , `diskSize` , or `remoteAccess` and make sure that the launch template meets the requirements in `launchTemplateSpecification` .", + "markdownDescription": "An object representing a node group's launch template specification. When using this object, don't directly specify `instanceTypes` , `diskSize` , or `remoteAccess` . Make sure that the launch template meets the requirements in `launchTemplateSpecification` . Also refer to [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "LaunchTemplate" }, "NodeRole": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. 
The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "NodeRole", "type": "string" }, @@ -80533,7 +83350,7 @@ }, "RemoteAccess": { "$ref": "#/definitions/AWS::EKS::Nodegroup.RemoteAccess", - "markdownDescription": "The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify `launchTemplate` , then don't specify `remoteAccess` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify `launchTemplate` , then don't specify `remoteAccess` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "RemoteAccess" }, "ScalingConfig": { @@ -80545,7 +83362,7 @@ "items": { "type": "string" }, - "markdownDescription": "The subnets to use for the Auto Scaling group that is created for your node group. If you specify `launchTemplate` , then don't specify `[SubnetId](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The subnets to use for the Auto Scaling group that is created for your node group. If you specify `launchTemplate` , then don't specify `[SubnetId](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "Subnets", "type": "array" }, @@ -80926,7 +83743,7 @@ "title": "ManagedScalingPolicy" }, "Name": { - "markdownDescription": "The name of the cluster.", + "markdownDescription": "The name of the cluster. 
This parameter can't contain the characters <, >, $, |, or ` (backtick).", "title": "Name", "type": "string" }, @@ -81569,6 +84386,11 @@ "markdownDescription": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", "title": "TerminationProtected", "type": "boolean" + }, + "UnhealthyNodeReplacement": { + "markdownDescription": "Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.", + "title": "UnhealthyNodeReplacement", + "type": "boolean" } }, "type": "object" @@ -85076,7 +87898,7 @@ "Port": { "markdownDescription": "The port number that the cache engine is listening on.", "title": "Port", - "type": "number" + "type": "string" } }, "type": "object" @@ -87469,6 +90291,11 @@ "Properties": { "additionalProperties": false, "properties": { + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": { + "markdownDescription": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink .", + "title": "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic", + "type": "string" + }, "IpAddressType": { "markdownDescription": "The IP address type. The possible values are `ipv4` (for IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.", "title": "IpAddressType", @@ -87556,7 +90383,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. 
The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", "title": "Key", "type": "string" }, @@ -93180,7 +96007,7 @@ "type": "number" }, "Mode": { - "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, , or if it using a `USER_PROVISIONED` value.", + "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it using a `USER_PROVISIONED` value.", "title": "Mode", "type": "string" } @@ -93301,7 +96128,7 @@ "type": "string" }, "HAPairs": { - "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file system are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . 
For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", + "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 12.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "title": "HAPairs", "type": "number" }, @@ -93324,7 +96151,7 @@ "type": "number" }, "ThroughputCapacityPerHAPair": { - "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", + "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 12).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "title": "ThroughputCapacityPerHAPair", "type": "number" }, @@ -98154,6 +100981,11 @@ "title": "Description", "type": "string" }, + "LakeFormationConfiguration": { + "$ref": 
"#/definitions/AWS::Glue::Crawler.LakeFormationConfiguration", + "markdownDescription": "Specifies whether the crawler should use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.", + "title": "LakeFormationConfiguration" + }, "Name": { "markdownDescription": "The name of the crawler.", "title": "Name", @@ -98352,6 +101184,22 @@ }, "type": "object" }, + "AWS::Glue::Crawler.LakeFormationConfiguration": { + "additionalProperties": false, + "properties": { + "AccountId": { + "markdownDescription": "Required for cross account crawls. For same account crawls as the target data, this can be left as null.", + "title": "AccountId", + "type": "string" + }, + "UseLakeFormationCredentials": { + "markdownDescription": "Specifies whether to use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.", + "title": "UseLakeFormationCredentials", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::Glue::Crawler.MongoDBTarget": { "additionalProperties": false, "properties": { @@ -98701,6 +101549,11 @@ "title": "CatalogEncryptionMode", "type": "string" }, + "CatalogEncryptionServiceRole": { + "markdownDescription": "The role that AWS Glue assumes to encrypt and decrypt the Data Catalog objects on the caller's behalf.", + "title": "CatalogEncryptionServiceRole", + "type": "string" + }, "SseAwsKmsKeyId": { "markdownDescription": "The ID of the AWS KMS key to use for encryption at rest.", "title": "SseAwsKmsKeyId", @@ -100906,6 +103759,117 @@ }, "type": "object" }, + "AWS::Glue::TableOptimizer": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CatalogId": { + "markdownDescription": "The catalog ID of the table.", + "title": "CatalogId", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", + "title": "DatabaseName", + "type": "string" + }, + "TableName": { + "markdownDescription": "The table name. For Hive compatibility, this must be entirely lowercase.", + "title": "TableName", + "type": "string" + }, + "TableOptimizerConfiguration": { + "$ref": "#/definitions/AWS::Glue::TableOptimizer.TableOptimizerConfiguration", + "markdownDescription": "Specifies configuration details of a table optimizer.", + "title": "TableOptimizerConfiguration" + }, + "Type": { + "markdownDescription": "The type of table optimizer. 
Currently, the only valid value is compaction.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "CatalogId", + "DatabaseName", + "TableName", + "TableOptimizerConfiguration", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Glue::TableOptimizer" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Glue::TableOptimizer.TableOptimizerConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Whether the table optimization is enabled.", + "title": "Enabled", + "type": "boolean" + }, + "RoleArn": { + "markdownDescription": "A role passed by the caller which gives the service permission to update the resources associated with the optimizer on the caller's behalf.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "Enabled", + "RoleArn" + ], + "type": "object" + }, "AWS::Glue::Trigger": { "additionalProperties": false, "properties": { @@ -105814,7 +108778,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::GuardDuty::Filter.TagItem" }, "markdownDescription": "The tags to be added to a new filter resource. Each tag consists of a key and an optional value, both of which you define.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", @@ -105822,12 +108786,7 @@ } }, "required": [ - "Action", - "Description", - "DetectorId", - "FindingCriteria", - "Name", - "Rank" + "FindingCriteria" ], "type": "object" }, @@ -105934,14 +108893,37 @@ "additionalProperties": false, "properties": { "Criterion": { + "additionalProperties": false, "markdownDescription": "Represents a map of finding properties that match specified conditions and values when querying findings.\n\nFor information about JSON criterion mapping to their console equivalent, see [Finding criteria](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_filter-findings.html#filter_criteria) . 
The following are the available criterion:\n\n- accountId\n- id\n- region\n- severity\n\nTo filter on the basis of severity, API and CFN use the following input list for the condition:\n\n- *Low* : `[\"1\", \"2\", \"3\"]`\n- *Medium* : `[\"4\", \"5\", \"6\"]`\n- *High* : `[\"7\", \"8\", \"9\"]`\n\nFor more information, see [Severity levels for GuardDuty findings](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity) .\n- type\n- updatedAt\n\nType: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.\n- resource.accessKeyDetails.accessKeyId\n- resource.accessKeyDetails.principalId\n- resource.accessKeyDetails.userName\n- resource.accessKeyDetails.userType\n- resource.instanceDetails.iamInstanceProfile.id\n- resource.instanceDetails.imageId\n- resource.instanceDetails.instanceId\n- resource.instanceDetails.tags.key\n- resource.instanceDetails.tags.value\n- resource.instanceDetails.networkInterfaces.ipv6Addresses\n- resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress\n- resource.instanceDetails.networkInterfaces.publicDnsName\n- resource.instanceDetails.networkInterfaces.publicIp\n- resource.instanceDetails.networkInterfaces.securityGroups.groupId\n- resource.instanceDetails.networkInterfaces.securityGroups.groupName\n- resource.instanceDetails.networkInterfaces.subnetId\n- resource.instanceDetails.networkInterfaces.vpcId\n- resource.instanceDetails.outpostArn\n- resource.resourceType\n- resource.s3BucketDetails.publicAccess.effectivePermissions\n- resource.s3BucketDetails.name\n- resource.s3BucketDetails.tags.key\n- resource.s3BucketDetails.tags.value\n- resource.s3BucketDetails.type\n- service.action.actionType\n- service.action.awsApiCallAction.api\n- service.action.awsApiCallAction.callerType\n- service.action.awsApiCallAction.errorCode\n- service.action.awsApiCallAction.remoteIpDetails.city.cityName\n- service.action.awsApiCallAction.remoteIpDetails.country.countryName\n- service.action.awsApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.awsApiCallAction.remoteIpDetails.organization.asn\n- service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg\n- service.action.awsApiCallAction.serviceName\n- service.action.dnsRequestAction.domain\n- service.action.networkConnectionAction.blocked\n- service.action.networkConnectionAction.connectionDirection\n- service.action.networkConnectionAction.localPortDetails.port\n- service.action.networkConnectionAction.protocol\n- service.action.networkConnectionAction.remoteIpDetails.city.cityName\n- service.action.networkConnectionAction.remoteIpDetails.country.countryName\n- service.action.networkConnectionAction.remoteIpDetails.ipAddressV4\n- service.action.networkConnectionAction.remoteIpDetails.organization.asn\n- service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg\n- service.action.networkConnectionAction.remotePortDetails.port\n- service.action.awsApiCallAction.remoteAccountDetails.affiliated\n- service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.kubernetesApiCallAction.requestUri\n- service.action.networkConnectionAction.localIpDetails.ipAddressV4\n- service.action.networkConnectionAction.protocol\n- service.action.awsApiCallAction.serviceName\n- service.action.awsApiCallAction.remoteAccountDetails.accountId\n- service.additionalInfo.threatListName\n- service.resourceRole\n- resource.eksClusterDetails.name\n- 
resource.kubernetesDetails.kubernetesWorkloadDetails.name\n- resource.kubernetesDetails.kubernetesWorkloadDetails.namespace\n- resource.kubernetesDetails.kubernetesUserDetails.username\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix\n- service.ebsVolumeScanDetails.scanId\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash\n- resource.ecsClusterDetails.name\n- resource.ecsClusterDetails.taskDetails.containers.image\n- resource.ecsClusterDetails.taskDetails.definitionArn\n- resource.containerDetails.image\n- resource.rdsDbInstanceDetails.dbInstanceIdentifier\n- resource.rdsDbInstanceDetails.dbClusterIdentifier\n- resource.rdsDbInstanceDetails.engine\n- resource.rdsDbUserDetails.user\n- resource.rdsDbInstanceDetails.tags.key\n- resource.rdsDbInstanceDetails.tags.value\n- service.runtimeDetails.process.executableSha256\n- service.runtimeDetails.process.name\n- service.runtimeDetails.process.name\n- resource.lambdaDetails.functionName\n- resource.lambdaDetails.functionArn\n- resource.lambdaDetails.tags.key\n- resource.lambdaDetails.tags.value", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + } + }, "title": "Criterion", "type": "object" + } + }, + "type": "object" + }, + "AWS::GuardDuty::Filter.TagItem": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "", + "title": "Key", + "type": "string" }, - "ItemType": { - "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + "Value": { + "markdownDescription": "", + "title": "Value", + "type": "string" } }, + "required": [ + "Key", + "Value" + ], "type": "object" }, "AWS::GuardDuty::IPSet": { @@ -106205,9 +109187,7 @@ } }, "required": [ - "DetectorId", - "Email", - "MemberId" + "Email" ], "type": "object" }, @@ -108445,6 +111425,77 @@ }, "type": "object" }, + "AWS::IVS::Stage": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Stage name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-stage-tag.html) .", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::IVS::Stage" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::IVS::StreamKey": { "additionalProperties": false, "properties": { @@ -110953,7 +114004,8 @@ } }, "required": [ - "Name" + "Name", + "SemanticVersion" ], "type": "object" }, @@ -111295,23 +114347,113 @@ 
"Properties": { "additionalProperties": false, "properties": { - "ResourceGroupTags": { - "items": { - "$ref": "#/definitions/Tag" + "ResourceGroupTags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "title": "ResourceGroupTags", + "type": "array" + } + }, + "required": [ + "ResourceGroupTags" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Inspector::ResourceGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ScanName": { + "markdownDescription": "The name of the CIS scan configuration.", + "title": "ScanName", + "type": "string" + }, + "Schedule": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Schedule", + "markdownDescription": "The CIS scan configuration's schedule.", + "title": "Schedule" + }, + "SecurityLevel": { + "markdownDescription": "The CIS scan configuration's CIS Benchmark level.", + "title": "SecurityLevel", + "type": "string" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "The CIS scan configuration's tags.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } }, - "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", - "title": "ResourceGroupTags", - "type": "array" + "title": "Tags", + "type": "object" + }, + "Targets": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.CisTargets", + "markdownDescription": "The CIS scan configuration's targets.", + "title": "Targets" } }, - "required": [ - "ResourceGroupTags" - ], "type": "object" }, "Type": { "enum": [ - "AWS::Inspector::ResourceGroup" + "AWS::InspectorV2::CisScanConfiguration" ], "type": "string" }, @@ -111325,8 +114467,132 @@ } }, "required": [ - "Type", - "Properties" + "Type" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.CisTargets": { + "additionalProperties": false, + "properties": { + "AccountIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The CIS target account ids.", + "title": "AccountIds", + "type": "array" + }, + "TargetResourceTags": { + "markdownDescription": "The CIS target resource tags.", + "title": "TargetResourceTags", + "type": "object" + } + }, + "required": [ + "AccountIds" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.DailySchedule": { + "additionalProperties": false, + "properties": { + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The schedule start 
time.", + "title": "StartTime" + } + }, + "required": [ + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.MonthlySchedule": { + "additionalProperties": false, + "properties": { + "Day": { + "markdownDescription": "The monthly schedule's day.", + "title": "Day", + "type": "string" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The monthly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Day", + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Schedule": { + "additionalProperties": false, + "properties": { + "Daily": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.DailySchedule", + "markdownDescription": "A daily schedule.", + "title": "Daily" + }, + "Monthly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.MonthlySchedule", + "markdownDescription": "A monthly schedule.", + "title": "Monthly" + }, + "OneTime": { + "markdownDescription": "A one time schedule.", + "title": "OneTime", + "type": "object" + }, + "Weekly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.WeeklySchedule", + "markdownDescription": "A weekly schedule.", + "title": "Weekly" + } + }, + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Time": { + "additionalProperties": false, + "properties": { + "TimeOfDay": { + "markdownDescription": "The time of day in 24-hour format (00:00).", + "title": "TimeOfDay", + "type": "string" + }, + "TimeZone": { + "markdownDescription": "The timezone.", + "title": "TimeZone", + "type": "string" + } + }, + "required": [ + "TimeOfDay", + "TimeZone" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.WeeklySchedule": { + "additionalProperties": false, + "properties": { + "Days": { + "items": { + "type": "string" + }, + "markdownDescription": "The weekly schedule's days.", + "title": "Days", + "type": "array" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The weekly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Days", + "StartTime" ], "type": "object" }, @@ -111836,11 +115102,21 @@ "markdownDescription": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. 
In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .", "title": "HealthEventsConfig" }, + "IncludeLinkedAccounts": { + "markdownDescription": "", + "title": "IncludeLinkedAccounts", + "type": "boolean" + }, "InternetMeasurementsLogDelivery": { "$ref": "#/definitions/AWS::InternetMonitor::Monitor.InternetMeasurementsLogDelivery", "markdownDescription": "Publish internet measurements for a monitor for all city-networks (up to the 500,000 service limit) to another location, such as an Amazon S3 bucket. Measurements are also published to Amazon CloudWatch Logs for the first 500 (by traffic volume) city-networks (client locations and ASNs, typically internet service providers or ISPs).", "title": "InternetMeasurementsLogDelivery" }, + "LinkedAccountId": { + "markdownDescription": "", + "title": "LinkedAccountId", + "type": "string" + }, "MaxCityNetworksToMonitor": { "markdownDescription": "The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network, such as an internet service provider, that clients access the resources through.\n\nFor more information, see [Choosing a city-network maximum value](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/IMCityNetworksMaximum.html) in *Using Amazon CloudWatch Internet Monitor* .", "title": "MaxCityNetworksToMonitor", @@ -113210,6 +116486,11 @@ "title": "ServerCertificateArns", "type": "array" }, + "ServerCertificateConfig": { + "$ref": "#/definitions/AWS::IoT::DomainConfiguration.ServerCertificateConfig", + "markdownDescription": "The server certificate configuration.\n\nFor more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "ServerCertificateConfig" + }, "ServiceType": { "markdownDescription": "The type of service delivered by the endpoint.\n\n> AWS IoT Core currently supports only the `DATA` service type.", "title": "ServiceType", @@ -113272,6 +116553,17 @@ }, "type": "object" }, + "AWS::IoT::DomainConfiguration.ServerCertificateConfig": { + "additionalProperties": false, + "properties": { + "EnableOCSPCheck": { + "markdownDescription": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "EnableOCSPCheck", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::IoT::DomainConfiguration.ServerCertificateSummary": { "additionalProperties": false, "properties": { @@ -121224,7 +124516,7 @@ "title": "AccessPolicyIdentity" }, "AccessPolicyPermission": { - "markdownDescription": "The permission level for this access policy. Choose either a `ADMINISTRATOR` or `VIEWER` . Note that a project `ADMINISTRATOR` is also known as a project owner.", + "markdownDescription": "The permission level for this access policy. 
Note that a project `ADMINISTRATOR` is also known as a project owner.", "title": "AccessPolicyPermission", "type": "string" }, @@ -121277,7 +124569,7 @@ }, "User": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.User", - "markdownDescription": "The IAM Identity Center user to which this access policy maps.", + "markdownDescription": "An IAM Identity Center user identity.", "title": "User" } }, @@ -121288,12 +124580,12 @@ "properties": { "Portal": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Portal", - "markdownDescription": "The AWS IoT SiteWise Monitor portal for this access policy.", + "markdownDescription": "Identifies an AWS IoT SiteWise Monitor portal.", "title": "Portal" }, "Project": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Project", - "markdownDescription": "The AWS IoT SiteWise Monitor project for this access policy.", + "markdownDescription": "Identifies a specific AWS IoT SiteWise Monitor project.", "title": "Project" } }, @@ -121347,7 +124639,7 @@ "additionalProperties": false, "properties": { "id": { - "markdownDescription": "The ID of the user.", + "markdownDescription": "The IAM Identity Center ID of the user.", "title": "id", "type": "string" } @@ -121390,15 +124682,20 @@ "additionalProperties": false, "properties": { "AssetDescription": { - "markdownDescription": "A description for the asset.", + "markdownDescription": "The ID of the asset, in UUID format.", "title": "AssetDescription", "type": "string" }, + "AssetExternalId": { + "markdownDescription": "The external ID of the asset model composite model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "AssetExternalId", + "type": "string" + }, "AssetHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::Asset.AssetHierarchy" }, - "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyLogicalId` . A hierarchy specifies allowed parent/child asset relationships.", + "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyId` . A hierarchy specifies allowed parent/child asset relationships.", "title": "AssetHierarchies", "type": "array" }, @@ -121408,7 +124705,7 @@ "type": "string" }, "AssetName": { - "markdownDescription": "A unique, friendly name for the asset.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A friendly name for the asset.", "title": "AssetName", "type": "string" }, @@ -121464,15 +124761,24 @@ "title": "ChildAssetId", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of the hierarchy, if it has one. When you update an asset hierarchy, you may assign an external ID if it doesn't already have one. You can't change the external ID of an asset hierarchy that already has one. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy. 
This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The ID of the hierarchy. This ID is a `hierarchyId` .", "title": "LogicalId", "type": "string" } }, "required": [ - "ChildAssetId", - "LogicalId" + "ChildAssetId" ], "type": "object" }, @@ -121480,17 +124786,27 @@ "additionalProperties": false, "properties": { "Alias": { - "markdownDescription": "The property alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .\n\nThe property alias must have 1-1000 characters.", + "markdownDescription": "The alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .", "title": "Alias", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of the property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the asset property.\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset property.\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset property.", "title": "LogicalId", "type": "string" }, "NotificationState": { - "markdownDescription": "The MQTT notification state ( `ENABLED` or `DISABLED` ) for this asset property. When the notification state is `ENABLED` , AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .\n\n> You must use all caps for the NotificationState parameter. If you use lower case letters, you will receive a schema validation error.", + "markdownDescription": "The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .", "title": "NotificationState", "type": "string" }, @@ -121500,9 +124816,6 @@ "type": "string" } }, - "required": [ - "LogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::AssetModel": { @@ -121544,7 +124857,7 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel" }, - "markdownDescription": "The composite asset models that are part of this asset model. 
Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. You can use composite asset models to define alarms on this asset model.", + "markdownDescription": "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.\n\n> When creating custom composite models, you need to use [CreateAssetModelCompositeModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModelCompositeModel.html) . For more information, see [Creating custom composite models (Components)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-custom-composite-models.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelCompositeModels", "type": "array" }, @@ -121553,16 +124866,21 @@ "title": "AssetModelDescription", "type": "string" }, + "AssetModelExternalId": { + "markdownDescription": "The external ID of the asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "AssetModelExternalId", + "type": "string" + }, "AssetModelHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelHierarchy" }, - "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelHierarchies", "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -121570,10 +124888,15 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" }, - "markdownDescription": "The property definitions of the asset model. For more information, see [Defining data properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelProperties", "type": "array" }, + "AssetModelType": { + "markdownDescription": "The type of asset model.\n\n- *ASSET_MODEL* \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.\n- *COMPONENT_MODEL* \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.", + "title": "AssetModelType", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -121612,6 +124935,11 @@ "AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel": { "additionalProperties": false, "properties": { + "ComposedAssetModelId": { + "markdownDescription": "The ID of a component model which is reused to create this composite model.", + "title": "ComposedAssetModelId", + "type": "string" + }, "CompositeModelProperties": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" @@ -121625,11 +124953,34 @@ "title": "Description", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the asset model composite model.\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "Name": { "markdownDescription": "The name of the composite model.", "title": "Name", "type": "string" }, + "ParentAssetModelCompositeModelExternalId": { + "markdownDescription": "The external ID of the parent asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> If `ParentCompositeModelExternalId` is specified, this value overrides the value of `ExternalId` , if both are included.", + "title": "ParentAssetModelCompositeModelExternalId", + "type": "string" + }, + "Path": { + "items": { + "type": "string" + }, + "markdownDescription": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.", + "title": "Path", + "type": "array" + }, "Type": { "markdownDescription": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` .", "title": "Type", @@ -121646,24 +124997,33 @@ "additionalProperties": false, "properties": { "ChildAssetModelId": { - "markdownDescription": "The Id of the asset model.", + "markdownDescription": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. 
However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "title": "ChildAssetModelId", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the asset model hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set. \n\n- If you are callling [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) to create a *new* hierarchy: You can specify its ID here, if desired. AWS IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique.\n- If you are calling UpdateAssetModel to modify an *existing* hierarchy: This can be either the actual ID in UUID format, or else `externalId:` followed by the external ID, if it has one. For more information, see [Referencing objects with external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-id-references) in the *AWS IoT SiteWise User Guide* .", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+`", + "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model hierarchy.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation.", "title": "Name", "type": "string" } }, "required": [ "ChildAssetModelId", - "LogicalId", "Name" ], "type": "object" @@ -121672,7 +125032,7 @@ "additionalProperties": false, "properties": { "DataType": { - "markdownDescription": "The data type of the asset model property. 
The value can be `STRING` , `INTEGER` , `DOUBLE` , `BOOLEAN` , or `STRUCT` .", + "markdownDescription": "The data type of the asset model property.", "title": "DataType", "type": "string" }, @@ -121681,19 +125041,29 @@ "title": "DataTypeSpec", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the property.\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model property.\n\nThe maximum length is 256 characters, with the pattern `[^\\\\u0000-\\\\u001F\\\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset model property.", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model property.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model property.", "title": "Name", "type": "string" }, "Type": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyType", - "markdownDescription": "Contains a property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` .", + "markdownDescription": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "title": "Type" }, "Unit": { @@ -121704,7 +125074,6 @@ }, "required": [ "DataType", - "LogicalId", "Name", "Type" ], @@ -121725,7 +125094,7 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The friendly name of the variable to be used in the expression.\n\nThe maximum length is 64 characters with the pattern `^[a-z][a-z0-9_]*$` .", + "markdownDescription": "The friendly name of the variable to be used in the expression.", "title": "Name", "type": "string" }, @@ -121781,22 +125150,36 @@ }, "type": "object" }, + "AWS::IoTSiteWise::AssetModel.PropertyPathDefinition": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the path segment.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, "AWS::IoTSiteWise::AssetModel.PropertyType": { "additionalProperties": false, "properties": { "Attribute": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Attribute", - "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [industrial IoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.\n\nThis is required if the `TypeName` is `Attribute` and has a `DefaultValue` .", + "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [IIoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.", "title": "Attribute" }, "Metric": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Metric", - "markdownDescription": "Specifies an asset metric property. 
A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.\n\nThis is required if the `TypeName` is `Metric` .", + "markdownDescription": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.", "title": "Metric" }, "Transform": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Transform", - "markdownDescription": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.\n\nThis is required if the `TypeName` is `Transform` .", + "markdownDescription": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.", "title": "Transform" }, "TypeName": { @@ -121855,20 +125238,45 @@ "AWS::IoTSiteWise::AssetModel.VariableValue": { "additionalProperties": false, "properties": { + "HierarchyExternalId": { + "markdownDescription": "The external ID of the hierarchy being referenced. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "HierarchyExternalId", + "type": "string" + }, + "HierarchyId": { + "markdownDescription": "The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify `externalId:` followed by the external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\nYou use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same `propertyId` . For example, you might have separately grouped assets that come from the same asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "title": "HierarchyId", + "type": "string" + }, "HierarchyLogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. 
For more information, see [Defining relationships between asset models (hierarchies)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", "title": "HierarchyLogicalId", "type": "string" }, + "PropertyExternalId": { + "markdownDescription": "The external ID of the property being referenced. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "PropertyExternalId", + "type": "string" + }, + "PropertyId": { + "markdownDescription": "The ID of the property to use as the variable. You can use the property `name` if it's from the same asset model. If the property has an external ID, you can specify `externalId:` followed by the external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> This is a return value and can't be set.", + "title": "PropertyId", + "type": "string" + }, "PropertyLogicalId": { - "markdownDescription": "The `LogicalID` of the property to use as the variable.", + "markdownDescription": "The `LogicalID` of the property that is being referenced.", "title": "PropertyLogicalId", "type": "string" + }, + "PropertyPath": { + "items": { + "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyPathDefinition" + }, + "markdownDescription": "The path of the property. Each step of the path is the name of the step. See the following example:\n\n`PropertyPath: Name: AssetModelName Name: Composite1 Name: NestedComposite`", + "title": "PropertyPath", + "type": "array" } }, - "required": [ - "PropertyLogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::Dashboard": { @@ -122007,7 +125415,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -122061,7 +125469,7 @@ "type": "string" }, "CapabilityNamespace": { - "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .\n\nThe maximum length is 512 characters with the pattern `^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$` .", + "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .", "title": "CapabilityNamespace", "type": "string" } @@ -122083,6 +125491,11 @@ "$ref": "#/definitions/AWS::IoTSiteWise::Gateway.GreengrassV2", "markdownDescription": "A gateway that runs on AWS IoT Greengrass V2 .", "title": "GreengrassV2" + }, + "SiemensIE": { + "$ref": "#/definitions/AWS::IoTSiteWise::Gateway.SiemensIE", + "markdownDescription": "", + "title": "SiemensIE" } }, "type": "object" @@ -122091,7 +125504,7 @@ "additionalProperties": false, "properties": { "GroupArn": { - "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. 
For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/latest/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/latest/apireference/getgroup-get.html) in the *AWS IoT Greengrass API Reference* .", + "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* .", "title": "GroupArn", "type": "string" } @@ -122115,6 +125528,20 @@ ], "type": "object" }, + "AWS::IoTSiteWise::Gateway.SiemensIE": { + "additionalProperties": false, + "properties": { + "IotCoreThingName": { + "markdownDescription": "", + "title": "IotCoreThingName", + "type": "string" + } + }, + "required": [ + "IotCoreThingName" + ], + "type": "object" + }, "AWS::IoTSiteWise::Portal": { "additionalProperties": false, "properties": { @@ -122161,7 +125588,7 @@ "type": "string" }, "PortalAuthMode": { - "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center , you must enable IAM Identity Center . For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management ( IAM ) to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", + "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center, you must enable IAM Identity Center. For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . 
This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", "title": "PortalAuthMode", "type": "string" }, @@ -124848,6 +128275,11 @@ "title": "Name", "type": "string" }, + "Positioning": { + "markdownDescription": "FPort values for the GNSS, Stream, and ClockSync functions of the positioning information.", + "title": "Positioning", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -124934,6 +128366,41 @@ ], "type": "object" }, + "AWS::IoTWireless::WirelessDevice.Application": { + "additionalProperties": false, + "properties": { + "DestinationName": { + "markdownDescription": "The name of the position data destination that describes the IoT rule that processes the device's position data.", + "title": "DestinationName", + "type": "string" + }, + "FPort": { + "markdownDescription": "The name of the new destination for the device.", + "title": "FPort", + "type": "number" + }, + "Type": { + "markdownDescription": "Application type, which can be specified to obtain real-time position information of your LoRaWAN device.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::IoTWireless::WirelessDevice.FPorts": { + "additionalProperties": false, + "properties": { + "Applications": { + "items": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.Application" + }, + "markdownDescription": "LoRaWAN application configuration, which can be used to perform geolocation.", + "title": "Applications", + "type": "array" + } + }, + "type": "object" + }, "AWS::IoTWireless::WirelessDevice.LoRaWANDevice": { "additionalProperties": false, "properties": { @@ -124957,6 +128424,11 @@ "title": "DeviceProfileId", "type": "string" }, + "FPorts": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.FPorts", + "markdownDescription": "List of FPort assigned for different LoRaWAN application packages to use.", + "title": "FPorts" + }, "OtaaV10x": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.OtaaV10x", "markdownDescription": "OTAA device object for create APIs for v1.0.x", @@ -125687,6 +129159,14 @@ "title": "ServiceExecutionRoleArn", "type": "string" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + }, "WorkerConfiguration": { "$ref": "#/definitions/AWS::KafkaConnect::Connector.WorkerConfiguration", "markdownDescription": "The worker configurations that are in use with the connector.", @@ -126067,6 +129547,239 @@ }, "type": "object" }, + "AWS::KafkaConnect::CustomPlugin": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ContentType": { + "markdownDescription": "The format of the plugin file.", + "title": "ContentType", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the custom plugin.", + "title": "Description", + "type": "string" + }, + "Location": { + "$ref": 
"#/definitions/AWS::KafkaConnect::CustomPlugin.CustomPluginLocation", + "markdownDescription": "Information about the location of the custom plugin.", + "title": "Location" + }, + "Name": { + "markdownDescription": "The name of the custom plugin.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ContentType", + "Location", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::KafkaConnect::CustomPlugin" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::KafkaConnect::CustomPlugin.CustomPluginFileDescription": { + "additionalProperties": false, + "properties": { + "FileMd5": { + "markdownDescription": "The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.", + "title": "FileMd5", + "type": "string" + }, + "FileSize": { + "markdownDescription": "The size in bytes of the custom plugin file. You can use it to validate the file.", + "title": "FileSize", + "type": "number" + } + }, + "type": "object" + }, + "AWS::KafkaConnect::CustomPlugin.CustomPluginLocation": { + "additionalProperties": false, + "properties": { + "S3Location": { + "$ref": "#/definitions/AWS::KafkaConnect::CustomPlugin.S3Location", + "markdownDescription": "The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.", + "title": "S3Location" + } + }, + "required": [ + "S3Location" + ], + "type": "object" + }, + "AWS::KafkaConnect::CustomPlugin.S3Location": { + "additionalProperties": false, + "properties": { + "BucketArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of an S3 bucket.", + "title": "BucketArn", + "type": "string" + }, + "FileKey": { + "markdownDescription": "The file key for an object in an S3 bucket.", + "title": "FileKey", + "type": "string" + }, + "ObjectVersion": { + "markdownDescription": "The version of an object in an S3 bucket.", + "title": "ObjectVersion", + "type": "string" + } + }, + "required": [ + "BucketArn", + "FileKey" + ], + "type": "object" + }, + "AWS::KafkaConnect::WorkerConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of a worker configuration.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the worker configuration.", + "title": "Name", + "type": "string" + }, + "PropertiesFileContent": { + "markdownDescription": "Base64 encoded contents of the connect-distributed.properties file.", + "title": "PropertiesFileContent", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Name", + "PropertiesFileContent" + ], + "type": "object" + }, + "Type": { + "enum": [ + 
"AWS::KafkaConnect::WorkerConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::Kendra::DataSource": { "additionalProperties": false, "properties": { @@ -127038,7 +130751,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- */myapp/config/** \u2014All files inside config directory.\n- ***/*.png* \u2014All .png files in all directories.\n- ***/*.{png, ico, md}* \u2014All .png, .ico or .md files in all directories.\n- */myapp/src/**/*.ts* \u2014All .ts files inside src directory (and all its subdirectories).\n- ***/!(*.module).ts* \u2014All .ts files but not .module.ts\n- **.png , *.jpg* \u2014All PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** \u2014All files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** \u2014All internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "ExclusionPatterns", "type": "array" }, @@ -127046,7 +130759,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- */myapp/config/** \u2014All files inside config directory.\n- ***/*.png* \u2014All .png files in all directories.\n- ***/*.{png, ico, md}* \u2014All .png, .ico or .md files in all directories.\n- */myapp/src/**/*.ts* \u2014All .ts files inside src directory (and all its subdirectories).\n- ***/!(*.module).ts* \u2014All .ts files but not .module.ts\n- **.png , *.jpg* \u2014All PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** \u2014All files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** \u2014All internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "InclusionPatterns", "type": "array" }, @@ -127814,6 +131527,11 @@ "title": "IndexId", "type": "string" }, + "LanguageCode": { + "markdownDescription": "The code for a language. This shows a supported language for the FAQ document as part of the summary information for FAQs. English is supported by default. For more information on supported languages, including their codes, see [Adding documents in languages other than English](https://docs.aws.amazon.com/kendra/latest/dg/in-adding-languages.html) .", + "title": "LanguageCode", + "type": "string" + }, "Name": { "markdownDescription": "The name that you assigned the FAQ when you created or updated the FAQ.", "title": "Name", @@ -128133,7 +131851,7 @@ "type": "string" }, "Freshness": { - "markdownDescription": "Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. You can only set the `Freshness` field on one `DATE` type field. Only applies to `DATE` fields.", + "markdownDescription": "Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. 
Only applies to `DATE` fields.", "title": "Freshness", "type": "boolean" }, @@ -128143,7 +131861,7 @@ "type": "number" }, "RankOrder": { - "markdownDescription": "Determines how values should be interpreted.\n\nWhen the `RankOrder` field is `ASCENDING` , higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.\n\nWhen the `RankOrder` field is `DESCENDING` , lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.\n\nOnly applies to `LONG` and `DOUBLE` fields.", + "markdownDescription": "Determines how values should be interpreted.\n\nWhen the `RankOrder` field is `ASCENDING` , higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.\n\nWhen the `RankOrder` field is `DESCENDING` , lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.\n\nOnly applies to `LONG` fields.", "title": "RankOrder", "type": "string" }, @@ -129432,7 +133150,7 @@ }, "SqlApplicationConfiguration": { "$ref": "#/definitions/AWS::KinesisAnalyticsV2::Application.SqlApplicationConfiguration", - "markdownDescription": "The creation and update parameters for a SQL-based Managed Service for Apache Flink application.", + "markdownDescription": "The creation and update parameters for a SQL-based Kinesis Data Analytics application.", "title": "SqlApplicationConfiguration" }, "VpcConfigurations": { @@ -129703,7 +133421,7 @@ "title": "KinesisStreamsInput" }, "NamePrefix": { - "markdownDescription": "The name prefix to use when creating an in-application stream. Suppose that you specify a prefix \" `MyInApplicationStream` .\" Managed Service for Apache Flink then creates one or more (as per the `InputParallelism` count you specified) in-application streams with the names \" `MyInApplicationStream_001` ,\" \" `MyInApplicationStream_002` ,\" and so on.", + "markdownDescription": "The name prefix to use when creating an in-application stream. Suppose that you specify a prefix \" `MyInApplicationStream` .\" Kinesis Data Analytics then creates one or more (as per the `InputParallelism` count you specified) in-application streams with the names \" `MyInApplicationStream_001` ,\" \" `MyInApplicationStream_002` ,\" and so on.", "title": "NamePrefix", "type": "string" } @@ -130250,7 +133968,7 @@ }, "Output": { "$ref": "#/definitions/AWS::KinesisAnalyticsV2::ApplicationOutput.Output", - "markdownDescription": "Describes a SQL-based Managed Service for Apache Flink application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream.", + "markdownDescription": "Describes a SQL-based Kinesis Data Analytics application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. 
The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream.", "title": "Output" } }, @@ -130410,7 +134128,7 @@ }, "ReferenceDataSource": { "$ref": "#/definitions/AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource", - "markdownDescription": "For a SQL-based Managed Service for Apache Flink application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table.", + "markdownDescription": "For a SQL-based Kinesis Data Analytics application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table.", "title": "ReferenceDataSource" } }, @@ -130702,6 +134420,11 @@ "markdownDescription": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "title": "S3DestinationConfiguration" }, + "SnowflakeDestinationConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration", + "markdownDescription": "Configure Snowflake destination", + "title": "SnowflakeDestinationConfiguration" + }, "SplunkDestinationConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SplunkDestinationConfiguration", "markdownDescription": "The configuration of a destination in Splunk for the delivery stream.", @@ -130711,7 +134434,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.", + "markdownDescription": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. 
For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", "title": "Tags", "type": "array" } @@ -131253,6 +134976,11 @@ "title": "CompressionFormat", "type": "string" }, + "CustomTimeZone": { + "markdownDescription": "The time zone you prefer. UTC is the default.", + "title": "CustomTimeZone", + "type": "string" + }, "DataFormatConversionConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DataFormatConversionConfiguration", "markdownDescription": "The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.", @@ -131273,6 +135001,11 @@ "title": "ErrorOutputPrefix", "type": "string" }, + "FileExtension": { + "markdownDescription": "Specify a file extension. It will override the default file extension", + "title": "FileExtension", + "type": "string" + }, "Prefix": { "markdownDescription": "The `YYYY/MM/DD/HH` time format prefix is automatically used for delivered Amazon S3 files. For more information, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", "title": "Prefix", @@ -131895,6 +135628,153 @@ }, "type": "object" }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration": { + "additionalProperties": false, + "properties": { + "AccountUrl": { + "markdownDescription": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . 
Note that the protocol (https://) and port number are optional.", + "title": "AccountUrl", + "type": "string" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", + "markdownDescription": "", + "title": "CloudWatchLoggingOptions" + }, + "ContentColumnName": { + "markdownDescription": "The name of the record content column", + "title": "ContentColumnName", + "type": "string" + }, + "DataLoadingOption": { + "markdownDescription": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", + "title": "DataLoadingOption", + "type": "string" + }, + "Database": { + "markdownDescription": "All data in Snowflake is maintained in databases.", + "title": "Database", + "type": "string" + }, + "KeyPassphrase": { + "markdownDescription": "Passphrase to decrypt the private key when the key is encrypted. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "KeyPassphrase", + "type": "string" + }, + "MetaDataColumnName": { + "markdownDescription": "The name of the record metadata column", + "title": "MetaDataColumnName", + "type": "string" + }, + "PrivateKey": { + "markdownDescription": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "PrivateKey", + "type": "string" + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", + "markdownDescription": "", + "title": "ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions", + "markdownDescription": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "RetryOptions" + }, + "RoleARN": { + "markdownDescription": "The Amazon Resource Name (ARN) of the Snowflake role", + "title": "RoleARN", + "type": "string" + }, + "S3BackupMode": { + "markdownDescription": "Choose an S3 backup mode", + "title": "S3BackupMode", + "type": "string" + }, + "S3Configuration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.S3DestinationConfiguration", + "markdownDescription": "", + "title": "S3Configuration" + }, + "Schema": { + "markdownDescription": "Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views", + "title": "Schema", + "type": "string" + }, + "SnowflakeRoleConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration", + "markdownDescription": "Optionally configure a Snowflake role. Otherwise the default user role will be used.", + "title": "SnowflakeRoleConfiguration" + }, + "SnowflakeVpcConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration", + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. 
For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "SnowflakeVpcConfiguration" + }, + "Table": { + "markdownDescription": "All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.", + "title": "Table", + "type": "string" + }, + "User": { + "markdownDescription": "User login name for the Snowflake account.", + "title": "User", + "type": "string" + } + }, + "required": [ + "AccountUrl", + "Database", + "PrivateKey", + "RoleARN", + "S3Configuration", + "Schema", + "Table", + "User" + ], + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions": { + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "markdownDescription": "the time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "DurationInSeconds", + "type": "number" + } + }, + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Enable Snowflake role", + "title": "Enabled", + "type": "boolean" + }, + "SnowflakeRole": { + "markdownDescription": "The Snowflake role you wish to configure", + "title": "SnowflakeRole", + "type": "string" + } + }, + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration": { + "additionalProperties": false, + "properties": { + "PrivateLinkVpceId": { + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "PrivateLinkVpceId", + "type": "string" + } + }, + "required": [ + "PrivateLinkVpceId" + ], + "type": "object" + }, "AWS::KinesisFirehose::DeliveryStream.SplunkBufferingHints": { "additionalProperties": false, "properties": { @@ -133149,6 +137029,11 @@ "Properties": { "additionalProperties": false, "properties": { + "HybridAccessEnabled": { + "markdownDescription": "Indicates whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.", + "title": "HybridAccessEnabled", + "type": "boolean" + }, "ResourceArn": { "markdownDescription": "The Amazon Resource Name (ARN) of the resource.", "title": "ResourceArn", @@ -133535,7 +137420,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -133955,7 +137840,7 @@ "title": "FilterCriteria" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -134903,7 +138788,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -135143,7 +139028,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -140463,6 +144348,14 @@ "AWS::Location::Map.MapConfiguration": { "additionalProperties": false, "properties": { + "CustomLayers": { + "items": { + "type": "string" + }, + "markdownDescription": "Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as the `POI` layer for the VectorEsriNavigation style.\n\n> Currenlty only `VectorEsriNavigation` supports CustomLayers. For more information, see [Custom Layers](https://docs.aws.amazon.com//location/latest/developerguide/map-concepts.html#map-custom-layers) .", + "title": "CustomLayers", + "type": "array" + }, "PoliticalView": { "markdownDescription": "Specifies the map political view selected from an available data provider.", "title": "PoliticalView", @@ -144365,6 +148258,17 @@ ], "type": "object" }, + "AWS::MSK::Replicator.ReplicationStartingPosition": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, "AWS::MSK::Replicator.TopicReplication": { "additionalProperties": false, "properties": { @@ -144383,6 +148287,11 @@ "title": "DetectAndCopyNewTopics", "type": "boolean" }, + "StartingPosition": { + "$ref": "#/definitions/AWS::MSK::Replicator.ReplicationStartingPosition", + "markdownDescription": "", + "title": "StartingPosition" + }, "TopicsToExclude": { "items": { "type": "string" @@ -154697,6 +158606,10 @@ "type": "array" } }, + "required": [ + "ChannelGroupName", + "ChannelName" + ], "type": "object" }, "Type": { @@ -154715,7 +158628,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154789,6 +158703,9 @@ "type": "array" } }, + "required": [ + "ChannelGroupName" + ], "type": "object" }, "Type": { @@ -154807,7 +158724,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154863,6 +158781,8 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", "Policy" ], "type": "object" @@ -154984,7 +158904,9 @@ } }, "required": [ - "ContainerType" + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" ], "type": "object" }, @@ -155353,6 +159275,9 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", "Policy" ], "type": "object" @@ -170194,6 +174119,11 @@ "title": "Identity", "type": "string" }, + "OrchestrationSendingRoleArn": { + "markdownDescription": "", + "title": "OrchestrationSendingRoleArn", + "type": "string" + }, "RoleArn": { "markdownDescription": "The ARN of the AWS Identity and Access Management (IAM) role that you want Amazon Pinpoint to use when it submits email-related event data for the channel.", "title": "RoleArn", @@ -186245,6 +190175,14 @@ "markdownDescription": "", "title": "Definition" }, + "LinkEntities": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of analysis Amazon Resource Names (ARNs) to be linked to the dashboard.", + "title": "LinkEntities", + "type": "array" + }, "LinkSharingConfiguration": { "$ref": "#/definitions/AWS::QuickSight::Dashboard.LinkSharingConfiguration", "markdownDescription": "A structure that contains the link sharing configurations that you want to apply overrides to.", @@ -214744,7 +218682,7 @@ "type": "boolean" }, "EnableHttpEndpoint": { - "markdownDescription": "A value that indicates whether to enable the HTTP endpoint for 
an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", + "markdownDescription": "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EnableHttpEndpoint", "type": "boolean" }, @@ -214759,7 +218697,7 @@ "type": "string" }, "EngineMode": { - "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only supports Aurora Serverless v1.\n\nLimitations and requirements apply to some DB engine modes. For more information, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n- [Limitations of parallel query](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n- [Limitations of Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EngineMode", "type": "string" }, @@ -214899,7 +218837,7 @@ "type": "boolean" }, "StorageType": { - "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", + "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", "title": "StorageType", "type": "string" }, @@ -215376,7 +219314,7 @@ "title": "Endpoint" }, "Engine": { - "markdownDescription": "The name of the database engine that you want to use for this DB instance.\n\nNot every database engine is available in every AWS Region.\n\n> When you are creating a DB instance, the `Engine` property is required. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "markdownDescription": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can change the architecture of an Oracle database from the non-container database (CDB) architecture to the CDB architecture by updating the `Engine` value in your templates from `oracle-ee` or `oracle-ee-cdb` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. 
\n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Engine", "type": "string" }, @@ -215544,7 +219482,7 @@ "type": "number" }, "StorageType": { - "markdownDescription": "Specifies the storage type to be associated with the DB instance.\n\nValid values: `gp2 | gp3 | io1 | standard`\n\nThe `standard` value is also known as magnetic.\n\nIf you specify `io1` or `gp3` , you must also include a value for the `Iops` parameter.\n\nDefault: `io1` if the `Iops` parameter is specified, otherwise `gp2`\n\nFor more information, see [Amazon RDS DB Instance Storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs).", + "markdownDescription": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", "title": "StorageType", "type": "string" }, @@ -216441,42 +220379,148 @@ "Properties": { "additionalProperties": false, "properties": { - "DBSubnetGroupDescription": { - "markdownDescription": "The description for the DB subnet group.", - "title": "DBSubnetGroupDescription", - "type": "string" + "DBSubnetGroupDescription": { + "markdownDescription": "The description for the DB subnet group.", + "title": "DBSubnetGroupDescription", + "type": "string" + }, + "DBSubnetGroupName": { + "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. 
Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "title": "DBSubnetGroupName", + "type": "string" + }, + "SubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The EC2 Subnet IDs for the DB subnet group.", + "title": "SubnetIds", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DBSubnetGroupDescription", + "SubnetIds" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::RDS::DBSubnetGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::RDS::EventSubscription": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", + "title": "Enabled", + "type": "boolean" }, - "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", - "title": "DBSubnetGroupName", + "EventCategories": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", + "title": "EventCategories", + "type": "array" + }, + "SnsTopicArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", + "title": "SnsTopicArn", "type": "string" }, - "SubnetIds": { + "SourceIds": { "items": { "type": "string" }, - "markdownDescription": "The EC2 Subnet IDs for the DB subnet group.", - "title": "SubnetIds", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "title": "SourceIds", "type": "array" }, + "SourceType": { + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "title": "SourceType", + "type": "string" + }, + "SubscriptionName": { + "markdownDescription": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", + "title": "SubscriptionName", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "An optional array of key-value pairs to apply to this subscription.", "title": "Tags", "type": "array" } }, "required": [ - "DBSubnetGroupDescription", - "SubnetIds" + "SnsTopicArn" ], "type": "object" }, "Type": { "enum": [ - "AWS::RDS::DBSubnetGroup" + "AWS::RDS::EventSubscription" ], "type": "string" }, @@ -216495,7 +220539,7 @@ ], "type": "object" }, - "AWS::RDS::EventSubscription": { + "AWS::RDS::GlobalCluster": { "additionalProperties": false, "properties": { "Condition": { @@ -216530,59 +220574,42 @@ "Properties": { "additionalProperties": false, "properties": { - "Enabled": { - "markdownDescription": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", - "title": "Enabled", + "DeletionProtection": { + "markdownDescription": "Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.", + "title": "DeletionProtection", "type": "boolean" }, - "EventCategories": { - "items": { - "type": "string" - }, - "markdownDescription": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", - "title": "EventCategories", - "type": "array" - }, - "SnsTopicArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. 
SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", - "title": "SnsTopicArn", + "Engine": { + "markdownDescription": "The database engine to use for this global database cluster.\n\nValid Values: `aurora-mysql | aurora-postgresql`\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine of the source DB cluster.", + "title": "Engine", "type": "string" }, - "SourceIds": { - "items": { - "type": "string" - }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", - "title": "SourceIds", - "type": "array" + "EngineVersion": { + "markdownDescription": "The engine version to use for this global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.", + "title": "EngineVersion", + "type": "string" }, - "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", - "title": "SourceType", + "GlobalClusterIdentifier": { + "markdownDescription": "The cluster identifier for this global database cluster. 
This parameter is stored as a lowercase string.", + "title": "GlobalClusterIdentifier", "type": "string" }, - "SubscriptionName": { - "markdownDescription": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", - "title": "SubscriptionName", + "SourceDBClusterIdentifier": { + "markdownDescription": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`", + "title": "SourceDBClusterIdentifier", "type": "string" }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "An optional array of key-value pairs to apply to this subscription.", - "title": "Tags", - "type": "array" + "StorageEncrypted": { + "markdownDescription": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.", + "title": "StorageEncrypted", + "type": "boolean" } }, - "required": [ - "SnsTopicArn" - ], "type": "object" }, "Type": { "enum": [ - "AWS::RDS::EventSubscription" + "AWS::RDS::GlobalCluster" ], "type": "string" }, @@ -216596,12 +220623,11 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, - "AWS::RDS::GlobalCluster": { + "AWS::RDS::Integration": { "additionalProperties": false, "properties": { "Condition": { @@ -216636,42 +220662,55 @@ "Properties": { "additionalProperties": false, "properties": { - "DeletionProtection": { - "markdownDescription": "Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.", - "title": "DeletionProtection", - "type": "boolean" + "AdditionalEncryptionContext": { + "additionalProperties": true, + "markdownDescription": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "AdditionalEncryptionContext", + "type": "object" }, - "Engine": { - "markdownDescription": "The database engine to use for this global database cluster.\n\nValid Values: `aurora-mysql | aurora-postgresql`\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine of the source DB cluster.", - "title": "Engine", + "IntegrationName": { + "markdownDescription": "The name of the integration.", + "title": "IntegrationName", "type": "string" }, - "EngineVersion": { - "markdownDescription": "The engine version to use for this global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.", - "title": "EngineVersion", + "KMSKeyId": { + "markdownDescription": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. 
If you don't specify an encryption key, RDS uses a default AWS owned key.", + "title": "KMSKeyId", "type": "string" }, - "GlobalClusterIdentifier": { - "markdownDescription": "The cluster identifier for this global database cluster. This parameter is stored as a lowercase string.", - "title": "GlobalClusterIdentifier", + "SourceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", + "title": "SourceArn", "type": "string" }, - "SourceDBClusterIdentifier": { - "markdownDescription": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`", - "title": "SourceDBClusterIdentifier", - "type": "string" + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "title": "Tags", + "type": "array" }, - "StorageEncrypted": { - "markdownDescription": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.", - "title": "StorageEncrypted", - "type": "boolean" + "TargetArn": { + "markdownDescription": "The ARN of the Redshift data warehouse to use as the target for replication.", + "title": "TargetArn", + "type": "string" } }, + "required": [ + "SourceArn", + "TargetArn" + ], "type": "object" }, "Type": { "enum": [ - "AWS::RDS::GlobalCluster" + "AWS::RDS::Integration" ], "type": "string" }, @@ -216685,7 +220724,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -217345,7 +221385,7 @@ "type": "string" }, "Port": { - "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values: `1150-65535`", + "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with ds2 or dc2 nodes - Select a port within the range `1150-65535` .", "title": "Port", "type": "number" }, @@ -218408,6 +222448,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret. 
You can only use this parameter if `ManageAdminPassword` is `true` .", + "title": "AdminPasswordSecretKmsKeyId", + "type": "string" + }, "AdminUserPassword": { "markdownDescription": "The password of the administrator for the primary database created in the namespace.", "title": "AdminUserPassword", @@ -218459,11 +222504,26 @@ "title": "LogExports", "type": "array" }, + "ManageAdminPassword": { + "markdownDescription": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials. You can't use `AdminUserPassword` if `ManageAdminPassword` is true. If `ManageAdminPassword` is `false` or not set, Amazon Redshift uses `AdminUserPassword` for the admin user account's password.", + "title": "ManageAdminPassword", + "type": "boolean" + }, "NamespaceName": { "markdownDescription": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "title": "NamespaceName", "type": "string" }, + "NamespaceResourcePolicy": { + "markdownDescription": "The resource policy that will be attached to the namespace.", + "title": "NamespaceResourcePolicy", + "type": "object" + }, + "RedshiftIdcApplicationArn": { + "markdownDescription": "The ARN for the Redshift application that integrates with IAM Identity Center.", + "title": "RedshiftIdcApplicationArn", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -218502,6 +222562,16 @@ "AWS::RedshiftServerless::Namespace.Namespace": { "additionalProperties": false, "properties": { + "AdminPasswordSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for the namespace's admin user credentials secret.", + "title": "AdminPasswordSecretArn", + "type": "string" + }, + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", + "title": "AdminPasswordSecretKmsKeyId", + "type": "string" + }, "AdminUsername": { "markdownDescription": "The username of the administrator for the first database created in the namespace.", "title": "AdminUsername", @@ -218619,6 +222689,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -218811,6 +222886,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. 
The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -220068,15 +224148,9 @@ "type": "string" }, "Policy": { - "additionalProperties": false, + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.PolicyMap", "markdownDescription": "The resiliency policy.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy" - } - }, - "title": "Policy", - "type": "object" + "title": "Policy" }, "PolicyDescription": { "markdownDescription": "The description for the policy.", @@ -220153,6 +224227,37 @@ ], "type": "object" }, + "AWS::ResilienceHub::ResiliencyPolicy.PolicyMap": { + "additionalProperties": false, + "properties": { + "AZ": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for Availability Zone disruption.", + "title": "AZ" + }, + "Hardware": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for hardware disruption.", + "title": "Hardware" + }, + "Region": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for Regional disruption.", + "title": "Region" + }, + "Software": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for software disruption.", + "title": "Software" + } + }, + "required": [ + "AZ", + "Hardware", + "Software" + ], + "type": "object" + }, "AWS::ResourceExplorer2::DefaultViewAssociation": { "additionalProperties": false, "properties": { @@ -222315,6 +226420,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . 
Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSet.GeoProximityLocation", + "markdownDescription": "*GeoproximityLocation resource record sets only:* A complex type that lets you control how Route\u00a053 responds to DNS queries based on the geographic origin of the query and your resources.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -222446,6 +226556,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSet.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSet.GeoLocation": { "additionalProperties": false, "properties": { @@ -222467,6 +226597,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSet.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSet.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. 
For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup": { "additionalProperties": false, "properties": { @@ -222593,6 +226749,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSetGroup.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSetGroup.GeoLocation": { "additionalProperties": false, "properties": { @@ -222614,6 +226790,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSetGroup.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. 
For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup.RecordSet": { "additionalProperties": false, "properties": { @@ -222637,6 +226839,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. 
\n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.GeoProximityLocation", + "markdownDescription": "A complex type that contains information about a geographic location.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -223754,6 +227961,11 @@ "markdownDescription": "The priority of the rule in the rule group. This value must be unique within the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting.", "title": "Priority", "type": "number" + }, + "Qtype": { + "markdownDescription": "The DNS query type you want the rule to evaluate. Allowed values are;\n\n- A: Returns an IPv4 address.\n- AAAA: Returns an Ipv6 address.\n- CAA: Restricts CAs that can create SSL/TLS certifications for the domain.\n- CNAME: Returns another domain name.\n- DS: Record that identifies the DNSSEC signing key of a delegated zone.\n- MX: Specifies mail servers.\n- NAPTR: Regular-expression-based rewriting of domain names.\n- NS: Authoritative name servers.\n- PTR: Maps an IP address to a domain name.\n- SOA: Start of authority record for the zone.\n- SPF: Lists the servers authorized to send emails from a domain.\n- SRV: Application specific values that identify servers.\n- TXT: Verifies email senders and application-specific values.\n- A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPE NUMBER , where the NUMBER can be 1-65334, for example, TYPE28. For more information, see [List of DNS record types](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/List_of_DNS_record_types) .", + "title": "Qtype", + "type": "string" } }, "required": [ @@ -231848,7 +236060,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *`BLOCK`* : Packages in the `RejectedPatches` list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as `InstalledRejected` .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. 
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", "title": "RejectedPatchesAction", "type": "string" }, @@ -233913,6 +238125,11 @@ "title": "AppImageConfigName", "type": "string" }, + "JupyterLabAppImageConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig", + "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.", + "title": "JupyterLabAppImageConfig" + }, "KernelGatewayImageConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig", "markdownDescription": "The configuration for the file system and kernels in the SageMaker image.", @@ -233953,6 +238170,56 @@ ], "type": "object" }, + "AWS::SageMaker::AppImageConfig.ContainerConfig": { + "additionalProperties": false, + "properties": { + "ContainerArguments": { + "items": { + "type": "string" + }, + "markdownDescription": "The arguments for the container when you're running the application.", + "title": "ContainerArguments", + "type": "array" + }, + "ContainerEntrypoint": { + "items": { + "type": "string" + }, + "markdownDescription": "The entrypoint used to run the application in the container.", + "title": "ContainerEntrypoint", + "type": "array" + }, + "ContainerEnvironmentVariables": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable" + }, + "markdownDescription": "The environment variables to set in the container", + "title": "ContainerEnvironmentVariables", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key that identifies a container environment variable.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the container environment variable.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, "AWS::SageMaker::AppImageConfig.FileSystemConfig": { "additionalProperties": false, "properties": { @@ -233974,6 +238241,17 @@ }, "type": "object" }, + "AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig": { + "additionalProperties": false, + "properties": { + "ContainerConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.ContainerConfig", + "markdownDescription": "The configuration used to run the application image container.", + "title": "ContainerConfig" + } + }, + "type": "object" + }, "AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig": { "additionalProperties": false, "properties": { @@ -235133,9 +239411,33 @@ }, "type": "object" }, + "AWS::SageMaker::Domain.DockerSettings": { + "additionalProperties": false, + "properties": { + "EnableDockerAccess": { + "markdownDescription": "Indicates whether the domain can access Docker.", + "title": "EnableDockerAccess", + "type": "string" + }, + "VpcOnlyTrustedAccounts": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of AWS accounts that are trusted when the domain is created in VPC-only mode.", + "title": "VpcOnlyTrustedAccounts", + "type": "array" + } + }, + "type": "object" + }, "AWS::SageMaker::Domain.DomainSettings": { "additionalProperties": false, "properties": { + 
"DockerSettings": { + "$ref": "#/definitions/AWS::SageMaker::Domain.DockerSettings", + "markdownDescription": "A collection of settings that configure the domain's Docker interaction.", + "title": "DockerSettings" + }, "RStudioServerProDomainSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.RStudioServerProDomainSettings", "markdownDescription": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", @@ -236279,7 +240581,7 @@ "type": "number" }, "ProvisionedConcurrency": { - "markdownDescription": "", + "markdownDescription": "The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to `MaxConcurrency` .\n\n> This field is not supported for serverless endpoint recommendations for Inference Recommender jobs. For more information about creating an Inference Recommender job, see [CreateInferenceRecommendationsJobs](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateInferenceRecommendationsJob.html) .", "title": "ProvisionedConcurrency", "type": "number" } @@ -236525,6 +240827,11 @@ "markdownDescription": "Option for different tiers of low latency storage for real-time data retrieval.\n\n- `Standard` : A managed low latency data store for feature groups.\n- `InMemory` : A managed data store for feature groups that supports very low latency retrieval.", "title": "StorageType", "type": "string" + }, + "TtlDuration": { + "$ref": "#/definitions/AWS::SageMaker::FeatureGroup.TtlDuration", + "markdownDescription": "Time to live duration, where the record is hard deleted after the expiration time is reached; `ExpiresAt` = `EventTime` + `TtlDuration` . For information on HardDelete, see the [DeleteRecord](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_feature_store_DeleteRecord.html) API in the Amazon SageMaker API Reference guide.", + "title": "TtlDuration" } }, "type": "object" @@ -236583,6 +240890,22 @@ ], "type": "object" }, + "AWS::SageMaker::FeatureGroup.TtlDuration": { + "additionalProperties": false, + "properties": { + "Unit": { + "markdownDescription": "`TtlDuration` time unit.", + "title": "Unit", + "type": "string" + }, + "Value": { + "markdownDescription": "`TtlDuration` time value.", + "title": "Value", + "type": "number" + } + }, + "type": "object" + }, "AWS::SageMaker::Image": { "additionalProperties": false, "properties": { @@ -237488,7 +241811,7 @@ "type": "string" }, "Environment": { - "markdownDescription": "The environment variables to set in the Docker container. Each key and value in the `Environment` string to string map can have length of up to 1024. We support up to 16 entries in the map.", + "markdownDescription": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. 
If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "title": "Environment", "type": "object" }, @@ -242348,6 +246671,16 @@ "title": "DomainId", "type": "string" }, + "OwnershipSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.OwnershipSettings", + "markdownDescription": "The collection of ownership settings for a space.", + "title": "OwnershipSettings" + }, + "SpaceDisplayName": { + "markdownDescription": "The name of the space that appears in the Studio UI.", + "title": "SpaceDisplayName", + "type": "string" + }, "SpaceName": { "markdownDescription": "The name of the space.", "title": "SpaceName", @@ -242358,6 +246691,11 @@ "markdownDescription": "A collection of space settings.", "title": "SpaceSettings" }, + "SpaceSharingSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceSharingSettings", + "markdownDescription": "A collection of space sharing settings.", + "title": "SpaceSharingSettings" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -242394,6 +246732,31 @@ ], "type": "object" }, + "AWS::SageMaker::Space.CodeRepository": { + "additionalProperties": false, + "properties": { + "RepositoryUrl": { + "markdownDescription": "The URL of the Git repository.", + "title": "RepositoryUrl", + "type": "string" + } + }, + "required": [ + "RepositoryUrl" + ], + "type": "object" + }, + "AWS::SageMaker::Space.CustomFileSystem": { + "additionalProperties": false, + "properties": { + "EFSFileSystem": { + "$ref": "#/definitions/AWS::SageMaker::Space.EFSFileSystem", + "markdownDescription": "A custom file system in Amazon EFS.", + "title": "EFSFileSystem" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.CustomImage": { "additionalProperties": false, "properties": { @@ -242419,6 +246782,34 @@ ], "type": "object" }, + "AWS::SageMaker::Space.EFSFileSystem": { + "additionalProperties": false, + "properties": { + "FileSystemId": { + "markdownDescription": "The ID of your Amazon EFS file system.", + "title": "FileSystemId", + "type": "string" + } + }, + "required": [ + "FileSystemId" + ], + "type": "object" + }, + "AWS::SageMaker::Space.EbsStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsVolumeSizeInGb": { + "markdownDescription": "The size of an EBS storage volume for a private space.", + "title": "EbsVolumeSizeInGb", + "type": "number" + } + }, + "required": [ + "EbsVolumeSizeInGb" + ], + "type": "object" + }, "AWS::SageMaker::Space.JupyterServerAppSettings": { "additionalProperties": false, "properties": { @@ -242449,6 +246840,20 @@ }, "type": "object" }, + "AWS::SageMaker::Space.OwnershipSettings": { + "additionalProperties": false, + "properties": { + "OwnerUserProfileName": { + "markdownDescription": "The user profile who is the owner of the private space.", + "title": "OwnerUserProfileName", + "type": "string" + } + }, + "required": [ + "OwnerUserProfileName" + ], + "type": "object" + }, "AWS::SageMaker::Space.ResourceSpec": { "additionalProperties": false, "properties": { @@ -242470,9 +246875,62 @@ }, "type": "object" }, + "AWS::SageMaker::Space.SpaceCodeEditorAppSettings": { + "additionalProperties": false, + "properties": { + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + "title": "DefaultResourceSpec" + } + }, + "type": "object" + }, + 
"AWS::SageMaker::Space.SpaceJupyterLabAppSettings": { + "additionalProperties": false, + "properties": { + "CodeRepositories": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CodeRepository" + }, + "markdownDescription": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", + "title": "CodeRepositories", + "type": "array" + }, + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + "title": "DefaultResourceSpec" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.SpaceSettings": { "additionalProperties": false, "properties": { + "AppType": { + "markdownDescription": "The type of app created within the space.", + "title": "AppType", + "type": "string" + }, + "CodeEditorAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceCodeEditorAppSettings", + "markdownDescription": "The Code Editor application settings.", + "title": "CodeEditorAppSettings" + }, + "CustomFileSystems": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CustomFileSystem" + }, + "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.", + "title": "CustomFileSystems", + "type": "array" + }, + "JupyterLabAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceJupyterLabAppSettings", + "markdownDescription": "The settings for the JupyterLab application.", + "title": "JupyterLabAppSettings" + }, "JupyterServerAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Space.JupyterServerAppSettings", "markdownDescription": "The JupyterServer app settings.", @@ -242482,6 +246940,36 @@ "$ref": "#/definitions/AWS::SageMaker::Space.KernelGatewayAppSettings", "markdownDescription": "The KernelGateway app settings.", "title": "KernelGatewayAppSettings" + }, + "SpaceStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceStorageSettings", + "markdownDescription": "The storage settings for a private space.", + "title": "SpaceStorageSettings" + } + }, + "type": "object" + }, + "AWS::SageMaker::Space.SpaceSharingSettings": { + "additionalProperties": false, + "properties": { + "SharingType": { + "markdownDescription": "Specifies the sharing type of the space.", + "title": "SharingType", + "type": "string" + } + }, + "required": [ + "SharingType" + ], + "type": "object" + }, + "AWS::SageMaker::Space.SpaceStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.EbsStorageSettings", + "markdownDescription": "A collection of EBS storage settings for a private space.", + "title": "EbsStorageSettings" } }, "type": "object" @@ -247600,7 +252088,7 @@ "type": "array" }, "RoleArn": { - "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. 
If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- You must have the `iam:PassRole` permission. For more information, see [Granting a user permissions to pass a role to an AWS service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.", + "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. 
The SRT takes these actions only if explicitly authorized by you.", "title": "RoleArn", "type": "string" } @@ -247897,7 +252385,7 @@ "additionalProperties": false, "properties": { "Aggregation": { - "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", + "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- `Sum` - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- `Mean` - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- `Max` - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", "title": "Aggregation", "type": "string" }, @@ -250563,7 +255051,7 @@ "type": "string" }, "EncryptionAlgorithm": { - "markdownDescription": "The algorithm that is used to encrypt the file.\n\n> You can only specify `NONE` if the URL for your connector uses HTTPS. This ensures that no traffic is sent in clear text.", + "markdownDescription": "The algorithm that is used to encrypt the file.\n\nNote the following:\n\n- Do not use the `DES_EDE3_CBC` algorithm unless you must support a legacy client that requires it, as it is a weak encryption algorithm.\n- You can only specify `NONE` if the URL for your connector uses HTTPS. 
Using HTTPS ensures that no traffic is sent in clear text.", "title": "EncryptionAlgorithm", "type": "string" }, @@ -251585,7 +256073,8 @@ } }, "required": [ - "Configuration" + "Configuration", + "PolicyStoreId" ], "type": "object" }, @@ -251981,6 +256470,7 @@ } }, "required": [ + "PolicyStoreId", "Statement" ], "type": "object" @@ -255510,7 +260000,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.FieldToMatch" }, - "markdownDescription": "The parts of the request that you want to keep out of the logs.\n\nFor example, if you redact the `SingleHeader` field, the `HEADER` field in the logs will be `REDACTED` for all rules that use the `SingleHeader` `FieldToMatch` setting.\n\nRedaction applies only to the component that's specified in the rule's `FieldToMatch` setting, so the `SingleHeader` redaction doesn't apply to rules that use the `Headers` `FieldToMatch` .\n\n> You can specify only the following fields for redaction: `UriPath` , `QueryString` , `SingleHeader` , and `Method` .", + "markdownDescription": "The parts of the request that you want to keep out of the logs.\n\nFor example, if you redact the `SingleHeader` field, the `HEADER` field in the logs will be `REDACTED` for all rules that use the `SingleHeader` `FieldToMatch` setting.\n\nRedaction applies only to the component that's specified in the rule's `FieldToMatch` setting, so the `SingleHeader` redaction doesn't apply to rules that use the `Headers` `FieldToMatch` .\n\n> You can specify only the following fields for redaction: `UriPath` , `QueryString` , `SingleHeader` , and `Method` . > This setting has no impact on request sampling. With request sampling, the only way to exclude fields is by disabling sampling in the web ACL visibility configuration.", "title": "RedactedFields", "type": "array" }, @@ -255580,9 +260070,6 @@ "AWS::WAFv2::LoggingConfiguration.FieldToMatch": { "additionalProperties": false, "properties": { - "JsonBody": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody" - }, "Method": { "markdownDescription": "Redact the indicated HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "title": "Method", @@ -255635,25 +260122,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.JsonBody": { - "additionalProperties": false, - "properties": { - "InvalidFallbackBehavior": { - "type": "string" - }, - "MatchPattern": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern" - }, - "MatchScope": { - "type": "string" - } - }, - "required": [ - "MatchPattern", - "MatchScope" - ], - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.LabelNameCondition": { "additionalProperties": false, "properties": { @@ -255691,21 +260159,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.MatchPattern": { - "additionalProperties": false, - "properties": { - "All": { - "type": "object" - }, - "IncludedPaths": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.SingleHeader": { "additionalProperties": false, "properties": { @@ -255989,7 +260442,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256237,7 +260690,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -256250,9 +260703,14 @@ "markdownDescription": "Inspect the request headers. You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -256434,6 +260892,20 @@ ], "type": "object" }, + "AWS::WAFv2::RuleGroup.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::RuleGroup.JsonBody": { "additionalProperties": false, "properties": { @@ -256453,7 +260925,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256575,6 +261047,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -257055,7 +261532,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. 
For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -257105,7 +261582,7 @@ "type": "string" }, "SampledRequestsEnabled": { - "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.", + "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\n> Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.", "title": "SampledRequestsEnabled", "type": "boolean" } @@ -257177,7 +261654,7 @@ "properties": { "AssociationConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.AssociationConfig", - "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected resources forward to AWS WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "title": "AssociationConfig" }, "CaptchaConfig": { @@ -257396,7 +261873,7 @@ "properties": { "RequestBody": { "additionalProperties": false, - "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. 
The default size is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access resources forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes). You can change the setting for any of the available resource types.\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nExample JSON: `{ \"API_GATEWAY\": \"KB_48\", \"APP_RUNNER_SERVICE\": \"KB_32\" }`\n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "patternProperties": { "^[a-zA-Z0-9]+$": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RequestBodyAssociatedResourceTypeConfig" @@ -257423,7 +261900,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -257715,7 +262192,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::WebACL.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -257728,9 +262205,14 @@ "markdownDescription": "Inspect the request headers. You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::WebACL.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . 
\n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::WebACL.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -257912,6 +262394,20 @@ ], "type": "object" }, + "AWS::WAFv2::WebACL.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::WebACL.JsonBody": { "additionalProperties": false, "properties": { @@ -257931,7 +262427,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -258153,6 +262649,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. 
\n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -258405,7 +262906,7 @@ "additionalProperties": false, "properties": { "DefaultSizeInspectionLimit": { - "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`", + "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resource should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`", "title": "DefaultSizeInspectionLimit", "type": "string" } @@ -258925,7 +263426,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::WebACL.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -258975,7 +263476,7 @@ "type": "string" }, "SampledRequestsEnabled": { - "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.", + "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. 
You can view the sampled requests through the AWS WAF console.\n\n> Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.", "title": "SampledRequestsEnabled", "type": "boolean" } @@ -269088,6 +273589,9 @@ { "$ref": "#/definitions/AWS::AppFlow::Flow" }, + { + "$ref": "#/definitions/AWS::AppIntegrations::Application" + }, { "$ref": "#/definitions/AWS::AppIntegrations::DataIntegration" }, @@ -269484,9 +273988,15 @@ { "$ref": "#/definitions/AWS::CodeArtifact::Domain" }, + { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup" + }, { "$ref": "#/definitions/AWS::CodeArtifact::Repository" }, + { + "$ref": "#/definitions/AWS::CodeBuild::Fleet" + }, { "$ref": "#/definitions/AWS::CodeBuild::Project" }, @@ -269688,6 +274198,9 @@ { "$ref": "#/definitions/AWS::ConnectCampaigns::Campaign" }, + { + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline" + }, { "$ref": "#/definitions/AWS::ControlTower::EnabledControl" }, @@ -269814,6 +274327,27 @@ { "$ref": "#/definitions/AWS::DataSync::Task" }, + { + "$ref": "#/definitions/AWS::DataZone::DataSource" + }, + { + "$ref": "#/definitions/AWS::DataZone::Domain" + }, + { + "$ref": "#/definitions/AWS::DataZone::Environment" + }, + { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration" + }, + { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile" + }, + { + "$ref": "#/definitions/AWS::DataZone::Project" + }, + { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget" + }, { "$ref": "#/definitions/AWS::Detective::Graph" }, @@ -270510,6 +275044,9 @@ { "$ref": "#/definitions/AWS::Glue::Table" }, + { + "$ref": "#/definitions/AWS::Glue::TableOptimizer" + }, { "$ref": "#/definitions/AWS::Glue::Trigger" }, @@ -270663,6 +275200,9 @@ { "$ref": "#/definitions/AWS::IVS::RecordingConfiguration" }, + { + "$ref": "#/definitions/AWS::IVS::Stage" + }, { "$ref": "#/definitions/AWS::IVS::StreamKey" }, @@ -270714,6 +275254,9 @@ { "$ref": "#/definitions/AWS::Inspector::ResourceGroup" }, + { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration" + }, { "$ref": "#/definitions/AWS::InspectorV2::Filter" }, @@ -270942,6 +275485,12 @@ { "$ref": "#/definitions/AWS::KafkaConnect::Connector" }, + { + "$ref": "#/definitions/AWS::KafkaConnect::CustomPlugin" + }, + { + "$ref": "#/definitions/AWS::KafkaConnect::WorkerConfiguration" + }, { "$ref": "#/definitions/AWS::Kendra::DataSource" }, @@ -271701,6 +276250,9 @@ { "$ref": "#/definitions/AWS::RDS::GlobalCluster" }, + { + "$ref": "#/definitions/AWS::RDS::Integration" + }, { "$ref": "#/definitions/AWS::RDS::OptionGroup" }, diff --git a/samtranslator/translator/arn_generator.py b/samtranslator/translator/arn_generator.py index a9f6e0325..5ab68e082 100644 --- a/samtranslator/translator/arn_generator.py +++ b/samtranslator/translator/arn_generator.py @@ -17,16 +17,17 @@ def _get_region_from_session() -> str: def _region_to_partition(region: str) -> str: # setting default partition to aws, this will be overwritten by checking the region below region_string = region.lower() - if region_string.startswith("cn-"): - return "aws-cn" - if region_string.startswith("us-iso-"): - return "aws-iso" - if region_string.startswith("us-isob"): - return "aws-iso-b" - if region_string.startswith("us-gov"): - return "aws-us-gov" - if region_string.startswith("eu-isoe"): - return "aws-iso-e" + 
region_to_partition_map = { + "cn-": "aws-cn", + "us-iso-": "aws-iso", + "us-isob": "aws-iso-b", + "us-gov": "aws-us-gov", + "eu-isoe": "aws-iso-e", + "us-isof": "aws-iso-f", + } + for key, value in region_to_partition_map.items(): + if region_string.startswith(key): + return value return "aws" diff --git a/samtranslator/utils/types.py b/samtranslator/utils/types.py index 5fd723d6f..88d9db614 100644 --- a/samtranslator/utils/types.py +++ b/samtranslator/utils/types.py @@ -1,4 +1,5 @@ """Type related utils.""" + from typing import Any, Dict, TypeVar, Union T = TypeVar("T") diff --git a/samtranslator/validator/value_validator.py b/samtranslator/validator/value_validator.py index ae1b62665..699c86527 100644 --- a/samtranslator/validator/value_validator.py +++ b/samtranslator/validator/value_validator.py @@ -1,4 +1,5 @@ """A plug-able validator to help raise exception when some value is unexpected.""" + from typing import Any, Dict, Generic, Optional, TypeVar, cast from samtranslator.model.exceptions import ( diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 64000d8da..9d0f99cab 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -362,7 +362,7 @@ "CustomRules": "The custom rewrite and redirect rules for an Amplify app.", "Description": "The description of the Amplify app.", "EnableBranchAutoDeletion": "Automatically disconnect a branch in Amplify Hosting when you delete a branch from your Git repository.", - "EnvironmentVariables": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", + "EnvironmentVariables": "The environment variables for the Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", "IAMServiceRole": "AWS Identity and Access Management ( IAM ) service role for the Amazon Resource Name (ARN) of the Amplify app.", "Name": "The name of the Amplify app.", "OauthToken": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", @@ -378,7 +378,7 @@ "EnableAutoBuild": "Enables auto building for the auto created branch.", "EnablePerformanceMode": "Enables performance mode for the branch.\n\nPerformance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. 
When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.", "EnablePullRequestPreview": "Sets whether pull request previews are enabled for each branch that Amplify Hosting automatically creates for your app. Amplify creates previews by deploying your app to a unique URL whenever a pull request is opened for the branch. Development and QA teams can use this preview to test the pull request before it's merged into a production or integration branch.\n\nTo provide backend support for your preview, Amplify Hosting automatically provisions a temporary backend environment that it deletes when the pull request is closed. If you want to specify a dedicated backend environment for your previews, use the `PullRequestEnvironmentName` property.\n\nFor more information, see [Web Previews](https://docs.aws.amazon.com/amplify/latest/userguide/pr-previews.html) in the *AWS Amplify Hosting User Guide* .", - "EnvironmentVariables": "Environment variables for the auto created branch.", + "EnvironmentVariables": "The environment variables for the autocreated branch.", "Framework": "The framework for the autocreated branch.", "PullRequestEnvironmentName": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "Stage": "Stage for the auto created branch." @@ -395,8 +395,8 @@ "Target": "The target pattern for a URL rewrite or redirect rule." }, "AWS::Amplify::App EnvironmentVariable": { - "Name": "", - "Value": "" + "Name": "The environment variable name.", + "Value": "The environment variable value." }, "AWS::Amplify::App Tag": { "Key": "Specifies the key for the tag.", @@ -1274,10 +1274,16 @@ "ConfigurationVersion": "The configuration version to deploy. If deploying an AWS AppConfig hosted configuration version, you can specify either the version number or version label. For all other configurations, you must specify the version number.", "DeploymentStrategyId": "The deployment strategy ID.", "Description": "A description of the deployment.", + "DynamicExtensionParameters": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", "EnvironmentId": "The environment ID.", "KmsKeyIdentifier": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", "Tags": "Metadata to assign to the deployment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define." 
}, + "AWS::AppConfig::Deployment DynamicExtensionParameters": { + "ExtensionReference": "", + "ParameterName": "", + "ParameterValue": "" + }, "AWS::AppConfig::Deployment Tags": { "Key": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", "Value": "The tag value can be up to 256 characters." @@ -1327,6 +1333,7 @@ }, "AWS::AppConfig::Extension Parameter": { "Description": "Information about the parameter.", + "Dynamic": "Indicates whether this parameter's value can be supplied at the extension's action point instead of during extension association. Dynamic parameters can't be marked `Required` .", "Required": "A parameter value must be specified in the extension association." }, "AWS::AppConfig::Extension Tag": { @@ -1893,19 +1900,37 @@ "AWS::AppFlow::Flow ZendeskSourceProperties": { "Object": "The object specified in the Zendesk flow source." }, + "AWS::AppIntegrations::Application": { + "ApplicationSourceConfig": "The configuration for where the application should be loaded from.", + "Description": "The description of the application.", + "Name": "The name of the application.", + "Namespace": "The namespace of the application.", + "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }." + }, + "AWS::AppIntegrations::Application ApplicationSourceConfig": { + "ExternalUrlConfig": "The external URL source for the application." + }, + "AWS::AppIntegrations::Application ExternalUrlConfig": { + "AccessUrl": "The URL to access the application.", + "ApprovedOrigins": "Additional URLs to allow list if different than the access URL." + }, + "AWS::AppIntegrations::Application Tag": { + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` ." + }, "AWS::AppIntegrations::DataIntegration": { "Description": "A description of the DataIntegration.", - "FileConfiguration": "", + "FileConfiguration": "The configuration for what files should be pulled from the source.", "KmsKey": "The KMS key for the DataIntegration.", "Name": "The name of the DataIntegration.", - "ObjectConfiguration": "", + "ObjectConfiguration": "The configuration for what data should be pulled from the source.", "ScheduleConfig": "The name of the data and how often it should be pulled from the source.", "SourceURI": "The URI of the data source.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::AppIntegrations::DataIntegration FileConfiguration": { - "Filters": "", - "Folders": "" + "Filters": "Restrictions for what files should be pulled from the source.", + "Folders": "Identifiers for the source folders to pull all files from recursively." 
}, "AWS::AppIntegrations::DataIntegration ScheduleConfig": { "FirstExecutionFrom": "The start date for objects to import in the first flow run as an Unix/epoch timestamp in milliseconds or in ISO-8601 format.", @@ -1913,8 +1938,8 @@ "ScheduleExpression": "How often the data should be pulled from data source." }, "AWS::AppIntegrations::DataIntegration Tag": { - "Key": "", - "Value": "" + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` ." }, "AWS::AppIntegrations::EventIntegration": { "Description": "The event integration description.", @@ -1927,8 +1952,8 @@ "Source": "The source of the events." }, "AWS::AppIntegrations::EventIntegration Tag": { - "Key": "", - "Value": "" + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` ." }, "AWS::AppMesh::GatewayRoute": { "GatewayRouteName": "The name of the gateway route.", @@ -3196,7 +3221,7 @@ "AWS::ApplicationAutoScaling::ScalableTarget": { "MaxCapacity": "The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand.", "MinCapacity": "The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand.", - "ResourceId": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. 
Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "ResourceId": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. 
Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "RoleARN": "Specify the Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that allows Application Auto Scaling to modify the scalable target on your behalf. This can be either an IAM service role that Application Auto Scaling can assume to make calls to other AWS resources on your behalf, or a service-linked role for the specified service. For more information, see [How Application Auto Scaling works with IAM](https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html) in the *Application Auto Scaling User Guide* .\n\nTo automatically create a service-linked role (recommended), specify the full ARN of the service-linked role in your stack template. To find the exact ARN of the service-linked role for your AWS or custom resource, see the [Service-linked roles](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html) topic in the *Application Auto Scaling User Guide* . Look for the ARN in the table at the bottom of the page.", "ScalableDimension": "The scalable dimension associated with the scalable target. 
This string consists of the service namespace, resource type, and scaling property.\n\n- `ecs:service:DesiredCount` - The desired task count of an ECS service.\n- `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.\n- `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot Fleet.\n- `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.\n- `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.\n- `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.\n- `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.\n- `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.\n- `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.\n- `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for a SageMaker model endpoint variant.\n- `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.\n- `comprehend:document-classifier-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend document classification endpoint.\n- `comprehend:entity-recognizer-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend entity recognizer endpoint.\n- `lambda:function:ProvisionedConcurrency` - The provisioned concurrency for a Lambda function.\n- `cassandra:table:ReadCapacityUnits` - The provisioned read capacity for an Amazon Keyspaces table.\n- `cassandra:table:WriteCapacityUnits` - The provisioned write capacity for an Amazon Keyspaces table.\n- `kafka:broker-storage:VolumeSize` - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.\n- `elasticache:replication-group:NodeGroups` - The number of node groups for an Amazon ElastiCache replication group.\n- `elasticache:replication-group:Replicas` - The number of replicas per node group for an Amazon ElastiCache replication group.\n- `neptune:cluster:ReadReplicaCount` - The count of read replicas in an Amazon Neptune DB cluster.\n- `sagemaker:variant:DesiredProvisionedConcurrency` - The provisioned concurrency for a SageMaker serverless endpoint.\n- `sagemaker:inference-component:DesiredCopyCount` - The number of copies across an endpoint for a SageMaker inference component.", "ScheduledActions": "The scheduled actions for the scalable target. Duplicates aren't allowed.", @@ -3223,7 +3248,7 @@ "AWS::ApplicationAutoScaling::ScalingPolicy": { "PolicyName": "The name of the scaling policy.\n\nUpdates to the name of a target tracking scaling policy are not supported, unless you also update the metric used for scaling. To change only a target tracking scaling policy's name, first delete the policy by removing the existing `AWS::ApplicationAutoScaling::ScalingPolicy` resource from the template and updating the stack. 
Then, recreate the resource with the same settings and a different name.", "PolicyType": "The scaling policy type.\n\nThe following policy types are supported:\n\n`TargetTrackingScaling` \u2014Not supported for Amazon EMR\n\n`StepScaling` \u2014Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune.", - "ResourceId": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. 
Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "ResourceId": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. 
Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "ScalableDimension": "The scalable dimension. This string consists of the service namespace, resource type, and scaling property.\n\n- `ecs:service:DesiredCount` - The desired task count of an ECS service.\n- `elasticmapreduce:instancegroup:InstanceCount` - The instance count of an EMR Instance Group.\n- `ec2:spot-fleet-request:TargetCapacity` - The target capacity of a Spot Fleet.\n- `appstream:fleet:DesiredCapacity` - The desired capacity of an AppStream 2.0 fleet.\n- `dynamodb:table:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB table.\n- `dynamodb:table:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB table.\n- `dynamodb:index:ReadCapacityUnits` - The provisioned read capacity for a DynamoDB global secondary index.\n- `dynamodb:index:WriteCapacityUnits` - The provisioned write capacity for a DynamoDB global secondary index.\n- `rds:cluster:ReadReplicaCount` - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition.\n- `sagemaker:variant:DesiredInstanceCount` - The number of EC2 instances for a SageMaker model endpoint variant.\n- `custom-resource:ResourceType:Property` - The scalable dimension for a custom resource provided by your own application or service.\n- `comprehend:document-classifier-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend document classification endpoint.\n- `comprehend:entity-recognizer-endpoint:DesiredInferenceUnits` - The number of inference units for an Amazon Comprehend entity recognizer endpoint.\n- `lambda:function:ProvisionedConcurrency` - The provisioned concurrency for a Lambda function.\n- `cassandra:table:ReadCapacityUnits` - The provisioned read capacity for an Amazon Keyspaces table.\n- `cassandra:table:WriteCapacityUnits` - The provisioned write capacity for an Amazon Keyspaces table.\n- `kafka:broker-storage:VolumeSize` - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster.\n- `elasticache:replication-group:NodeGroups` - The number of node groups for an Amazon ElastiCache replication group.\n- `elasticache:replication-group:Replicas` - The number of replicas per node group for an Amazon ElastiCache replication group.\n- `neptune:cluster:ReadReplicaCount` - The count of read replicas in an Amazon Neptune DB cluster.\n- `sagemaker:variant:DesiredProvisionedConcurrency` - The provisioned concurrency for a SageMaker serverless endpoint.\n- `sagemaker:inference-component:DesiredCopyCount` - The number of copies across an endpoint for a SageMaker inference component.", "ScalingTargetId": "The CloudFormation-generated ID of an Application Auto Scaling scalable target. 
For more information about the ID, see the Return Value section of the `AWS::ApplicationAutoScaling::ScalableTarget` resource.\n\n> You must specify either the `ScalingTargetId` property, or the `ResourceId` , `ScalableDimension` , and `ServiceNamespace` properties, but not both.", "ServiceNamespace": "The namespace of the AWS service that provides the resource, or a `custom-resource` .", @@ -3288,6 +3313,7 @@ "TargetValue": "The target value for the metric. Although this property accepts numbers of type Double, it won't accept values that are either too small or too large. Values must be in the range of -2^360 to 2^360. The value must be a valid number based on the choice of metric. For example, if the metric is CPU utilization, then the target value is a percent value that represents how much of the CPU can be used before scaling out." }, "AWS::ApplicationInsights::Application": { + "AttachMissingPermission": "If set to true, the managed policies for SSM and CW will be attached to the instance roles if they are missing.", "AutoConfigurationEnabled": "If set to `true` , the application components will be configured with the monitoring configuration recommended by Application Insights.", "CWEMonitorEnabled": "Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as `instance terminated` , `failed deployment` , and others.", "ComponentMonitoringSettings": "The monitoring settings of the components.", @@ -3325,6 +3351,9 @@ "HANAPrometheusExporter": "The HANA DB Prometheus Exporter settings.", "JMXPrometheusExporter": "A list of Java metrics to monitor for the component.", "Logs": "A list of logs to monitor for the component. Only Amazon EC2 instances can use `Logs` .", + "NetWeaverPrometheusExporter": "", + "Processes": "", + "SQLServerPrometheusExporter": "", "WindowsEvents": "A list of Windows Events to monitor for the component. Only Amazon EC2 instances running on Windows can use `WindowsEvents` ." }, "AWS::ApplicationInsights::Application CustomComponent": { @@ -3362,9 +3391,23 @@ "LogPatterns": "A list of objects that define the log patterns that belong to `LogPatternSet` .", "PatternSetName": "The name of the log pattern. A log pattern name can contain up to 30 characters, and it cannot be empty. The characters can be Unicode letters, digits, or one of the following symbols: period, dash, underscore." }, + "AWS::ApplicationInsights::Application NetWeaverPrometheusExporter": { + "InstanceNumbers": "", + "PrometheusPort": "", + "SAPSID": "" + }, + "AWS::ApplicationInsights::Application Process": { + "AlarmMetrics": "", + "ProcessName": "" + }, + "AWS::ApplicationInsights::Application SQLServerPrometheusExporter": { + "PrometheusPort": "", + "SQLSecretName": "" + }, "AWS::ApplicationInsights::Application SubComponentConfigurationDetails": { "AlarmMetrics": "A list of metrics to monitor for the component. All component types can use `AlarmMetrics` .", "Logs": "A list of logs to monitor for the component. Only Amazon EC2 instances can use `Logs` .", + "Processes": "", "WindowsEvents": "A list of Windows Events to monitor for the component. Only Amazon EC2 instances running on Windows can use `WindowsEvents` ." }, "AWS::ApplicationInsights::Application SubComponentTypeConfiguration": { @@ -4003,7 +4046,7 @@ "AWS::Backup::BackupPlan LifecycleResourceType": { "DeleteAfterDays": "Specifies the number of days after creation that a recovery point is deleted. 
Must be greater than `MoveToColdStorageAfterDays` .", "MoveToColdStorageAfterDays": "Specifies the number of days after creation that a recovery point is moved to cold storage.", - "OptInToArchiveForSupportedResources": "" + "OptInToArchiveForSupportedResources": "Optional Boolean. If this is true, this setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings." }, "AWS::Backup::BackupSelection": { "BackupPlanId": "Uniquely identifies a backup plan.", @@ -4086,19 +4129,19 @@ "S3KeyPrefix": "The prefix for where AWS Backup Audit Manager delivers your reports to Amazon S3. The prefix is this part of the following path: s3://your-bucket-name/ `prefix` /Backup/us-west-2/year/month/day/report-name. If not specified, there is no prefix." }, "AWS::Backup::ReportPlan ReportSetting": { - "Accounts": "These are the accounts to be included in the report.", + "Accounts": "These are the accounts to be included in the report.\n\nUse string value of `ROOT` to include all organizational units.", "FrameworkArns": "The Amazon Resource Names (ARNs) of the frameworks a report covers.", "OrganizationUnits": "These are the Organizational Units to be included in the report.", - "Regions": "These are the Regions to be included in the report.", + "Regions": "These are the Regions to be included in the report.\n\nUse the wildcard as the string value to include all Regions.", "ReportTemplate": "Identifies the report template for the report. Reports are built using a report template. The report templates are:\n\n`RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT`" }, "AWS::Backup::ReportPlan Tag": { - "Key": "", + "Key": "The tag key (String). The key can't start with `aws:` .\n\nLength Constraints: Minimum length of 1. Maximum length of 128.\n\nPattern: `^(?![aA]{1}[wW]{1}[sS]{1}:)([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`", "Value": "The value of the key.\n\nLength Constraints: Maximum length of 256.\n\nPattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`" }, "AWS::Backup::RestoreTestingPlan": { "RecoveryPointSelection": "The specified criteria to assign a set of resources, such as recovery point types or backup vaults.", - "RestoreTestingPlanName": "This is the restore testing plan name.", + "RestoreTestingPlanName": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan. This cannot be changed after creation, and it must consist of only alphanumeric characters and underscores.", "ScheduleExpression": "A CRON expression in specified timezone when a restore testing plan is executed.", "ScheduleExpressionTimezone": "Optional. This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", "StartWindowHours": "Defaults to 24 hours.\n\nA value in hours after a restore test is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. 
If this value is included, this parameter has a maximum value of 168 hours (one week).", @@ -4121,7 +4164,7 @@ "ProtectedResourceConditions": "In a resource testing selection, this parameter filters by specific conditions such as `StringEquals` or `StringNotEquals` .", "ProtectedResourceType": "The type of AWS resource included in a resource testing selection; for example, an Amazon EBS volume or an Amazon RDS database.", "RestoreMetadataOverrides": "You can override certain restore metadata keys by including the parameter `RestoreMetadataOverrides` in the body of `RestoreTestingSelection` . Key values are not case sensitive.\n\nSee the complete list of [restore testing inferred metadata](https://docs.aws.amazon.com/aws-backup/latest/devguide/restore-testing-inferred-metadata.html) .", - "RestoreTestingPlanName": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan.", + "RestoreTestingPlanName": "Unique string that is the name of the restore testing plan.\n\nThe name cannot be changed after creation. The name must consist of only alphanumeric characters and underscores. Maximum length is 50.", "RestoreTestingSelectionName": "This is the unique name of the restore testing selection that belongs to the related restore testing plan.", "ValidationWindowHours": "This is amount of hours (1 to 168) available to run a validation script on the data. The data will be deleted upon the completion of the validation script or the end of the specified retention period, whichever comes first." }, @@ -4198,6 +4241,7 @@ }, "AWS::Batch::JobDefinition": { "ContainerProperties": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", + "EcsProperties": "An object that contains the properties for the Amazon ECS resources of a job. When `ecsProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `eksProperties` , or `nodeProperties` .", "EksProperties": "An object with properties that are specific to Amazon EKS-based jobs. When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", "JobDefinitionName": "The name of the job definition.", "NodeProperties": "An object with properties that are specific to multi-node parallel jobs. When `nodeProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `eksProperties` .\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", @@ -4244,6 +4288,21 @@ "HostPath": "The path for the device on the host container instance.", "Permissions": "The explicit permissions to provide to the container for the device. By default, the container has permissions for `read` , `write` , and `mknod` for the device." }, + "AWS::Batch::JobDefinition EcsProperties": { + "TaskProperties": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element." + }, + "AWS::Batch::JobDefinition EcsTaskProperties": { + "Containers": "This object is a list of containers.", + "EphemeralStorage": "The amount of ephemeral storage to allocate for the task.
This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", + "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", + "IpcMode": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` .\n\nIf `host` is specified, all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified `task` share the same IPC resources.\n\nIf `none` is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.\n\nIf no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the Docker run reference.", + "NetworkConfiguration": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", + "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the Docker run reference.", + "PlatformVersion": "The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .", + "RuntimePlatform": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.", + "TaskRoleArn": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .", + "Volumes": "A list of volumes that are associated with the job."
+ }, "AWS::Batch::JobDefinition EfsVolumeConfiguration": { "AuthorizationConfig": "The authorization configuration details for the Amazon EFS file system.", "FileSystemId": "The Amazon EFS file system ID to use.", @@ -4349,14 +4408,18 @@ }, "AWS::Batch::JobDefinition NodeRangeProperty": { "Container": "The container details for the node range.", + "EcsProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "InstanceTypes": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties." }, "AWS::Batch::JobDefinition PodProperties": { "Containers": "The properties of the container that's used on the Amazon EKS pod.", "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "InitContainers": "These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", "ServiceAccountName": "The name of the service account that's used to run the pod.
For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "ShareProcessNamespace": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", "Volumes": "Specifies the volumes for a job definition that uses Amazon EKS resources." }, "AWS::Batch::JobDefinition RepositoryCredentials": { @@ -4378,6 +4441,28 @@ "Name": "The name of the secret.", "ValueFrom": "The secret to expose to the container. The supported values are either the full Amazon Resource Name (ARN) of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.\n\n> If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full Amazon Resource Name (ARN) or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified." }, + "AWS::Batch::JobDefinition TaskContainerDependency": { + "Condition": "The dependency condition of the container. The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a zero status. This condition can't be set on an essential container.", + "ContainerName": "A unique identifier for the container." + }, + "AWS::Batch::JobDefinition TaskContainerProperties": { + "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .", + "DependsOn": "A list of containers that this container depends on.", + "Environment": "The environment variables to pass to a container. 
This parameter maps to Env in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with `AWS_BATCH` . This naming convention is reserved for variables that AWS Batch sets.", + "Essential": "If the essential parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", + "Image": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of the [*docker run*](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "LinuxParameters": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .", + "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).
For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the *Docker documentation* .\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the [--volume](https://docs.aws.amazon.com/) option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "Name": "The name of a container. The name can be used as a unique identifier to target your `dependsOn` and `Overrides` objects.", + "Privileged": "When this parameter is `true` , the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on Fargate.", + "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "RepositoryCredentials": "The private repository authentication credentials to use.", + "ResourceRequirements": "The type and amount of a resource to assign to a container. 
The only supported resource is a GPU.", + "Secrets": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the Amazon Elastic Container Service Developer Guide.", + "Ulimits": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", + "User": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers." + }, "AWS::Batch::JobDefinition Timeout": { "AttemptDurationSeconds": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes." }, @@ -4402,6 +4487,7 @@ "AWS::Batch::JobQueue": { "ComputeEnvironmentOrder": "The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment runs a specific job. Compute environments must be in the `VALID` state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. All of the compute environments must be either EC2 ( `EC2` or `SPOT` ) or Fargate ( `FARGATE` or `FARGATE_SPOT` ); EC2 and Fargate compute environments can't be mixed.\n\n> All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.", "JobQueueName": "The name of the job queue.
It can be up to 128 letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).", + "JobStateTimeLimitActions": "The set of actions that AWS Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. AWS Batch will perform each action after `maxTimeSeconds` has passed.", "Priority": "The priority of the job queue. Job queues with a higher priority (or a higher integer value for the `priority` parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of `10` is given scheduling preference over a job queue with a priority value of `1` . All of the compute environments must be either EC2 ( `EC2` or `SPOT` ) or Fargate ( `FARGATE` or `FARGATE_SPOT` ); EC2 and Fargate compute environments can't be mixed.", "SchedulingPolicyArn": "The Amazon Resource Name (ARN) of the scheduling policy. The format is `aws: *Partition* :batch: *Region* : *Account* :scheduling-policy/ *Name*` . For example, `aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy` .", "State": "The state of the job queue. If the job queue state is `ENABLED` , it is able to accept jobs. If the job queue state is `DISABLED` , new jobs can't be added to the queue, but jobs already in the queue can finish.", @@ -4411,6 +4497,12 @@ "ComputeEnvironment": "The Amazon Resource Name (ARN) of the compute environment.", "Order": "The order of the compute environment. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower `order` integer value is tried for job placement first." }, + "AWS::Batch::JobQueue JobStateTimeLimitAction": { + "Action": "The action to take when a job is at the head of the job queue in the specified state for the specified period of time. The only supported value is `CANCEL` , which will cancel the job.", + "MaxTimeSeconds": "The approximate amount of time, in seconds, that must pass with the job in the specified state before the action is taken. The minimum value is 600 (10 minutes) and the maximum value is 86,400 (24 hours).", + "Reason": "The reason to log for the action being taken.", + "State": "The state of the job needed to trigger the action. The only supported value is `RUNNABLE` ." + }, "AWS::Batch::SchedulingPolicy": { "FairsharePolicy": "The fair share policy of the scheduling policy.", "Name": "The name of the scheduling policy. It can be up to 128 letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).", @@ -5191,7 +5283,7 @@ }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightConfig": { "SessionStickinessConfig": "Session stickiness provides the ability to define multiple requests from a single viewer as a single session. This prevents the potentially inconsistent experience of sending some of a given user's requests to your staging distribution, while others are sent to your primary distribution. Define the session duration using TTL values.", - "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and .15." + "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution."
}, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightPolicyConfig": { "SessionStickinessConfig": "", @@ -5638,7 +5730,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen 
`resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -5680,7 +5772,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen 
`resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -5858,10 +5950,35 @@ "Key": "The tag key.", "Value": "The tag value." }, + "AWS::CodeArtifact::PackageGroup": { + "ContactInfo": "The contact information of the package group.", + "Description": "The description of the package group.", + "DomainName": "The domain that contains the package group.", + "DomainOwner": "The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.", + "OriginConfiguration": "Details about the package origin configuration of a package group.", + "Pattern": "The pattern of the package group. The pattern determines which packages are associated with the package group.", + "Tags": "A list of tags to be applied to the package group." + }, + "AWS::CodeArtifact::PackageGroup OriginConfiguration": { + "Restrictions": "The origin configuration settings that determine how package versions can enter repositories." + }, + "AWS::CodeArtifact::PackageGroup RestrictionType": { + "Repositories": "The repositories to add to the allowed repositories list. The allowed repositories list is used when the `RestrictionMode` is set to `ALLOW_SPECIFIC_REPOSITORIES` .", + "RestrictionMode": "The package group origin restriction setting. When the value is `INHERIT` , the value is set to the value of the first parent package group which does not have a value of `INHERIT` ." + }, + "AWS::CodeArtifact::PackageGroup Restrictions": { + "ExternalUpstream": "The package group origin restriction setting for external, upstream repositories.", + "InternalUpstream": "The package group origin restriction setting for internal, upstream repositories.", + "Publish": "The package group origin restriction setting for publishing packages." + }, + "AWS::CodeArtifact::PackageGroup Tag": { + "Key": "The tag key.", + "Value": "The tag value." + }, "AWS::CodeArtifact::Repository": { "Description": "A text description of the repository.", "DomainName": "The name of the domain that contains the repository.", - "ExternalConnections": "An array of external connections associated with the repository.", + "ExternalConnections": "An array of external connections associated with the repository. 
For more information, see [Supported external connection repositories](https://docs.aws.amazon.com/codeartifact/latest/ug/external-connection.html#supported-public-repositories) in the *CodeArtifact user guide* .", "PermissionsPolicyDocument": "The document that defines the resource policy that is set on a repository.", "RepositoryName": "The name of an upstream repository.", "Tags": "A list of tags to be applied to the repository.", @@ -6023,7 +6140,7 @@ "AWS::CodeBuild::Project WebhookFilter": { "ExcludeMatchedPattern": "Used to indicate that the `pattern` determines which webhook events do not trigger a build. If true, then a webhook event that does not match the `pattern` triggers a build. If false, then a webhook event that matches the `pattern` triggers a build.", "Pattern": "For a `WebHookFilter` that uses `EVENT` type, a comma-separated string that specifies one or more events. For example, the webhook filter `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` allows all push, pull request created, and pull request updated events to trigger a build.\n\nFor a `WebHookFilter` that uses any of the other filter types, a regular expression pattern. For example, a `WebHookFilter` that uses `HEAD_REF` for its `type` and the pattern `^refs/heads/` triggers a build when the head reference is a branch with a reference name `refs/heads/branch-name` .", - "Type": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of six event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events." + "Type": "The type of webhook filter. 
There are eight webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , and `RELEASE_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of eight event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , and `PRERELEASED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` and `PRERELEASED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only." }, "AWS::CodeBuild::ReportGroup": { "DeleteReports": "When deleting a report group, specifies if reports within the report group should be deleted.\n\n- **true** - Deletes any reports that belong to the report group before deleting the report group.\n- **false** - You must delete any reports in the report group. This is the default value. If you delete a report group that contains one or more reports, an exception is thrown.", @@ -6339,7 +6456,8 @@ "OutputArtifacts": "The name or ID of the result of the action declaration, such as a test or build artifact. While the field is not a required parameter, most actions have an action configuration that requires a specified quantity of output artifacts. To refer to the action configuration specification by action provider, see the [Action structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) in the *AWS CodePipeline User Guide* .", "Region": "The action declaration's AWS Region, such as us-east-1.", "RoleArn": "The ARN of the IAM service role that performs the declared action. 
This is assumed through the roleArn for the pipeline.", - "RunOrder": "The order in which actions are run." + "RunOrder": "The order in which actions are run.", + "TimeoutInMinutes": "A timeout duration in minutes that can be applied against the ActionType\u2019s default timeout value specified in [Quotas for AWS CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/limits.html) . This attribute is available only to the manual approval ActionType." }, "AWS::CodePipeline::Pipeline ActionTypeId": { "Category": "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the values below.\n\n- `Source`\n- `Build`\n- `Test`\n- `Deploy`\n- `Invoke`\n- `Approval`", @@ -6570,7 +6688,7 @@ "AdminCreateUserConfig": "The configuration for creating a new user profile.", "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", "AutoVerifiedAttributes": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .", - "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "DeviceConfiguration": "The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.", "EmailConfiguration": "The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool.", "EmailVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", @@ -6751,7 +6869,7 @@ "AWS::Cognito::UserPoolIdentityProvider": { "AttributeMapping": "A mapping of IdP attributes to standard and custom user pool attributes.", "IdpIdentifiers": "A list of IdP identifiers.", - "ProviderDetails": "The IdP details. 
The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", + "ProviderDetails": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { 
\"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", "ProviderName": "The IdP name.", "ProviderType": "The IdP type.", "UserPoolId": "The user pool ID." @@ -7505,14 +7623,21 @@ "Key": "A valid security key in PEM format. 
For example:\n\n`\"-----BEGIN PUBLIC KEY-----\\ [a lot of characters] ----END PUBLIC KEY-----\"`\n\n*Minimum* : `1`\n\n*Maximum* : `1024`" }, "AWS::Connect::SecurityProfile": { + "AllowedAccessControlHierarchyGroupId": "The identifier of the hierarchy group that a security profile uses to restrict access to resources in Amazon Connect.", "AllowedAccessControlTags": "The list of tags that a security profile uses to restrict access to resources in Amazon Connect.", + "Applications": "", "Description": "The description of the security profile.", + "HierarchyRestrictedResources": "The list of resources that a security profile applies hierarchy restrictions to in Amazon Connect. Following are acceptable ResourceNames: `User` .", "InstanceArn": "The identifier of the Amazon Connect instance.", "Permissions": "Permissions assigned to the security profile. For a list of valid permissions, see [List of security profile permissions](https://docs.aws.amazon.com/connect/latest/adminguide/security-profile-list.html) .", "SecurityProfileName": "The name for the security profile.", "TagRestrictedResources": "The list of resources that a security profile applies tag restrictions to in Amazon Connect.", "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }." }, + "AWS::Connect::SecurityProfile Application": { + "ApplicationPermissions": "The permissions that the agent is granted on the application. Only the `ACCESS` permission is supported.", + "Namespace": "Namespace of the application that you want to give access to." + }, "AWS::Connect::SecurityProfile Tag": { "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" @@ -8002,7 +8127,7 @@ "RetainRule": "Information about the retention period for the snapshot archiving rule." }, "AWS::DLM::LifecyclePolicy CreateRule": { - "CronExpression": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see [Cron expressions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch User Guide* .", + "CronExpression": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see the [Cron expressions reference](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) in the *Amazon EventBridge User Guide* .", "Interval": "The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, 12, and 24.", "IntervalUnit": "The interval unit.", "Location": "*[Custom snapshot policies only]* Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify `CLOUD` . To create snapshots on the same Outpost as the source resource, specify `OUTPOST_LOCAL` . If you omit this parameter, `CLOUD` is used by default.\n\nIf the policy targets resources in an AWS Region , then you must create snapshots in the same Region as the source resource. 
If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.", @@ -8137,31 +8262,6 @@ "Settings": "The settings in JSON format for a data provider.", "Tags": "" }, - "AWS::DMS::DataProvider MicrosoftSqlServerSettings": { - "CertificateArn": "", - "DatabaseName": "Database name for the endpoint.", - "Port": "Endpoint TCP port.", - "ServerName": "Fully qualified domain name of the endpoint. For an Amazon RDS SQL Server instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.", - "SslMode": "" - }, - "AWS::DMS::DataProvider MySqlSettings": { - "CertificateArn": "", - "Port": "Endpoint TCP port.", - "ServerName": "The host name of the endpoint database.\n\nFor an Amazon RDS MySQL instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.\n\nFor an Aurora MySQL instance, this is the output of [DescribeDBClusters](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html) , in the `Endpoint` field.", - "SslMode": "" - }, - "AWS::DMS::DataProvider OracleSettings": { - "AsmServer": "For an Oracle source endpoint, your ASM server address. You can set this value from the `asm_server` value. You set `asm_server` as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see [Configuration for change data capture (CDC) on an Oracle source database](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration) .", - "CertificateArn": "", - "DatabaseName": "Database name for the endpoint.", - "Port": "Endpoint TCP port.", - "SecretsManagerOracleAsmAccessRoleArn": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the `SecretsManagerOracleAsmSecret` . This `SecretsManagerOracleAsmSecret` has the secret value that allows access to the Oracle ASM of the endpoint.\n\n> You can specify one of two sets of values for these permissions. You can specify the values for this setting and `SecretsManagerOracleAsmSecretId` . Or you can specify clear-text values for `AsmUser` , `AsmPassword` , and `AsmServerName` . You can't specify both. For more information on creating this `SecretsManagerOracleAsmSecret` and the `SecretsManagerOracleAsmAccessRoleArn` and `SecretsManagerOracleAsmSecretId` required to access it, see [Using secrets to access AWS Database Migration Service resources](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) in the *AWS Database Migration Service User Guide* .", - "SecretsManagerOracleAsmSecretId": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). 
The full ARN, partial ARN, or friendly name of the `SecretsManagerOracleAsmSecret` that contains the Oracle ASM connection details for the Oracle endpoint.", - "SecretsManagerSecurityDbEncryptionAccessRoleArn": "", - "SecretsManagerSecurityDbEncryptionSecretId": "", - "ServerName": "Fully qualified domain name of the endpoint.\n\nFor an Amazon RDS Oracle instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.", - "SslMode": "" - }, "AWS::DMS::DataProvider PostgreSqlSettings": { "CertificateArn": "", "DatabaseName": "Database name for the endpoint.", @@ -8170,9 +8270,6 @@ "SslMode": "" }, "AWS::DMS::DataProvider Settings": { - "MicrosoftSqlServerSettings": "", - "MySqlSettings": "", - "OracleSettings": "", "PostgreSqlSettings": "" }, "AWS::DMS::DataProvider Tag": { @@ -9067,8 +9164,8 @@ "VpcEndpointId": "The ID of the virtual private cloud (VPC) endpoint that the agent has access to. This is the client-side VPC endpoint, powered by AWS PrivateLink . If you don't have an AWS PrivateLink VPC endpoint, see [AWS PrivateLink and VPC endpoints](https://docs.aws.amazon.com//vpc/latest/userguide/endpoint-services-overview.html) in the *Amazon VPC User Guide* .\n\nFor more information about activating your agent in a private network based on a VPC, see [Using AWS DataSync in a Virtual Private Cloud](https://docs.aws.amazon.com/datasync/latest/userguide/datasync-in-vpc.html) in the *AWS DataSync User Guide.*\n\nA VPC endpoint ID looks like this: `vpce-01234d5aff67890e1` ." }, "AWS::DataSync::Agent Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationAzureBlob": { "AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.\n\nYou can specify more than one agent. For more information, see [Using multiple agents for your transfer](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", @@ -9084,8 +9181,8 @@ "AzureBlobSasToken": "Specifies a SAS token that provides permissions to access your Azure Blob Storage.\n\nThe token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:\n\n`sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D`" }, "AWS::DataSync::LocationAzureBlob Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationEFS": { "AccessPointArn": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", @@ -9101,8 +9198,8 @@ "SubnetArn": "Specifies the ARN of a subnet where DataSync creates the [network interfaces](https://docs.aws.amazon.com/datasync/latest/userguide/datasync-network.html#required-network-interfaces) for managing traffic during your transfer.\n\nThe subnet must be located:\n\n- In the same virtual private cloud (VPC) as the Amazon EFS file system.\n- In the same Availability Zone as at least one mount target for the Amazon EFS file system.\n\n> You don't need to specify a subnet that includes a file system mount target." 
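Illustrative note: the Key/Value descriptions added above apply to the Tag property type shared by the DataSync resources. A minimal sketch of tags on an AWS::DataSync::Agent follows; the logical ID, activation key, and tag values are placeholders, and ActivationKey is taken from the resource's other properties rather than from this excerpt.

{
  "MyDataSyncAgent": {
    "Type": "AWS::DataSync::Agent",
    "Properties": {
      "ActivationKey": "AAAAA-1AAAA-BBBBB-2BBBB-CCCCC",
      "Tags": [
        { "Key": "Name", "Value": "on-prem-agent" },
        { "Key": "CostCenter", "Value": "12345" }
      ]
    }
  }
}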
}, "AWS::DataSync::LocationEFS Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationFSxLustre": { "FsxFilesystemArn": "The Amazon Resource Name (ARN) for the FSx for Lustre file system.", @@ -9111,8 +9208,8 @@ "Tags": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location." }, "AWS::DataSync::LocationFSxLustre Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationFSxONTAP": { "Protocol": "Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system.", @@ -9141,8 +9238,8 @@ "Version": "By default, DataSync automatically chooses an SMB protocol version based on negotiation with your SMB file server. You also can configure DataSync to use a specific SMB version, but we recommend doing this only if DataSync has trouble negotiating with the SMB file server automatically.\n\nThese are the following options for configuring the SMB version:\n\n- `AUTOMATIC` (default): DataSync and the SMB file server negotiate the highest version of SMB that they mutually support between 2.1 and 3.1.1.\n\nThis is the recommended option. If you instead choose a specific version that your file server doesn't support, you may get an `Operation Not Supported` error.\n- `SMB3` : Restricts the protocol negotiation to only SMB version 3.0.2.\n- `SMB2` : Restricts the protocol negotiation to only SMB version 2.1.\n- `SMB2_0` : Restricts the protocol negotiation to only SMB version 2.0.\n- `SMB1` : Restricts the protocol negotiation to only SMB version 1.0.\n\n> The `SMB1` option isn't available when [creating an Amazon FSx for NetApp ONTAP location](https://docs.aws.amazon.com/datasync/latest/userguide/API_CreateLocationFsxOntap.html) ." }, "AWS::DataSync::LocationFSxONTAP Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationFSxOpenZFS": { "FsxFilesystemArn": "The Amazon Resource Name (ARN) of the FSx for OpenZFS file system.", @@ -9161,8 +9258,8 @@ "NFS": "Represents the Network File System (NFS) protocol that DataSync uses to access your FSx for OpenZFS file system." }, "AWS::DataSync::LocationFSxOpenZFS Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationFSxWindows": { "Domain": "Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.\n\nIf you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.", @@ -9174,8 +9271,8 @@ "User": "The user who has the permissions to access files and folders in the FSx for Windows File Server file system.\n\nFor information about choosing a user name that ensures sufficient permissions to files, folders, and metadata, see [user](https://docs.aws.amazon.com/datasync/latest/userguide/create-fsx-location.html#FSxWuser) ." }, "AWS::DataSync::LocationFSxWindows Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." 
}, "AWS::DataSync::LocationHDFS": { "AgentArns": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", @@ -9201,8 +9298,8 @@ "RpcProtection": "The Remote Procedure Call (RPC) protection setting configured on the HDFS cluster. This setting corresponds to your `hadoop.rpc.protection` setting in your `core-site.xml` file on your Hadoop cluster." }, "AWS::DataSync::LocationHDFS Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationNFS": { "MountOptions": "Specifies the options that DataSync can use to mount your NFS file server.", @@ -9218,8 +9315,8 @@ "AgentArns": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location." }, "AWS::DataSync::LocationNFS Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationObjectStorage": { "AccessKey": "Specifies the access key (for example, a user name) if credentials are required to authenticate with the object storage server.", @@ -9234,8 +9331,8 @@ "Tags": "Specifies the key-value pair that represents a tag that you want to add to the resource. Tags can help you manage, filter, and search for your resources. We recommend creating a name tag for your location." }, "AWS::DataSync::LocationObjectStorage Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationS3": { "S3BucketArn": "The ARN of the Amazon S3 bucket.", @@ -9248,8 +9345,8 @@ "BucketAccessRoleArn": "Specifies the ARN of the IAM role that DataSync uses to access your S3 bucket." }, "AWS::DataSync::LocationS3 Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationSMB": { "AgentArns": "The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.", @@ -9265,8 +9362,8 @@ "Version": "By default, DataSync automatically chooses an SMB protocol version based on negotiation with your SMB file server. You also can configure DataSync to use a specific SMB version, but we recommend doing this only if DataSync has trouble negotiating with the SMB file server automatically.\n\nThese are the following options for configuring the SMB version:\n\n- `AUTOMATIC` (default): DataSync and the SMB file server negotiate the highest version of SMB that they mutually support between 2.1 and 3.1.1.\n\nThis is the recommended option. If you instead choose a specific version that your file server doesn't support, you may get an `Operation Not Supported` error.\n- `SMB3` : Restricts the protocol negotiation to only SMB version 3.0.2.\n- `SMB2` : Restricts the protocol negotiation to only SMB version 2.1.\n- `SMB2_0` : Restricts the protocol negotiation to only SMB version 2.0.\n- `SMB1` : Restricts the protocol negotiation to only SMB version 1.0.\n\n> The `SMB1` option isn't available when [creating an Amazon FSx for NetApp ONTAP location](https://docs.aws.amazon.com/datasync/latest/userguide/API_CreateLocationFsxOntap.html) ." }, "AWS::DataSync::LocationSMB Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." 
}, "AWS::DataSync::StorageSystem": { "AgentArns": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.", @@ -9286,14 +9383,15 @@ "Username": "Specifies the user name for your storage system's management interface." }, "AWS::DataSync::StorageSystem Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::Task": { "CloudWatchLogGroupArn": "The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task.\n\nFor more information about how to use CloudWatch Logs with DataSync, see [Monitoring Your Task](https://docs.aws.amazon.com/datasync/latest/userguide/monitor-datasync.html#cloudwatchlogs) in the *AWS DataSync User Guide.*\n\nFor more information about these groups, see [Working with Log Groups and Log Streams](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) in the *Amazon CloudWatch Logs User Guide* .", "DestinationLocationArn": "The Amazon Resource Name (ARN) of an AWS storage resource's location.", "Excludes": "Specifies a list of filter rules that exclude specific data during your transfer. For more information and examples, see [Filtering data transferred by DataSync](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", "Includes": "Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see [Filtering data transferred by DataSync](https://docs.aws.amazon.com/datasync/latest/userguide/filtering.html) .", + "ManifestConfig": "The configuration of the manifest that lists the files or objects to transfer. For more information, see [Specifying what DataSync transfers by using a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html) .", "Name": "The name of a task. This value is a text reference that is used to identify the task in the console.", "Options": "Specifies the configuration options for a task. Some options include preserving file or object metadata and verifying data integrity.\n\nYou can also override these options before starting an individual run of a task (also known as a *task execution* ). For more information, see [StartTaskExecution](https://docs.aws.amazon.com/datasync/latest/userguide/API_StartTaskExecution.html) .", "Schedule": "Specifies a schedule used to periodically transfer files from a source to a destination location. The schedule should be specified in UTC time. For more information, see [Scheduling your task](https://docs.aws.amazon.com/datasync/latest/userguide/task-scheduling.html) .", @@ -9311,6 +9409,17 @@ "FilterType": "The type of filter rule to apply. AWS DataSync only supports the SIMPLE_PATTERN rule type.", "Value": "A single filter string that consists of the patterns to include or exclude. The patterns are delimited by \"|\" (that is, a pipe), for example: `/folder1|/folder2`" }, + "AWS::DataSync::Task ManifestConfig": { + "Action": "Specifies what DataSync uses the manifest for.", + "Format": "Specifies the file format of your manifest. 
For more information, see [Creating a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-create) .", + "Source": "Specifies the manifest that you want DataSync to use and where it's hosted.\n\n> You must specify this parameter if you're configuring a new manifest on or after February 7, 2024.\n> \n> If you don't, you'll get a 400 status code and `ValidationException` error stating that you're missing the IAM role for DataSync to access the S3 bucket where you're hosting your manifest. For more information, see [Providing DataSync access to your manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-access) ." + }, + "AWS::DataSync::Task ManifestConfigSourceS3": { + "BucketAccessRoleArn": "Specifies the AWS Identity and Access Management (IAM) role that allows DataSync to access your manifest. For more information, see [Providing DataSync access to your manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-access) .", + "ManifestObjectPath": "Specifies the Amazon S3 object key of your manifest. This can include a prefix (for example, `prefix/my-manifest.csv` ).", + "ManifestObjectVersionId": "Specifies the object version ID of the manifest that you want DataSync to use. If you don't set this, DataSync uses the latest version of the object.", + "S3BucketArn": "Specifies the Amazon Resource Name (ARN) of the S3 bucket where you're hosting your manifest." + }, "AWS::DataSync::Task Options": { "Atime": "A file metadata value that shows the last time that a file was accessed (that is, when the file was read or written to). If you set `Atime` to `BEST_EFFORT` , AWS DataSync attempts to preserve the original `Atime` attribute on all source files (that is, the version before the PREPARING phase). However, `Atime` 's behavior is not fully standard across platforms, so AWS DataSync can only do this on a best-effort basis.\n\nDefault value: `BEST_EFFORT`\n\n`BEST_EFFORT` : Attempt to preserve the per-file `Atime` value (recommended).\n\n`NONE` : Ignore `Atime` .\n\n> If `Atime` is set to `BEST_EFFORT` , `Mtime` must be set to `PRESERVE` .\n> \n> If `Atime` is set to `NONE` , `Mtime` must also be `NONE` .", "BytesPerSecond": "A value that limits the bandwidth used by AWS DataSync . For example, if you want AWS DataSync to use a maximum of 1 MB, set this value to `1048576` (=1024*1024).", @@ -9334,17 +9443,15 @@ "Transferred": "Specifies the level of reporting for the files, objects, and directories that DataSync attempted to transfer.", "Verified": "Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify during your transfer." }, - "AWS::DataSync::Task S3": { - "BucketAccessRoleArn": "Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see [Allowing DataSync to upload a task report to an Amazon S3 bucket](https://docs.aws.amazon.com/datasync/latest/userguide/creating-task-reports.html) .", - "S3BucketArn": "Specifies the ARN of the S3 bucket where DataSync uploads your report.", - "Subdirectory": "Specifies a bucket prefix for your report." 
- }, "AWS::DataSync::Task Skipped": { "ReportLevel": "Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't.\n\n- `ERRORS_ONLY` : A report shows what DataSync was unable to skip.\n- `SUCCESSES_AND_ERRORS` : A report shows what DataSync was able and unable to skip." }, + "AWS::DataSync::Task Source": { + "S3": "Specifies the S3 bucket where you're hosting your manifest." + }, "AWS::DataSync::Task Tag": { - "Key": "", - "Value": "" + "Key": "The key for an AWS resource tag.", + "Value": "The value for an AWS resource tag." }, "AWS::DataSync::Task TaskReportConfig": { "Destination": "Specifies the Amazon S3 bucket where DataSync uploads your task report. For more information, see [Task reports](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html#task-report-access) .", @@ -9353,6 +9460,11 @@ "Overrides": "Customizes the reporting level for aspects of your task report. For example, your report might generally only include errors, but you could specify that you want a list of successes and errors just for the files that DataSync attempted to delete in your destination location.", "ReportLevel": "Specifies whether you want your task report to include only what went wrong with your transfer or a list of what succeeded and didn't.\n\n- `ERRORS_ONLY` : A report shows what DataSync was unable to transfer, skip, verify, and delete.\n- `SUCCESSES_AND_ERRORS` : A report shows what DataSync was able and unable to transfer, skip, verify, and delete." }, + "AWS::DataSync::Task TaskReportConfigDestinationS3": { + "BucketAccessRoleArn": "Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see [Allowing DataSync to upload a task report to an Amazon S3 bucket](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .", + "S3BucketArn": "Specifies the ARN of the S3 bucket where DataSync uploads your report.", + "Subdirectory": "Specifies a bucket prefix for your report." + }, "AWS::DataSync::Task TaskSchedule": { "ScheduleExpression": "A cron expression that specifies when AWS DataSync initiates a scheduled transfer from a source to a destination location." }, @@ -9827,10 +9939,18 @@ "PointInTimeRecoverySpecification": "The settings used to enable point in time recovery. When not specified, defaults to point in time recovery disabled for the replica.", "ReadProvisionedThroughputSettings": "Defines read capacity settings for the replica table.", "Region": "The region in which this replica exists.", + "ReplicaStreamSpecification": "Represents the DynamoDB Streams configuration for a global table replica.", + "ResourcePolicy": "A resource-based policy document that contains permissions to add to the specified replica of a DynamoDB global table. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . 
For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", "SSESpecification": "Allows you to specify a customer-managed key for the replica. When using customer-managed keys for server-side encryption, this property must have a value in all replicas.", "TableClass": "The table class of the specified table. Valid values are `STANDARD` and `STANDARD_INFREQUENT_ACCESS` .", "Tags": "An array of key-value pairs to apply to this replica.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, + "AWS::DynamoDB::GlobalTable ReplicaStreamSpecification": { + "ResourcePolicy": "A resource-based policy document that contains the permissions for the specified stream of a DynamoDB global table replica. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .\n\nYou can update the `ResourcePolicy` property if you've specified more than one table using the [AWS ::DynamoDB::GlobalTable](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-globaltable.html) resource." + }, + "AWS::DynamoDB::GlobalTable ResourcePolicy": { + "PolicyDocument": "A resource-based policy document that contains permissions to add to the specified DynamoDB table, its indexes, and stream. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) ." + }, "AWS::DynamoDB::GlobalTable SSESpecification": { "SSEEnabled": "Indicates whether server-side encryption is performed using an AWS managed key or an AWS owned key. If enabled (true), server-side encryption type is set to KMS and an AWS managed key is used ( AWS KMS charges apply). If disabled (false) or not specified,server-side encryption is set to an AWS owned key. If you choose to use KMS encryption, you can also use customer managed KMS keys by specifying them in the `ReplicaSpecification.SSESpecification` object. You cannot mix AWS managed and customer managed KMS keys.", "SSEType": "Server-side encryption type. The only supported value is:\n\n- `KMS` - Server-side encryption that uses AWS Key Management Service . The key is stored in your account and is managed by AWS KMS ( AWS KMS charges apply)." 
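Illustrative note: the new ResourcePolicy and ReplicaStreamSpecification entries above describe per-replica resource-based policies. A minimal sketch of a single replica object carrying both follows; the region, principals, actions, and policy statements are placeholders, and the replica's other required properties are omitted.

{
  "Region": "us-west-2",
  "ResourcePolicy": {
    "PolicyDocument": {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Principal": { "AWS": "arn:aws:iam::111122223333:role/ReadOnlyRole" },
          "Action": [ "dynamodb:GetItem", "dynamodb:Query" ],
          "Resource": "*"
        }
      ]
    }
  },
  "ReplicaStreamSpecification": {
    "ResourcePolicy": {
      "PolicyDocument": {
        "Version": "2012-10-17",
        "Statement": [
          {
            "Effect": "Allow",
            "Principal": { "AWS": "arn:aws:iam::111122223333:role/StreamConsumer" },
            "Action": [ "dynamodb:GetRecords", "dynamodb:GetShardIterator" ],
            "Resource": "*"
          }
        ]
      }
    }
  }
}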
@@ -9867,6 +9987,7 @@ "LocalSecondaryIndexes": "Local secondary indexes to be created on the table. You can create up to 5 local secondary indexes. Each index is scoped to a given hash key value. The size of each hash key can be up to 10 gigabytes.", "PointInTimeRecoverySpecification": "The settings used to enable point in time recovery.", "ProvisionedThroughput": "Throughput for the specified table, which consists of values for `ReadCapacityUnits` and `WriteCapacityUnits` . For more information about the contents of a provisioned throughput structure, see [Amazon DynamoDB Table ProvisionedThroughput](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ProvisionedThroughput.html) .\n\nIf you set `BillingMode` as `PROVISIONED` , you must specify this property. If you set `BillingMode` as `PAY_PER_REQUEST` , you cannot specify this property.", + "ResourcePolicy": "A resource-based policy document that contains permissions to add to the specified table. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .\n\nWhen you attach a resource-based policy while creating a table, the policy creation is *strongly consistent* . For information about the considerations that you should keep in mind while attaching a resource-based policy, see [Resource-based policy considerations](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) .", "SSESpecification": "Specifies the settings to enable server-side encryption.", "StreamSpecification": "The settings for the DynamoDB table stream, which capture changes to items stored in the table.", "TableClass": "The table class of the new table. Valid values are `STANDARD` and `STANDARD_INFREQUENT_ACCESS` .", @@ -9925,6 +10046,9 @@ "ReadCapacityUnits": "The maximum number of strongly consistent reads consumed per second before DynamoDB returns a `ThrottlingException` . For more information, see [Specifying Read and Write Requirements](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) in the *Amazon DynamoDB Developer Guide* .\n\nIf read/write capacity mode is `PAY_PER_REQUEST` the value is set to 0.", "WriteCapacityUnits": "The maximum number of writes consumed per second before DynamoDB returns a `ThrottlingException` . For more information, see [Specifying Read and Write Requirements](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) in the *Amazon DynamoDB Developer Guide* .\n\nIf read/write capacity mode is `PAY_PER_REQUEST` the value is set to 0." }, + "AWS::DynamoDB::Table ResourcePolicy": { + "PolicyDocument": "A resource-based policy document that contains permissions to add to the specified DynamoDB table, index, or both. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . 
For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) ." + }, "AWS::DynamoDB::Table S3BucketSource": { "S3Bucket": "The S3 bucket that is being imported from.", "S3BucketOwner": "The account number of the S3 bucket that is being imported from. If the bucket is owned by the requester this is optional.", @@ -9936,6 +10060,7 @@ "SSEType": "Server-side encryption type. The only supported value is:\n\n- `KMS` - Server-side encryption that uses AWS Key Management Service . The key is stored in your account and is managed by AWS KMS ( AWS KMS charges apply)." }, "AWS::DynamoDB::Table StreamSpecification": { + "ResourcePolicy": "Creates or updates a resource-based policy document that contains the permissions for DynamoDB resources, such as a table's streams. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", "StreamViewType": "When an item in the table is modified, `StreamViewType` determines what information is written to the stream for this table. Valid values for `StreamViewType` are:\n\n- `KEYS_ONLY` - Only the key attributes of the modified item are written to the stream.\n- `NEW_IMAGE` - The entire item, as it appears after it was modified, is written to the stream.\n- `OLD_IMAGE` - The entire item, as it appeared before it was modified, is written to the stream.\n- `NEW_AND_OLD_IMAGES` - Both the new and the old item images of the item are written to the stream." }, "AWS::DynamoDB::Table Tag": { @@ -10091,6 +10216,7 @@ "AWS::EC2::DHCPOptions": { "DomainName": "This value is used to complete unqualified DNS hostnames. If you're using AmazonProvidedDNS in `us-east-1` , specify `ec2.internal` . If you're using AmazonProvidedDNS in another Region, specify *region* . `compute.internal` (for example, `ap-northeast-1.compute.internal` ). Otherwise, specify a domain name (for example, *MyCompany.com* ).", "DomainNameServers": "The IPv4 addresses of up to four domain name servers, or `AmazonProvidedDNS` . The default is `AmazonProvidedDNS` . To have your instance receive a custom DNS hostname as specified in `DomainName` , you must set this property to a custom DNS server.", + "Ipv6AddressPreferredLeaseTime": "A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. 
Lease renewal typically occurs when half of the lease time has elapsed.", "NetbiosNameServers": "The IPv4 addresses of up to four NetBIOS name servers.", "NetbiosNodeType": "The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2 (broadcast and multicast are not currently supported).", "NtpServers": "The IPv4 addresses of up to four Network Time Protocol (NTP) servers.", @@ -10275,8 +10401,8 @@ }, "AWS::EC2::FlowLog": { "DeliverCrossAccountRole": "The ARN of the IAM role that allows the service to publish flow logs across accounts.", - "DeliverLogsPermissionArn": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nThis parameter is required if the destination type is `cloud-watch-logs` and unsupported otherwise.", - "DestinationOptions": "The destination options. The following options are supported:\n\n- `FileFormat` - The format for the flow log ( `plain-text` | `parquet` ). The default is `plain-text` .\n- `HiveCompatiblePartitions` - Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3 ( `true` | `false` ). The default is `false` .\n- `PerHourPartition` - Indicates whether to partition the flow log per hour ( `true` | `false` ). The default is `false` .", + "DeliverLogsPermissionArn": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to the log destination.\n\nThis parameter is required if the destination type is `cloud-watch-logs` , or if the destination type is `kinesis-data-firehose` and the delivery stream and the resources to monitor are in different accounts.", + "DestinationOptions": "The destination options.", "LogDestination": "The destination for the flow log data. The meaning of this parameter depends on the destination type.\n\n- If the destination type is `cloud-watch-logs` , specify the ARN of a CloudWatch Logs log group. For example:\n\narn:aws:logs: *region* : *account_id* :log-group: *my_group*\n\nAlternatively, use the `LogGroupName` parameter.\n- If the destination type is `s3` , specify the ARN of an S3 bucket. For example:\n\narn:aws:s3::: *my_bucket* / *my_subfolder* /\n\nThe subfolder is optional. Note that you can't use `AWSLogs` as a subfolder name.\n- If the destination type is `kinesis-data-firehose` , specify the ARN of a Kinesis Data Firehose delivery stream. For example:\n\narn:aws:firehose: *region* : *account_id* :deliverystream: *my_stream*", "LogDestinationType": "The type of destination for the flow log data.\n\nDefault: `cloud-watch-logs`", "LogFormat": "The fields to include in the flow log record, in the order in which they should appear. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must include at least one field. For more information about the available fields, see [Flow log records](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) in the *Amazon VPC User Guide* or [Transit Gateway Flow Log records](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-flow-logs.html#flow-log-records) in the *AWS Transit Gateway Guide* .\n\nSpecify the fields using the `${field-id}` format, separated by spaces.", @@ -10605,7 +10731,7 @@ "SnapshotId": "The ID of the snapshot.", "Throughput": "The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s.\n\nValid Range: Minimum value of 125. Maximum value of 1000.", "VolumeSize": "The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. 
The following are the supported volumes sizes for each volume type:\n\n- `gp2` and `gp3` : 1 - 16,384 GiB\n- `io1` : 4 - 16,384 GiB\n- `io2` : 4 - 65,536 GiB\n- `st1` and `sc1` : 125 - 16,384 GiB\n- `standard` : 1 - 1024 GiB", - "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* ." + "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* ." }, "AWS::EC2::LaunchTemplate ElasticGpuSpecification": { "Type": "The type of Elastic Graphics accelerator. For more information about the values to specify for `Type` , see [Elastic Graphics Basics](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html#elastic-graphics-basics) , specifically the Elastic Graphics accelerator column, in the *Amazon Elastic Compute Cloud User Guide for Windows Instances* ." @@ -11108,20 +11234,20 @@ "ConnectionTrackingSpecification": "A connection tracking specification for the network interface.", "Description": "A description for the network interface.", "EnablePrimaryIpv6": "If you\u2019re modifying a network interface in a dual-stack or IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if the instance that this ENI will be attached to relies on its IPv6 address not changing. AWS will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.", - "GroupSet": "The security group IDs associated with this network interface.", + "GroupSet": "The IDs of the security groups associated with this network interface.", "InterfaceType": "The type of network interface. The default is `interface` . The supported values are `efa` and `trunk` .", "Ipv4PrefixCount": "The number of IPv4 prefixes to be automatically assigned to the network interface.\n\nWhen creating a network interface, you can't specify a count of IPv4 prefixes if you've specified one of the following: specific IPv4 prefixes, specific private IPv4 addresses, or a count of private IPv4 addresses.", "Ipv4Prefixes": "The IPv4 delegated prefixes that are assigned to the network interface.\n\nWhen creating a network interface, you can't specify IPv4 prefixes if you've specified one of the following: a count of IPv4 prefixes, specific private IPv4 addresses, or a count of private IPv4 addresses.", - "Ipv6AddressCount": "The number of IPv6 addresses to assign to a network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. 
To specify specific IPv6 addresses, use the `Ipv6Addresses` property and don't specify this property.\n\nWhen creating a network interface, you can't specify a count of IPv6 addresses if you've specified one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", - "Ipv6Addresses": "One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet to associate with the network interface. If you're specifying a number of IPv6 addresses, use the `Ipv6AddressCount` property and don't specify this property.\n\nWhen creating a network interface, you can't specify IPv6 addresses if you've specified one of the following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", + "Ipv6AddressCount": "The number of IPv6 addresses to assign to the network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. To specify specific IPv6 addresses, use the `Ipv6Addresses` property and don't specify this property.\n\nWhen creating a network interface, you can't specify a count of IPv6 addresses if you've specified one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", + "Ipv6Addresses": "The IPv6 addresses from the IPv6 CIDR block range of your subnet to assign to the network interface. If you're specifying a number of IPv6 addresses, use the `Ipv6AddressCount` property and don't specify this property.\n\nWhen creating a network interface, you can't specify IPv6 addresses if you've specified one of the following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", "Ipv6PrefixCount": "The number of IPv6 prefixes to be automatically assigned to the network interface.\n\nWhen creating a network interface, you can't specify a count of IPv6 prefixes if you've specified one of the following: specific IPv6 prefixes, specific IPv6 addresses, or a count of IPv6 addresses.", "Ipv6Prefixes": "The IPv6 delegated prefixes that are assigned to the network interface.\n\nWhen creating a network interface, you can't specify IPv6 prefixes if you've specified one of the following: a count of IPv6 prefixes, specific IPv6 addresses, or a count of IPv6 addresses.", - "PrivateIpAddress": "Assigns a single private IP address to the network interface, which is used as the primary private IP address. If you want to specify multiple private IP address, use the `PrivateIpAddresses` property.", - "PrivateIpAddresses": "Assigns private IP addresses to the network interface. You can specify a primary private IP address by setting the value of the `Primary` property to `true` in the `PrivateIpAddressSpecification` property. If you want EC2 to automatically assign private IP addresses, use the `SecondaryPrivateIpAddressCount` property and do not specify this property.\n\nWhen creating a network interface, you can't specify private IPv4 addresses if you've specified one of the following: a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", + "PrivateIpAddress": "The private IPv4 address to assign to the network interface as the primary private IP address. If you want to specify multiple private IP addresses, use the `PrivateIpAddresses` property.", + "PrivateIpAddresses": "The private IPv4 addresses to assign to the network interface. You can specify a primary private IP address by setting the value of the `Primary` property to `true` in the `PrivateIpAddressSpecification` property. 
If you want EC2 to automatically assign private IP addresses, use the `SecondaryPrivateIpAddressCount` property and do not specify this property.\n\nWhen creating a network interface, you can't specify private IPv4 addresses if you've specified one of the following: a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", "SecondaryPrivateIpAddressCount": "The number of secondary private IPv4 addresses to assign to a network interface. When you specify a number of secondary IPv4 addresses, Amazon EC2 selects these IP addresses within the subnet's IPv4 CIDR range. You can't specify this option and specify more than one private IP address using `privateIpAddresses` .\n\nWhen creating a Network Interface, you can't specify a count of private IPv4 addresses if you've specified one of the following: specific private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", "SourceDestCheck": "Enable or disable source/destination checks, which ensure that the instance is either the source or the destination of any traffic that it receives. If the value is `true` , source/destination checks are enabled; otherwise, they are disabled. The default value is `true` . You must disable source/destination checks if the instance runs services such as network address translation, routing, or firewalls.", "SubnetId": "The ID of the subnet to associate with the network interface.", - "Tags": "An arbitrary set of tags (key-value pairs) for this network interface." + "Tags": "The tags to apply to the network interface." }, "AWS::EC2::NetworkInterface ConnectionTrackingSpecification": { "TcpEstablishedTimeout": "Timeout (in seconds) for idle TCP connections in an established state. Min: 60 seconds. Max: 432000 seconds (5 days). Default: 432000 seconds. Recommended: Less than 432000 seconds.", @@ -11229,18 +11355,19 @@ "VpcId": "The ID of the VPC for the security group. If you do not specify a VPC, the default is to use the default VPC for the Region. If there's no specified VPC and no default VPC, security group creation fails." 
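Illustrative note: the NetworkInterface entries above state that Ipv6Addresses and Ipv6AddressCount are mutually exclusive, as are PrivateIpAddresses and SecondaryPrivateIpAddressCount. A minimal sketch using one option from each pair follows; the subnet ID, security group ID, and addresses are placeholders.

{
  "MyNetworkInterface": {
    "Type": "AWS::EC2::NetworkInterface",
    "Properties": {
      "SubnetId": "subnet-0abc1234def567890",
      "GroupSet": [ "sg-0123456789abcdef0" ],
      "Ipv6AddressCount": 2,
      "PrivateIpAddresses": [
        { "PrivateIpAddress": "10.0.1.10", "Primary": true },
        { "PrivateIpAddress": "10.0.1.11", "Primary": false }
      ],
      "SourceDestCheck": false,
      "Tags": [ { "Key": "Name", "Value": "app-eni" } ]
    }
  }
}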
}, "AWS::EC2::SecurityGroup Egress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "A description for the security group rule.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", - "DestinationPrefixListId": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", - "DestinationSecurityGroupId": "The ID of the destination VPC security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "DestinationPrefixListId": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", + "DestinationSecurityGroupId": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. 
If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", + "SourceSecurityGroupId": "", "ToPort": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes)." }, "AWS::EC2::SecurityGroup Ingress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "Updates the description of an ingress (inbound) security group rule. You can replace an existing description, or add a description to a rule that did not have one previously.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. 
If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", @@ -11255,19 +11382,19 @@ "Value": "The tag value." }, "AWS::EC2::SecurityGroupEgress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "Description": "The description of an egress (outbound) security group rule.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", - "DestinationPrefixListId": "The prefix list IDs for an AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", - "DestinationSecurityGroupId": "The ID of the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "DestinationPrefixListId": "The prefix list IDs for an AWS service. 
This is the AWS service to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", + "DestinationSecurityGroupId": "The ID of the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "GroupId": "The ID of the security group. You must specify either the security group ID or the security group name in the request. For security groups in a nondefault VPC, you must specify the security group ID.", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", "ToPort": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes)." }, "AWS::EC2::SecurityGroupIngress": { - "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", - "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIp": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "CidrIpv6": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", 
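The reworded descriptions for AWS::EC2::SecurityGroupEgress and the ingress variants above and below all reduce to the same constraint: a standalone rule must name exactly one destination (or source), either a CIDR range, a prefix list, or a security group. A minimal template sketch of a standalone egress rule that satisfies that constraint might look like the following; the logical ID, group ID, and CIDR value are illustrative placeholders, not values taken from this change.

    Resources:
      OutboundHttpsRule:                  # hypothetical logical ID
        Type: AWS::EC2::SecurityGroupEgress
        Properties:
          GroupId: sg-0123456789abcdef0   # placeholder security group ID
          Description: Allow outbound HTTPS to the VPC CIDR
          IpProtocol: tcp                 # tcp requires FromPort/ToPort
          FromPort: 443
          ToPort: 443
          CidrIp: 10.0.0.0/16             # exactly one of CidrIp, CidrIpv6,
                                          # DestinationPrefixListId, or
                                          # DestinationSecurityGroupId

Swapping CidrIp for DestinationSecurityGroupId (and dropping the CIDR) would give the security-group-to-security-group form; supplying more than one of those properties in a single rule is what the revised "exactly one of the following" wording now rules out explicitly.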
"Description": "Updates the description of an ingress (inbound) security group rule. You can replace an existing description, or add a description to a rule that did not have one previously.\n\nConstraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*", "FromPort": "The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type number. A value of `-1` indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you must specify all codes.\n\nUse this for ICMP and any protocol that uses ports.", "GroupId": "The ID of the security group.", @@ -11315,7 +11442,7 @@ "Iops": "The number of I/O operations per second (IOPS). For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type:\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nFor `io2` volumes, you can achieve up to 256,000 IOPS on [instances built on the Nitro System](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) . On other instances, you can achieve performance up to 32,000 IOPS.\n\nThis parameter is required for `io1` and `io2` volumes. The default for `gp3` volumes is 3,000 IOPS.", "SnapshotId": "The ID of the snapshot.", "VolumeSize": "The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size.\n\nThe following are the supported sizes for each volume type:\n\n- `gp2` and `gp3` : 1 - 16,384 GiB\n- `io1` : 4 - 16,384 GiB\n- `io2` : 4 - 65,536 GiB\n- `st1` and `sc1` : 125 - 16,384 GiB\n- `standard` : 1 - 1024 GiB", - "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide* ." + "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* ." }, "AWS::EC2::SpotFleet FleetLaunchTemplateSpecification": { "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", @@ -11944,7 +12071,7 @@ "AWS::EC2::Volume": { "AutoEnableIO": "Indicates whether the volume is auto-enabled for I/O operations. By default, Amazon EBS disables I/O to the volume from attached EC2 instances when it determines that a volume's data is potentially inconsistent. If the consistency of the volume is not a concern, and you prefer that the volume be made available immediately if it's impaired, you can configure the volume to automatically enable I/O.", "AvailabilityZone": "The ID of the Availability Zone in which to create the volume. For example, `us-east-1a` .", - "Encrypted": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. 
For more information, see [Encryption by default](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) in the *Amazon Elastic Compute Cloud User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .", + "Encrypted": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/ebs/latest/userguide/work-with-ebs-encr.html#encryption-by-default) in the *Amazon EBS User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances) .", "Iops": "The number of I/O operations per second (IOPS). For `gp3` , `io1` , and `io2` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type:\n\n- `gp3` : 3,000 - 16,000 IOPS\n- `io1` : 100 - 64,000 IOPS\n- `io2` : 100 - 256,000 IOPS\n\nFor `io2` volumes, you can achieve up to 256,000 IOPS on [instances built on the Nitro System](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) . On other instances, you can achieve performance up to 32,000 IOPS.\n\nThis parameter is required for `io1` and `io2` volumes. The default for `gp3` volumes is 3,000 IOPS. This parameter is not supported for `gp2` , `st1` , `sc1` , or `standard` volumes.", "KmsKeyId": "The identifier of the AWS KMS key to use for Amazon EBS encryption. If `KmsKeyId` is specified, the encrypted state must be `true` .\n\nIf you omit this property and your account is enabled for encryption by default, or *Encrypted* is set to `true` , then the volume is encrypted using the default key specified for your account. If your account does not have a default key, then the volume is encrypted using the AWS managed key .\n\nAlternatively, if you want to specify a different key, you can specify one of the following:\n\n- Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.\n- Key alias. Specify the alias for the key, prefixed with `alias/` . For example, for a key with the alias `my_cmk` , use `alias/my_cmk` . Or to specify the AWS managed key , use `alias/aws/ebs` .\n- Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.\n- Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.", "MultiAttachEnabled": "Indicates whether Amazon EBS Multi-Attach is enabled.\n\nAWS CloudFormation does not currently support updating a single-attach volume to be multi-attach enabled, updating a multi-attach enabled volume to be single-attach, or updating the size or number of I/O operations per second (IOPS) of a multi-attach enabled volume.", @@ -11953,7 +12080,7 @@ "SnapshotId": "The snapshot from which to create the volume. 
You must specify either a snapshot ID or a volume size.", "Tags": "The tags to apply to the volume during creation.", "Throughput": "The throughput to provision for a volume, with a maximum of 1,000 MiB/s.\n\nThis parameter is valid only for `gp3` volumes. The default value is 125.\n\nValid Range: Minimum value of 125. Maximum value of 1000.", - "VolumeType": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nDefault: `gp2`" + "VolumeType": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) .\n\nDefault: `gp2`" }, "AWS::EC2::Volume Tag": { "Key": "The tag key.", @@ -12103,7 +12230,7 @@ "AWS::ECS::ClusterCapacityProviderAssociations CapacityProviderStrategy": { "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", "CapacityProvider": "The short name of the capacity provider.", - "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` will not be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that is run using *capacityProviderA* , four tasks would use *capacityProviderB* ." + "Weight": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. 
If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* ." }, "AWS::ECS::PrimaryTaskSet": { "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task set exists in.", @@ -12125,7 +12252,7 @@ "PlacementConstraints": "An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10 constraints for each task. This limit includes constraints in the task definition and those specified at runtime.", "PlacementStrategies": "The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules for each service.", "PlatformVersion": "The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the `LATEST` platform version is used. For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .", - "PropagateTags": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n\nThe default is `NONE` .", + "PropagateTags": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n\nYou must set this to a value other than `NONE` when you use Cost Explorer. For more information, see [Amazon ECS usage reports](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/usage-reports.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe default is `NONE` .", "Role": "The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition doesn't use the `awsvpc` network mode. If you specify the `role` parameter, you must also specify a load balancer object with the `loadBalancers` parameter.\n\n> If your account has already created the Amazon ECS service-linked role, that role is used for your service unless you specify a role here. 
The service-linked role is required if your task definition uses the `awsvpc` network mode or if the service is configured to use service discovery, an external deployment controller, multiple target groups, or Elastic Inference accelerators in which case you don't specify a role here. For more information, see [Using service-linked roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nIf your specified role has a path other than `/` , then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name `bar` has a path of `/foo/` then you would specify `/foo/bar` as the role name. For more information, see [Friendly names and paths](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) in the *IAM User Guide* .", "SchedulingStrategy": "The scheduling strategy to use for the service. For more information, see [Services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nThere are two service scheduler strategies available:\n\n- `REPLICA` -The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. This scheduler strategy is required if the service uses the `CODE_DEPLOY` or `EXTERNAL` deployment controller types.\n- `DAEMON` -The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that don't meet the placement constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.\n\n> Tasks using the Fargate launch type or the `CODE_DEPLOY` or `EXTERNAL` deployment controller types don't support the `DAEMON` scheduling strategy.", "ServiceConnectConfiguration": "The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.\n\nTasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", @@ -12187,7 +12314,7 @@ "Type": "The type of constraint. Use `distinctInstance` to ensure that each task in a particular group is running on a different container instance. Use `memberOf` to restrict the selection to a group of valid candidates." }, "AWS::ECS::Service PlacementStrategy": { - "Field": "The field to apply the placement strategy against. 
For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `CPU` and `MEMORY` . For the `random` placement strategy, this field is not used.", + "Field": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that's applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `cpu` and `memory` . For the `random` placement strategy, this field is not used.", "Type": "The type of placement strategy. The `random` placement strategy randomly places tasks on available candidates. The `spread` placement strategy spreads placement across available candidates evenly based on the `field` parameter. The `binpack` strategy places tasks on available candidates that have the least available amount of the resource that's specified with the `field` parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory but still enough to run the task." }, "AWS::ECS::Service Secret": { @@ -12339,11 +12466,11 @@ "TransitEncryptionPort": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* ." }, "AWS::ECS::TaskDefinition EnvironmentFile": { - "Type": "The file type to use. The only supported value is `s3` .", + "Type": "The file type to use. Environment files are objects in Amazon S3. The only supported value is `s3` .", "Value": "The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file." }, "AWS::ECS::TaskDefinition EphemeralStorage": { - "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB." + "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB." }, "AWS::ECS::TaskDefinition FirelensConfiguration": { "Options": "The options to use when configuring the log router. This field is optional and can be used to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event.\n\nIf specified, valid option keys are:\n\n- `enable-ecs-log-metadata` , which can be `true` or `false`\n- `config-file-type` , which can be `s3` or `file`\n- `config-file-value` , which is either an S3 ARN or a file path", @@ -12701,20 +12828,20 @@ "Value": "The optional part of a key-value pair that make up a tag. A `value` acts as a descriptor within a tag category (key)." }, "AWS::EKS::Nodegroup": { - "AmiType": "The AMI type for your node group. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. 
If your launch template uses a Windows custom AMI, then add `eks:kube-proxy-windows` to your Windows nodes `rolearn` in the `aws-auth` `ConfigMap` . For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "AmiType": "The AMI type for your node group. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add `eks:kube-proxy-windows` to your Windows nodes `rolearn` in the `aws-auth` `ConfigMap` . For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "CapacityType": "The capacity type of your managed node group.", "ClusterName": "The name of your cluster.", - "DiskSize": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "DiskSize": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "ForceUpdateEnabled": "Force the update if any `Pod` on the existing node group can't be drained due to a `Pod` disruption budget issue. If an update fails because all Pods can't be drained, you can force the update after it fails to terminate the old node whether or not any `Pod` is running on the node.", - "InstanceTypes": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "InstanceTypes": "Specify the instance types for a node group. 
If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "Labels": "The Kubernetes `labels` applied to the nodes in the node group.\n\n> Only `labels` that are applied with the Amazon EKS API are shown here. There may be other Kubernetes `labels` applied to the nodes in this group.", - "LaunchTemplate": "An object representing a node group's launch template specification. If specified, then do not specify `instanceTypes` , `diskSize` , or `remoteAccess` and make sure that the launch template meets the requirements in `launchTemplateSpecification` .", - "NodeRole": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "LaunchTemplate": "An object representing a node group's launch template specification. When using this object, don't directly specify `instanceTypes` , `diskSize` , or `remoteAccess` . Make sure that the launch template meets the requirements in `launchTemplateSpecification` . Also refer to [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "NodeRole": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . 
If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "NodegroupName": "The unique name to give your node group.", "ReleaseVersion": "The AMI version of the Amazon EKS optimized AMI to use with your node group (for example, `1.14.7- *YYYYMMDD*` ). By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see [Amazon EKS optimized Linux AMI Versions](https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) in the *Amazon EKS User Guide* .\n\n> Changing this value triggers an update of the node group if one is available. You can't update other properties at the same time as updating `Release Version` .", - "RemoteAccess": "The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify `launchTemplate` , then don't specify `remoteAccess` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "RemoteAccess": "The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify `launchTemplate` , then don't specify `remoteAccess` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "ScalingConfig": "The scaling configuration details for the Auto Scaling group that is created for your node group.", - "Subnets": "The subnets to use for the Auto Scaling group that is created for your node group. If you specify `launchTemplate` , then don't specify `[SubnetId](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "Subnets": "The subnets to use for the Auto Scaling group that is created for your node group. If you specify `launchTemplate` , then don't specify `[SubnetId](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "Tags": "Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or AWS resources.", "Taints": "The Kubernetes taints to be applied to the nodes in the node group when they are created. 
Effect is one of `No_Schedule` , `Prefer_No_Schedule` , or `No_Execute` . Kubernetes taints can be used together with tolerations to control how workloads are scheduled to your nodes. For more information, see [Node taints on managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/node-taints-managed-node-groups.html) .", "UpdateConfig": "The node group update configuration.", @@ -12771,7 +12898,7 @@ "LogEncryptionKmsKeyId": "The AWS KMS key used for encrypting log files. This attribute is only available with Amazon EMR 5.30.0 and later, excluding Amazon EMR 6.0.0.", "LogUri": "The path to the Amazon S3 location where logs for this cluster are stored.", "ManagedScalingPolicy": "Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as Amazon EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.", - "Name": "The name of the cluster.", + "Name": "The name of the cluster. This parameter can't contain the characters <, >, $, |, or ` (backtick).", "OSReleaseLabel": "The Amazon Linux release specified in a cluster launch RunJobFlow request. If no Amazon Linux release was specified, the default Amazon Linux release is shown in the response.", "PlacementGroupConfigs": "", "ReleaseLabel": "The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form `emr-x.x.x` , where x.x.x is an Amazon EMR release version such as `emr-5.14.0` . For more information about Amazon EMR release versions and included application versions and features, see [](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/) . The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use `AmiVersion` .", @@ -12887,7 +13014,7 @@ "TaskInstanceFleets": "Describes the EC2 instances and instance configurations for the task instance fleets when using clusters with the instance fleet configuration. These task instance fleets are added to the cluster as part of the cluster launch. Each task instance fleet must have a unique name specified so that CloudFormation can differentiate between the task instance fleets.\n\n> You can currently specify only one task instance fleet for a cluster. After creating the cluster, you can only modify the mutable properties of `InstanceFleetConfig` , which are `TargetOnDemandCapacity` and `TargetSpotCapacity` . Modifying any other property results in cluster replacement. > To allow a maximum of 30 Amazon EC2 instance types per fleet, include `TaskInstanceFleets` when you create your cluster. If you create your cluster without `TaskInstanceFleets` , Amazon EMR uses its default allocation strategy, which allows for a maximum of five Amazon EC2 instance types.", "TaskInstanceGroups": "Describes the EC2 instances and instance configurations for task instance groups when using clusters with the uniform instance group configuration. These task instance groups are added to the cluster as part of the cluster launch. Each task instance group must have a unique name specified so that CloudFormation can differentiate between the task instance groups.\n\n> After creating the cluster, you can only modify the mutable properties of `InstanceGroupConfig` , which are `AutoScalingPolicy` and `InstanceCount` . 
Modifying any other property results in cluster replacement.", "TerminationProtected": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", - "UnhealthyNodeReplacement": "" + "UnhealthyNodeReplacement": "Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster." }, "AWS::EMR::Cluster KerberosAttributes": { "ADDomainJoinPassword": "The Active Directory password for `ADDomainJoinUser` .", @@ -13825,7 +13952,7 @@ "Type": "The type of load balancer. The default is `application` ." }, "AWS::ElasticLoadBalancingV2::LoadBalancer LoadBalancerAttribute": { - "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). 
The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", + "Key": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . 
The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", "Value": "The value of the attribute." }, "AWS::ElasticLoadBalancingV2::LoadBalancer SubnetMapping": { @@ -14674,7 +14801,7 @@ }, "AWS::FSx::FileSystem DiskIopsConfiguration": { "Iops": "The total number of SSD IOPS provisioned for the file system.\n\nThe minimum and maximum values for this property depend on the value of `HAPairs` and `StorageCapacity` . The minimum value is calculated as `StorageCapacity` * 3 * `HAPairs` (3 IOPS per GB of `StorageCapacity` ). The maximum value is calculated as 200,000 * `HAPairs` .\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) if the value of `Iops` is outside of the minimum or maximum values.", - "Mode": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, , or if it using a `USER_PROVISIONED` value." + "Mode": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it using a `USER_PROVISIONED` value." }, "AWS::FSx::FileSystem LustreConfiguration": { "AutoImportPolicy": "(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. `AutoImportPolicy` can have the following values:\n\n- `NONE` - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.\n- `NEW` - AutoImport is on. 
Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.\n- `NEW_CHANGED` - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.\n- `NEW_CHANGED_DELETED` - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.\n\nFor more information, see [Automatically import updates from your S3 bucket](https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html) .\n\n> This parameter is not supported for Lustre file systems with a data repository association.", @@ -14700,11 +14827,11 @@ "DiskIopsConfiguration": "The SSD IOPS configuration for the FSx for ONTAP file system.", "EndpointIpAddressRange": "(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC\u2019s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.", "FsxAdminPassword": "The ONTAP administrative password for the `fsxadmin` user with which you administer your file system using the NetApp ONTAP CLI and REST API.", - "HAPairs": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file system are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", + "HAPairs": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 12.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "PreferredSubnetId": "Required when `DeploymentType` is set to `MULTI_AZ_1` . 
This specifies the subnet in which you want the preferred file server to be located.", "RouteTableIds": "(Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.\n\n> Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication. These route tables are tagged with `Key: AmazonFSx; Value: ManagedByAmazonFSx` . When creating FSx for ONTAP Multi-AZ file systems using AWS CloudFormation we recommend that you add the `Key: AmazonFSx; Value: ManagedByAmazonFSx` tag manually.", "ThroughputCapacity": "Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). For more information, see [Managing throughput capacity](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-throughput-capacity.html) in the FSx for ONTAP User Guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value.\n- The value of `ThroughputCapacity` when divided by the value of `HAPairs` is outside of the valid range for `ThroughputCapacity` .", - "ThroughputCapacityPerHAPair": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", + "ThroughputCapacityPerHAPair": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 12).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "WeeklyMaintenanceStartTime": "A recurring weekly time, in the format `D:HH:MM` .\n\n`D` is the day of the week, for which 1 represents Monday and 7 
represents Sunday. For further details, see [the ISO-8601 spec as described on Wikipedia](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_week_date) .\n\n`HH` is the zero-padded hour of the day (0-23), and `MM` is the zero-padded minute of the hour.\n\nFor example, `1:05:00` specifies maintenance at 5 AM Monday." }, "AWS::FSx::FileSystem OpenZFSConfiguration": { @@ -15441,6 +15568,7 @@ "CrawlerSecurityConfiguration": "The name of the `SecurityConfiguration` structure to be used by this crawler.", "DatabaseName": "The name of the database in which the crawler's output is stored.", "Description": "A description of the crawler.", + "LakeFormationConfiguration": "Specifies whether the crawler should use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.", "Name": "The name of the crawler.", "RecrawlPolicy": "A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.", "Role": "The Amazon Resource Name (ARN) of an IAM role that's used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.", @@ -15477,6 +15605,10 @@ "Exclusions": "A list of glob patterns used to exclude from the crawl. For more information, see [Catalog Tables with a Crawler](https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html) .", "Path": "The path of the JDBC target." }, + "AWS::Glue::Crawler LakeFormationConfiguration": { + "AccountId": "Required for cross account crawls. For same account crawls as the target data, this can be left as null.", + "UseLakeFormationCredentials": "Specifies whether to use AWS Lake Formation credentials for the crawler instead of the IAM role credentials." + }, "AWS::Glue::Crawler MongoDBTarget": { "ConnectionName": "The name of the connection to use to connect to the Amazon DocumentDB or MongoDB target.", "Path": "The path of the Amazon DocumentDB or MongoDB target (database/collection)." @@ -15869,7 +16001,7 @@ "CatalogId": "The catalog ID of the table.", "DatabaseName": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", "TableName": "The table name. For Hive compatibility, this must be entirely lowercase.", - "TableOptimizerConfiguration": "", + "TableOptimizerConfiguration": "Specifies configuration details of a table optimizer.", "Type": "The type of table optimizer. Currently, the only valid value is compaction." }, "AWS::Glue::TableOptimizer TableOptimizerConfiguration": { @@ -17430,7 +17562,9 @@ }, "AWS::InternetMonitor::Monitor": { "HealthEventsConfig": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. 
You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .", + "IncludeLinkedAccounts": "", "InternetMeasurementsLogDelivery": "Publish internet measurements for a monitor for all city-networks (up to the 500,000 service limit) to another location, such as an Amazon S3 bucket. Measurements are also published to Amazon CloudWatch Logs for the first 500 (by traffic volume) city-networks (client locations and ASNs, typically internet service providers or ISPs).", + "LinkedAccountId": "", "MaxCityNetworksToMonitor": "The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network, such as an internet service provider, that clients access the resources through.\n\nFor more information, see [Choosing a city-network maximum value](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/IMCityNetworksMaximum.html) in *Using Amazon CloudWatch Internet Monitor* .", "MonitorName": "The name of the monitor. A monitor name can contain only alphanumeric characters, dashes (-), periods (.), and underscores (_).", "Resources": "The resources that have been added for the monitor, listed by their Amazon Resource Names (ARNs). Use this option to add or remove resources when making an update.\n\n> Be aware that if you include content in the `Resources` field when you update a monitor, the `ResourcesToAdd` and `ResourcesToRemove` fields must be empty.", @@ -18980,17 +19114,17 @@ }, "AWS::IoTSiteWise::AccessPolicy": { "AccessPolicyIdentity": "The identity for this access policy. Choose an IAM Identity Center user, an IAM Identity Center group, or an IAM user.", - "AccessPolicyPermission": "The permission level for this access policy. Choose either a `ADMINISTRATOR` or `VIEWER` . Note that a project `ADMINISTRATOR` is also known as a project owner.", + "AccessPolicyPermission": "The permission level for this access policy. Note that a project `ADMINISTRATOR` is also known as a project owner.", "AccessPolicyResource": "The AWS IoT SiteWise Monitor resource for this access policy. Choose either a portal or a project." }, "AWS::IoTSiteWise::AccessPolicy AccessPolicyIdentity": { "IamRole": "An IAM role identity.", "IamUser": "An IAM user identity.", - "User": "The IAM Identity Center user to which this access policy maps." + "User": "An IAM Identity Center user identity." }, "AWS::IoTSiteWise::AccessPolicy AccessPolicyResource": { - "Portal": "The AWS IoT SiteWise Monitor portal for this access policy.", - "Project": "The AWS IoT SiteWise Monitor project for this access policy."
+ "Portal": "Identifies an AWS IoT SiteWise Monitor portal.", + "Project": "Identifies a specific AWS IoT SiteWise Monitor project." }, "AWS::IoTSiteWise::AccessPolicy IamRole": { "arn": "The ARN of the IAM role. For more information, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in the *IAM User Guide* ." @@ -19005,62 +19139,78 @@ "id": "The ID of the project." }, "AWS::IoTSiteWise::AccessPolicy User": { - "id": "The ID of the user." + "id": "The IAM Identity Center ID of the user." }, "AWS::IoTSiteWise::Asset": { - "AssetDescription": "A description for the asset.", - "AssetHierarchies": "A list of asset hierarchies that each contain a `hierarchyLogicalId` . A hierarchy specifies allowed parent/child asset relationships.", + "AssetDescription": "The ID of the asset, in UUID format.", + "AssetExternalId": "The external ID of the asset model composite model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "AssetHierarchies": "A list of asset hierarchies that each contain a `hierarchyId` . A hierarchy specifies allowed parent/child asset relationships.", "AssetModelId": "The ID of the asset model from which to create the asset. This can be either the actual ID in UUID format, or else `externalId:` followed by the external ID, if it has one. For more information, see [Referencing objects with external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-id-references) in the *AWS IoT SiteWise User Guide* .", - "AssetName": "A unique, friendly name for the asset.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "AssetName": "A friendly name for the asset.", "AssetProperties": "The list of asset properties for the asset.\n\nThis object doesn't include properties that you define in composite models. You can find composite model properties in the `assetCompositeModels` object.", "Tags": "A list of key-value pairs that contain metadata for the asset. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Asset AssetHierarchy": { "ChildAssetId": "The Id of the child asset.", - "LogicalId": "The `LogicalID` of the hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` ." + "ExternalId": "The external ID of the hierarchy, if it has one. When you update an asset hierarchy, you may assign an external ID if it doesn't already have one. You can't change the external ID of an asset hierarchy that already has one. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Id": "The ID of the hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set.", + "LogicalId": "The ID of the hierarchy. This ID is a `hierarchyId` ." }, "AWS::IoTSiteWise::Asset AssetProperty": { - "Alias": "The property alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). 
For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .\n\nThe property alias must have 1-1000 characters.", - "LogicalId": "The `LogicalID` of the asset property.\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", - "NotificationState": "The MQTT notification state ( `ENABLED` or `DISABLED` ) for this asset property. When the notification state is `ENABLED` , AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .\n\n> You must use all caps for the NotificationState parameter. If you use lower case letters, you will receive a schema validation error.", + "Alias": "The alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .", + "ExternalId": "The external ID of the property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Id": "The ID of the asset property.\n\n> This is a return value and can't be set.", + "LogicalId": "The `LogicalID` of the asset property.", + "NotificationState": "The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .", "Unit": "The unit (such as `Newtons` or `RPM` ) of the asset property." }, "AWS::IoTSiteWise::Asset Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::AssetModel": { - "AssetModelCompositeModels": "The composite asset models that are part of this asset model. Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. You can use composite asset models to define alarms on this asset model.", + "AssetModelCompositeModels": "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.\n\n> When creating custom composite models, you need to use [CreateAssetModelCompositeModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModelCompositeModel.html) . 
For more information, see [Creating custom composite models (Components)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-custom-composite-models.html) in the *AWS IoT SiteWise User Guide* .", "AssetModelDescription": "A description for the asset model.", - "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", - "AssetModelName": "A unique, friendly name for the asset model.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", - "AssetModelProperties": "The property definitions of the asset model. For more information, see [Defining data properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "AssetModelExternalId": "The external ID of the asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "AssetModelName": "A unique, friendly name for the asset model.", + "AssetModelProperties": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "AssetModelType": "The type of asset model.\n\n- *ASSET_MODEL* \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.\n- *COMPONENT_MODEL* \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.", "Tags": "A list of key-value pairs that contain metadata for the asset. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." 
}, "AWS::IoTSiteWise::AssetModel AssetModelCompositeModel": { + "ComposedAssetModelId": "The ID of a component model which is reused to create this composite model.", "CompositeModelProperties": "The asset property definitions for this composite model.", "Description": "The description of the composite model.", + "ExternalId": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Id": "The ID of the asset model composite model.\n\n> This is a return value and can't be set.", "Name": "The name of the composite model.", + "ParentAssetModelCompositeModelExternalId": "The external ID of the parent asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> If `ParentCompositeModelExternalId` is specified, this value overrides the value of `ExternalId` , if both are included.", + "Path": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.", "Type": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` ." }, "AWS::IoTSiteWise::AssetModel AssetModelHierarchy": { - "ChildAssetModelId": "The Id of the asset model.", - "LogicalId": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+`", - "Name": "The name of the asset model hierarchy.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` ." + "ChildAssetModelId": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "ExternalId": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Id": "The ID of the asset model hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set. \n\n- If you are callling [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) to create a *new* hierarchy: You can specify its ID here, if desired. 
AWS IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique.\n- If you are calling UpdateAssetModel to modify an *existing* hierarchy: This can be either the actual ID in UUID format, or else `externalId:` followed by the external ID, if it has one. For more information, see [Referencing objects with external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-id-references) in the *AWS IoT SiteWise User Guide* .", + "LogicalId": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", + "Name": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation." }, "AWS::IoTSiteWise::AssetModel AssetModelProperty": { - "DataType": "The data type of the asset model property. The value can be `STRING` , `INTEGER` , `DOUBLE` , `BOOLEAN` , or `STRUCT` .", + "DataType": "The data type of the asset model property.", "DataTypeSpec": "The data type of the structure for this property. This parameter exists on properties that have the `STRUCT` data type.", - "LogicalId": "The `LogicalID` of the asset model property.\n\nThe maximum length is 256 characters, with the pattern `[^\\\\u0000-\\\\u001F\\\\u007F]+` .", - "Name": "The name of the asset model property.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", - "Type": "Contains a property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` .", + "ExternalId": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "Id": "The ID of the property.\n\n> This is a return value and can't be set.", + "LogicalId": "The `LogicalID` of the asset model property.", + "Name": "The name of the asset model property.", + "Type": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "Unit": "The unit of the asset model property, such as `Newtons` or `RPM` ." }, "AWS::IoTSiteWise::AssetModel Attribute": { "DefaultValue": "The default value of the asset model property attribute. All assets that you create from the asset model contain this attribute value. You can update an attribute's value after you create an asset. For more information, see [Updating attribute values](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/update-attribute-values.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::AssetModel ExpressionVariable": { - "Name": "The friendly name of the variable to be used in the expression.\n\nThe maximum length is 64 characters with the pattern `^[a-z][a-z0-9_]*$` .", + "Name": "The friendly name of the variable to be used in the expression.", "Value": "The variable that identifies an asset property from which to use values." }, "AWS::IoTSiteWise::AssetModel Metric": { @@ -19071,15 +19221,18 @@ "AWS::IoTSiteWise::AssetModel MetricWindow": { "Tumbling": "The tumbling time interval window." 
}, + "AWS::IoTSiteWise::AssetModel PropertyPathDefinition": { + "Name": "The name of the path segment." + }, "AWS::IoTSiteWise::AssetModel PropertyType": { - "Attribute": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [industrial IoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.\n\nThis is required if the `TypeName` is `Attribute` and has a `DefaultValue` .", - "Metric": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.\n\nThis is required if the `TypeName` is `Metric` .", - "Transform": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.\n\nThis is required if the `TypeName` is `Transform` .", + "Attribute": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [IIoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.", + "Metric": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.", + "Transform": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.", "TypeName": "The type of property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` ." }, "AWS::IoTSiteWise::AssetModel Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::AssetModel Transform": { "Expression": "The mathematical expression that defines the transformation function. You can specify up to 10 variables per expression. You can specify up to 10 functions per expression.\n\nFor more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", @@ -19090,8 +19243,13 @@ "Offset": "The offset for the tumbling window. 
The `offset` parameter accepts the following:\n\n- The offset time.\n\nFor example, if you specify `18h` for `offset` and `1d` for `interval` , AWS IoT SiteWise aggregates data in one of the following ways:\n\n- If you create the metric before or at 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) on the day when you create the metric.\n- If you create the metric after 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) the next day.\n- The ISO 8601 format.\n\nFor example, if you specify `PT18H` for `offset` and `1d` for `interval` , AWS IoT SiteWise aggregates data in one of the following ways:\n\n- If you create the metric before or at 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) on the day when you create the metric.\n- If you create the metric after 6 PM (UTC), you get the first aggregation result at 6 PM (UTC) the next day.\n- The 24-hour clock.\n\nFor example, if you specify `00:03:00` for `offset` , `5m` for `interval` , and you create the metric at 2 PM (UTC), you get the first aggregation result at 2:03 PM (UTC). You get the second aggregation result at 2:08 PM (UTC).\n- The offset time zone.\n\nFor example, if you specify `2021-07-23T18:00-08` for `offset` and `1d` for `interval` , AWS IoT SiteWise aggregates data in one of the following ways:\n\n- If you create the metric before or at 6 PM (PST), you get the first aggregation result at 6 PM (PST) on the day when you create the metric.\n- If you create the metric after 6 PM (PST), you get the first aggregation result at 6 PM (PST) the next day." }, "AWS::IoTSiteWise::AssetModel VariableValue": { - "HierarchyLogicalId": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", - "PropertyLogicalId": "The `LogicalID` of the property to use as the variable." + "HierarchyExternalId": "The external ID of the hierarchy being referenced. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "HierarchyId": "The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify `externalId:` followed by the external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\nYou use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same `propertyId` . For example, you might have separately grouped assets that come from the same asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "HierarchyLogicalId": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. 
For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between asset models (hierarchies)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "PropertyExternalId": "The external ID of the property being referenced. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "PropertyId": "The ID of the property to use as the variable. You can use the property `name` if it's from the same asset model. If the property has an external ID, you can specify `externalId:` followed by the external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> This is a return value and can't be set.", + "PropertyLogicalId": "The `LogicalID` of the property that is being referenced.", + "PropertyPath": "The path of the property. Each step of the path is the name of the step. See the following example:\n\n`PropertyPath: Name: AssetModelName Name: Composite1 Name: NestedComposite`" }, "AWS::IoTSiteWise::Dashboard": { "DashboardDefinition": "The dashboard definition specified in a JSON literal. For detailed information, see [Creating dashboards (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-dashboards-using-aws-cli.html) in the *AWS IoT SiteWise User Guide* .", @@ -19101,37 +19259,41 @@ "Tags": "A list of key-value pairs that contain metadata for the dashboard. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Dashboard Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::Gateway": { "GatewayCapabilitySummaries": "A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use [DescribeGatewayCapabilityConfiguration](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGatewayCapabilityConfiguration.html) .", - "GatewayName": "A unique, friendly name for the gateway.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "GatewayName": "A unique, friendly name for the gateway.", "GatewayPlatform": "The gateway's platform. You can only specify one platform in a gateway.", "Tags": "A list of key-value pairs that contain metadata for the gateway. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Gateway GatewayCapabilitySummary": { "CapabilityConfiguration": "The JSON document that defines the configuration for the gateway capability. For more information, see [Configuring data sources (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/configure-sources.html#configure-source-cli) in the *AWS IoT SiteWise User Guide* .", - "CapabilityNamespace": "The namespace of the capability configuration. 
For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .\n\nThe maximum length is 512 characters with the pattern `^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$` ." + "CapabilityNamespace": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` ." }, "AWS::IoTSiteWise::Gateway GatewayPlatform": { "Greengrass": "A gateway that runs on AWS IoT Greengrass .", - "GreengrassV2": "A gateway that runs on AWS IoT Greengrass V2 ." + "GreengrassV2": "A gateway that runs on AWS IoT Greengrass V2 .", + "SiemensIE": "" }, "AWS::IoTSiteWise::Gateway Greengrass": { - "GroupArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/latest/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/latest/apireference/getgroup-get.html) in the *AWS IoT Greengrass API Reference* ." + "GroupArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* ." }, "AWS::IoTSiteWise::Gateway GreengrassV2": { "CoreDeviceThingName": "The name of the AWS IoT thing for your AWS IoT Greengrass V2 core device." }, + "AWS::IoTSiteWise::Gateway SiemensIE": { + "IotCoreThingName": "" + }, "AWS::IoTSiteWise::Gateway Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::Portal": { "Alarms": "Contains the configuration information of an alarm created in an AWS IoT SiteWise Monitor portal. You can use the alarm to monitor an asset property and get notified when the asset property value is outside a specified range. For more information, see [Monitoring with alarms](https://docs.aws.amazon.com/iot-sitewise/latest/appguide/monitor-alarms.html) in the *AWS IoT SiteWise Application Guide* .", "NotificationSenderEmail": "The email address that sends alarm notifications.\n\n> If you use the [AWS IoT Events managed Lambda function](https://docs.aws.amazon.com/iotevents/latest/developerguide/lambda-support.html) to manage your emails, you must [verify the sender email address in Amazon SES](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html) .", - "PortalAuthMode": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center , you must enable IAM Identity Center . For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . 
This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management ( IAM ) to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", + "PortalAuthMode": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center, you must enable IAM Identity Center. For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", "PortalContactEmail": "The AWS administrator's contact email address.", "PortalDescription": "A description for the portal.", "PortalName": "A friendly name for the portal.", @@ -19143,8 +19305,8 @@ "NotificationLambdaArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Lambda function that manages alarm notifications. For more information, see [Managing alarm notifications](https://docs.aws.amazon.com/iotevents/latest/developerguide/lambda-support.html) in the *AWS IoT Events Developer Guide* ." }, "AWS::IoTSiteWise::Portal Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTSiteWise::Project": { "AssetIds": "A list that contains the IDs of each asset associated with the project.", @@ -19154,8 +19316,8 @@ "Tags": "A list of key-value pairs that contain metadata for the project. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Project Tag": { - "Key": "", - "Value": "" + "Key": "The key or name that identifies the tag.", + "Value": "The value of the tag." }, "AWS::IoTTwinMaker::ComponentType": { "ComponentTypeId": "The ID of the component type.", @@ -19523,7 +19685,7 @@ "LastUplinkReceivedAt": "The date and time when the most recent uplink was received.", "LoRaWAN": "The device configuration information to use to create the wireless device. Must be at least one of OtaaV10x, OtaaV11, AbpV11, or AbpV10x.", "Name": "The name of the new resource.", - "Positioning": "", + "Positioning": "FPort values for the GNSS, Stream, and ClockSync functions of the positioning information.", "Tags": "The tags are an array of key-value pairs to attach to the specified resource. Tags can have a minimum of 0 and a maximum of 50 items.", "ThingArn": "The ARN of the thing to associate with the wireless device.", "Type": "The wireless device type." @@ -19537,19 +19699,19 @@ "SessionKeys": "Session keys for ABP v1.1." 
}, "AWS::IoTWireless::WirelessDevice Application": { - "DestinationName": "", - "FPort": "", - "Type": "" + "DestinationName": "The name of the position data destination that describes the IoT rule that processes the device's position data.", + "FPort": "The name of the new destination for the device.", + "Type": "Application type, which can be specified to obtain real-time position information of your LoRaWAN device." }, "AWS::IoTWireless::WirelessDevice FPorts": { - "Applications": "" + "Applications": "LoRaWAN application configuration, which can be used to perform geolocation." }, "AWS::IoTWireless::WirelessDevice LoRaWANDevice": { "AbpV10x": "ABP device object for LoRaWAN specification v1.0.x.", "AbpV11": "ABP device object for create APIs for v1.1.", "DevEui": "The DevEUI value.", "DeviceProfileId": "The ID of the device profile for the new wireless device.", - "FPorts": "", + "FPorts": "List of FPort assigned for different LoRaWAN application packages to use.", "OtaaV10x": "OTAA device object for create APIs for v1.0.x", "OtaaV11": "OTAA device object for v1.1 for create APIs.", "ServiceProfileId": "The ID of the service profile." @@ -19654,6 +19816,7 @@ "LogDelivery": "The settings for delivering connector logs to Amazon CloudWatch Logs.", "Plugins": "Specifies which plugin to use for the connector. You must specify a single-element list. Amazon MSK Connect does not currently support specifying multiple plugins.", "ServiceExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.", + "Tags": "", "WorkerConfiguration": "The worker configurations that are in use with the connector." }, "AWS::KafkaConnect::Connector ApacheKafkaCluster": { @@ -19713,6 +19876,10 @@ "AWS::KafkaConnect::Connector ScaleOutPolicy": { "CpuUtilizationPercentage": "The CPU utilization percentage threshold at which you want connector scale out to be triggered." }, + "AWS::KafkaConnect::Connector Tag": { + "Key": "", + "Value": "" + }, "AWS::KafkaConnect::Connector Vpc": { "SecurityGroups": "The security groups for the connector.", "Subnets": "The subnets for the connector." @@ -19726,6 +19893,39 @@ "Firehose": "Details about delivering logs to Amazon Kinesis Data Firehose.", "S3": "Details about delivering logs to Amazon S3." }, + "AWS::KafkaConnect::CustomPlugin": { + "ContentType": "The format of the plugin file.", + "Description": "The description of the custom plugin.", + "Location": "Information about the location of the custom plugin.", + "Name": "The name of the custom plugin.", + "Tags": "" + }, + "AWS::KafkaConnect::CustomPlugin CustomPluginFileDescription": { + "FileMd5": "The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.", + "FileSize": "The size in bytes of the custom plugin file. You can use it to validate the file." + }, + "AWS::KafkaConnect::CustomPlugin CustomPluginLocation": { + "S3Location": "The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3." + }, + "AWS::KafkaConnect::CustomPlugin S3Location": { + "BucketArn": "The Amazon Resource Name (ARN) of an S3 bucket.", + "FileKey": "The file key for an object in an S3 bucket.", + "ObjectVersion": "The version of an object in an S3 bucket." 
+ }, + "AWS::KafkaConnect::CustomPlugin Tag": { + "Key": "", + "Value": "" + }, + "AWS::KafkaConnect::WorkerConfiguration": { + "Description": "The description of a worker configuration.", + "Name": "The name of the worker configuration.", + "PropertiesFileContent": "Base64 encoded contents of the connect-distributed.properties file.", + "Tags": "" + }, + "AWS::KafkaConnect::WorkerConfiguration Tag": { + "Key": "", + "Value": "" + }, "AWS::Kendra::DataSource": { "CustomDocumentEnrichmentConfiguration": "Configuration information for altering document metadata and content during the document ingestion process.", "DataSourceConfiguration": "Configuration information for an Amazon Kendra data source. The contents of the configuration depend on the type of data source. You can only specify one type of data source in the configuration.\n\nYou can't specify the `Configuration` parameter when the `Type` parameter is set to `CUSTOM` .\n\nThe `Configuration` parameter is required for all other data sources.", @@ -19902,8 +20102,8 @@ "AccessControlListConfiguration": "Provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see [Access control for S3 data sources](https://docs.aws.amazon.com/kendra/latest/dg/s3-acl.html) .", "BucketName": "The name of the bucket that contains the documents.", "DocumentsMetadataConfiguration": "Specifies document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document.", - "ExclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", - "InclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "ExclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- */myapp/config/** \u2014All files inside config directory.\n- ***/*.png* \u2014All .png files in all directories.\n- ***/*.{png, ico, md}* \u2014All .png, .ico or .md files in all directories.\n- */myapp/src/**/*.ts* \u2014All .ts files inside src directory (and all its subdirectories).\n- ***/!(*.module).ts* \u2014All .ts files but not .module.ts\n- **.png , *.jpg* \u2014All PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** \u2014All files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** \u2014All internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "InclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed.
Examples of glob patterns include:\n\n- */myapp/config/** \u2014All files inside config directory.\n- ***/*.png* \u2014All .png files in all directories.\n- ***/*.{png, ico, md}* \u2014All .png, .ico or .md files in all directories.\n- */myapp/src/**/*.ts* \u2014All .ts files inside src directory (and all its subdirectories).\n- ***/!(*.module).ts* \u2014All .ts files but not .module.ts\n- **.png , *.jpg* \u2014All PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** \u2014All files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** \u2014All internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "InclusionPrefixes": "A list of S3 prefixes for the documents that should be included in the index." }, "AWS::Kendra::DataSource S3Path": { @@ -20041,6 +20241,7 @@ "Description": "A description for the FAQ.", "FileFormat": "The format of the input file. You can choose between a basic CSV format, a CSV format that includes customs attributes in a header, and a JSON format that includes custom attributes.\n\nThe format must match the format of the file stored in the S3 bucket identified in the S3Path parameter.\n\nValid values are:\n\n- `CSV`\n- `CSV_WITH_HEADER`\n- `JSON`", "IndexId": "The identifier of the index that contains the FAQ.", + "LanguageCode": "The code for a language. This shows a supported language for the FAQ document as part of the summary information for FAQs. English is supported by default. For more information on supported languages, including their codes, see [Adding documents in languages other than English](https://docs.aws.amazon.com/kendra/latest/dg/in-adding-languages.html) .", "Name": "The name that you assigned the FAQ when you created or updated the FAQ.", "RoleArn": "The Amazon Resource Name (ARN) of a role with permission to access the S3 bucket that contains the FAQ.", "S3Path": "The Amazon Simple Storage Service (Amazon S3) location of the FAQ input data.", @@ -20091,9 +20292,9 @@ }, "AWS::Kendra::Index Relevance": { "Duration": "Specifies the time period that the boost applies to. For example, to make the boost apply to documents with the field value within the last month, you would use \"2628000s\". Once the field value is beyond the specified range, the effect of the boost drops off. The higher the importance, the faster the effect drops off. If you don't specify a value, the default is 3 months. The value of the field is a numeric string followed by the character \"s\", for example \"86400s\" for one day, or \"604800s\" for one week.\n\nOnly applies to `DATE` fields.", - "Freshness": "Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. You can only set the `Freshness` field on one `DATE` type field. Only applies to `DATE` fields.", + "Freshness": "Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. Only applies to `DATE` fields.", "Importance": "The relative importance of the field in the search. 
Larger numbers provide more of a boost than smaller numbers.", - "RankOrder": "Determines how values should be interpreted.\n\nWhen the `RankOrder` field is `ASCENDING` , higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.\n\nWhen the `RankOrder` field is `DESCENDING` , lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.\n\nOnly applies to `LONG` and `DOUBLE` fields.", + "RankOrder": "Determines how values should be interpreted.\n\nWhen the `RankOrder` field is `ASCENDING` , higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.\n\nWhen the `RankOrder` field is `DESCENDING` , lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.\n\nOnly applies to `LONG` fields.", "ValueImportanceItems": "An array of key-value pairs for different boosts when they appear in the search result list. For example, if you want to boost query terms that match the \"department\" field in the result, query terms that match this field are boosted in the result. You can add entries from the department field to boost documents with those values higher.\n\nFor example, you can add entries to the map with names of departments. If you add \"HR\", 5 and \"Legal\",3 those departments are given special attention when they appear in the metadata of a document." }, "AWS::Kendra::Index Search": { @@ -20295,7 +20496,7 @@ "ApplicationSnapshotConfiguration": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.", "EnvironmentProperties": "Describes execution properties for a Managed Service for Apache Flink application.", "FlinkApplicationConfiguration": "The creation and update parameters for a Managed Service for Apache Flink application.", - "SqlApplicationConfiguration": "The creation and update parameters for a SQL-based Managed Service for Apache Flink application.", + "SqlApplicationConfiguration": "The creation and update parameters for a SQL-based Kinesis Data Analytics application.", "VpcConfigurations": "The array of descriptions of VPC configurations available to the application.", "ZeppelinApplicationConfiguration": "The configuration parameters for a Kinesis Data Analytics Studio notebook." }, @@ -20355,7 +20556,7 @@ "InputSchema": "Describes the format of the data in the streaming source, and how each data element maps to corresponding columns in the in-application stream that is being created.\n\nAlso used to describe the format of the reference data source.", "KinesisFirehoseInput": "If the streaming source is an Amazon Kinesis Data Firehose delivery stream, identifies the delivery stream's ARN.", "KinesisStreamsInput": "If the streaming source is an Amazon Kinesis data stream, identifies the stream's Amazon Resource Name (ARN).", - "NamePrefix": "The name prefix to use when creating an in-application stream. Suppose that you specify a prefix \" `MyInApplicationStream` .\" Managed Service for Apache Flink then creates one or more (as per the `InputParallelism` count you specified) in-application streams with the names \" `MyInApplicationStream_001` ,\" \" `MyInApplicationStream_002` ,\" and so on." + "NamePrefix": "The name prefix to use when creating an in-application stream. 
Suppose that you specify a prefix \" `MyInApplicationStream` .\" Kinesis Data Analytics then creates one or more (as per the `InputParallelism` count you specified) in-application streams with the names \" `MyInApplicationStream_001` ,\" \" `MyInApplicationStream_002` ,\" and so on." }, "AWS::KinesisAnalyticsV2::Application InputLambdaProcessor": { "ResourceARN": "The ARN of the Amazon Lambda function that operates on records in the stream.\n\n> To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see [Example ARNs: Amazon Lambda](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda)" @@ -20455,7 +20656,7 @@ }, "AWS::KinesisAnalyticsV2::ApplicationOutput": { "ApplicationName": "The name of the application.", - "Output": "Describes a SQL-based Managed Service for Apache Flink application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream." + "Output": "Describes a SQL-based Kinesis Data Analytics application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream." }, "AWS::KinesisAnalyticsV2::ApplicationOutput DestinationSchema": { "RecordFormatType": "Specifies the format of the records on the output stream." @@ -20478,7 +20679,7 @@ }, "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource": { "ApplicationName": "The name of the application.", - "ReferenceDataSource": "For a SQL-based Managed Service for Apache Flink application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table." + "ReferenceDataSource": "For a SQL-based Kinesis Data Analytics application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table." }, "AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource CSVMappingParameters": { "RecordColumnDelimiter": "The column delimiter. For example, in a CSV format, a comma (\",\") is the typical column delimiter.", @@ -20529,7 +20730,7 @@ "S3DestinationConfiguration": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. 
You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "SnowflakeDestinationConfiguration": "Configure Snowflake destination", "SplunkDestinationConfiguration": "The configuration of a destination in Splunk for the delivery stream.", - "Tags": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream." + "Tags": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)" }, "AWS::KinesisFirehose::DeliveryStream AmazonOpenSearchServerlessBufferingHints": { "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).", @@ -21042,7 +21243,7 @@ }, "AWS::Lambda::Alias": { "Description": "A description of the alias.", - "FunctionName": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "FunctionVersion": "The function version that the alias invokes.", "Name": "The name of the alias.", "ProvisionedConcurrencyConfig": "Specifies a [provisioned concurrency](https://docs.aws.amazon.com/lambda/latest/dg/configuration-concurrency.html) configuration for a function's alias.", @@ -21095,7 +21296,7 @@ "Enabled": "When true, the event source mapping is active. When false, Lambda pauses polling and invocation.\n\nDefault: True", "EventSourceArn": "The Amazon Resource Name (ARN) of the event source.\n\n- *Amazon Kinesis* \u2013 The ARN of the data stream or a stream consumer.\n- *Amazon DynamoDB Streams* \u2013 The ARN of the stream.\n- *Amazon Simple Queue Service* \u2013 The ARN of the queue.\n- *Amazon Managed Streaming for Apache Kafka* \u2013 The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) ).\n- *Amazon MQ* \u2013 The ARN of the broker.\n- *Amazon DocumentDB* \u2013 The ARN of the DocumentDB change stream.", "FilterCriteria": "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) .", - "FunctionName": "The name of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", "FunctionResponseTypes": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", "MaximumBatchingWindowInSeconds": "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n\n*Default ( Kinesis , DynamoDB , Amazon SQS event sources)* : 0\n\n*Default ( Amazon MSK , Kafka, Amazon MQ , Amazon DocumentDB event sources)* : 500 ms\n\n*Related setting:* For Amazon SQS event sources, when you set `BatchSize` to a value greater than 10, you must set `MaximumBatchingWindowInSeconds` to at least 1.", "MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n\n> The minimum valid value for maximum record age is 60s. 
Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", @@ -21250,7 +21451,7 @@ "AWS::Lambda::Permission": { "Action": "The action that the principal can use on the function. For example, `lambda:InvokeFunction` or `lambda:GetFunction` .", "EventSourceToken": "For Alexa Smart Home functions, a token that the invoker must supply.", - "FunctionName": "The name of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "FunctionUrlAuthType": "The type of authentication that your function URL uses. Set to `AWS_IAM` if you want to restrict access to authenticated users only. Set to `NONE` if you want to bypass IAM authentication to create a public endpoint. For more information, see [Security and auth model for Lambda function URLs](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) .", "Principal": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "PrincipalOrgID": "The identifier for your organization in AWS Organizations . Use this to grant permissions to all the AWS accounts under this organization.", @@ -21275,7 +21476,7 @@ "AWS::Lambda::Version": { "CodeSha256": "Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. Updates are not supported for this property.", "Description": "A description for the version to override the description in the function configuration. Updates are not supported for this property.", - "FunctionName": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "ProvisionedConcurrencyConfig": "Specifies a provisioned concurrency configuration for a function's version. 
Updates are not supported for this property.", "RuntimePolicy": "" }, @@ -22696,6 +22897,9 @@ "TargetKafkaClusterArn": "", "TopicReplication": "" }, + "AWS::MSK::Replicator ReplicationStartingPosition": { + "Type": "" + }, "AWS::MSK::Replicator Tag": { "Key": "", "Value": "" @@ -22704,6 +22908,7 @@ "CopyAccessControlListsForTopics": "", "CopyTopicConfigurations": "", "DetectAndCopyNewTopics": "", + "StartingPosition": "", "TopicsToExclude": "", "TopicsToReplicate": "" }, @@ -26779,6 +26984,7 @@ "Enabled": "Specifies whether to enable the email channel for the application.", "FromAddress": "The verified email address that you want to send email from when you send email through the channel.", "Identity": "The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple Email Service (Amazon SES), that you want to use when you send email through the channel.", + "OrchestrationSendingRoleArn": "", "RoleArn": "The ARN of the AWS Identity and Access Management (IAM) role that you want Amazon Pinpoint to use when it submits email-related event data for the channel." }, "AWS::Pinpoint::EmailTemplate": { @@ -35785,10 +35991,10 @@ "DomainIAMRoleName": "Specifies the name of the IAM role to use when making API calls to the Directory Service.\n\nValid for: Aurora DB clusters only", "EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see [Publishing Database Logs to Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the *Amazon Aurora User Guide* .\n\n*Aurora MySQL*\n\nValid values: `audit` , `error` , `general` , `slowquery`\n\n*Aurora PostgreSQL*\n\nValid values: `postgresql`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "EnableGlobalWriteForwarding": "Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.\n\nYou can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then.\n\nValid for Cluster Type: Aurora DB clusters only", - "EnableHttpEndpoint": "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", + "EnableHttpEndpoint": "Specifies whether to enable the HTTP endpoint for the DB cluster. 
By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n\nFor more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.*\n\nValid for: Aurora DB clusters only", "Engine": "The name of the database engine to be used for this DB cluster.\n\nValid Values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only supports Aurora Serverless v1.\n\nLimitations and requirements apply to some DB engine modes. For more information, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n- [Limitations of parallel query](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n- [Limitations of Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n\nValid for: Aurora DB clusters only", + "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "EngineVersion": "The version number of the database engine to use.\n\nTo list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (8.0-compatible), use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nYou can supply either `5.7` or `8.0` to use the default engine version for Aurora MySQL version 2 or version 3, respectively.\n\nTo list all of the available engine versions for Aurora PostgreSQL, use the following command:\n\n`aws rds describe-db-engine-versions 
--engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for RDS for MySQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for RDS for PostgreSQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"`\n\n*Aurora MySQL*\n\nFor information, see [Database engine updates for Amazon Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the *Amazon Aurora User Guide* .\n\n*Aurora PostgreSQL*\n\nFor information, see [Amazon Aurora PostgreSQL releases and engine versions](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the *Amazon Aurora User Guide* .\n\n*MySQL*\n\nFor information, see [Amazon RDS for MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide* .\n\n*PostgreSQL*\n\nFor information, see [Amazon RDS for PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the *Amazon RDS User Guide* .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "GlobalClusterIdentifier": "If you are configuring an Aurora global database cluster and want your Aurora DB cluster to be a secondary member in the global database cluster, specify the global cluster ID of the global database cluster. To define the primary database cluster of the global cluster, use the [AWS::RDS::GlobalCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-globalcluster.html) resource.\n\nIf you aren't configuring a global database cluster, don't specify this property.\n\n> To remove the DB cluster from a global database cluster, specify an empty value for the `GlobalClusterIdentifier` property. \n\nFor information about Aurora global databases, see [Working with Amazon Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", "Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.\n\nFor information about valid IOPS values, see [Provisioned IOPS storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the *Amazon RDS User Guide* .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nConstraints:\n\n- Must be a multiple between .5 and 50 of the storage amount for the DB cluster.", @@ -35817,7 +36023,7 @@ "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. 
For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to `false` .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", + "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. 
The storage type isn't returned when you set it to `aurora` .", "Tags": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "UseLatestRestorableTime": "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with this DB cluster.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" @@ -35901,7 +36107,7 @@ "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n\nThis property is supported for RDS for MariaDB, RDS for MySQL, and RDS for PostgreSQL. For more information, see [IAM Database Authentication for MariaDB, MySQL, and PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon RDS User Guide.*\n\n*Amazon Aurora*\n\nNot applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.", "EnablePerformanceInsights": "Specifies whether to enable Performance Insights for the DB instance. For more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to RDS Custom DB instances.", "Endpoint": "The connection endpoint for the DB instance.\n\n> The endpoint might not be shown for instances with the status of `creating` .", - "Engine": "The name of the database engine that you want to use for this DB instance.\n\nNot every database engine is available in every AWS Region.\n\n> When you are creating a DB instance, the `Engine` property is required. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "Engine": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can change the architecture of an Oracle database from the non-container database (CDB) architecture to the CDB architecture by updating the `Engine` value in your templates from `oracle-ee` or `oracle-ee-cdb` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. 
\n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "EngineVersion": "The version number of the database engine to use.\n\nFor a list of valid engine versions, use the `DescribeDBEngineVersions` action.\n\nThe following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.\n\n*Amazon Aurora*\n\nNot applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.\n\n*Db2*\n\nSee [Amazon RDS for Db2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Db2.html#Db2.Concepts.VersionMgmt) in the *Amazon RDS User Guide.*\n\n*MariaDB*\n\nSee [MariaDB on Amazon RDS Versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MariaDB.html#MariaDB.Concepts.VersionMgmt) in the *Amazon RDS User Guide.*\n\n*Microsoft SQL Server*\n\nSee [Microsoft SQL Server Versions on Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) in the *Amazon RDS User Guide.*\n\n*MySQL*\n\nSee [MySQL on Amazon RDS Versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide.*\n\n*Oracle*\n\nSee [Oracle Database Engine Release Notes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.PatchComposition.html) in the *Amazon RDS User Guide.*\n\n*PostgreSQL*\n\nSee [Supported PostgreSQL Database Versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts.General.DBVersions) in the *Amazon RDS User Guide.*", "Iops": "The number of I/O operations per second (IOPS) that the database provisions. The value must be equal to or greater than 1000.\n\nIf you specify this property, you must follow the range of allowed ratios of your requested IOPS rate to the amount of storage that you allocate (IOPS to allocated storage). For example, you can provision an Oracle database instance with 1000 IOPS and 200 GiB of storage (a ratio of 5:1), or specify 2000 IOPS with 200 GiB of storage (a ratio of 10:1). For more information, see [Amazon RDS Provisioned IOPS Storage to Improve Performance](https://docs.aws.amazon.com/AmazonRDS/latest/DeveloperGuide/CHAP_Storage.html#USER_PIOPS) in the *Amazon RDS User Guide* .\n\n> If you specify `io1` for the `StorageType` property, then you must also specify the `Iops` property. 
\n\nConstraints:\n\n- For RDS for Db2, MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the storage amount for the DB instance.\n- For RDS for SQL Server - Must be a multiple between 1 and 50 of the storage amount for the DB instance.", "KmsKeyId": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `SnapshotIdentifier` property, the `StorageEncrypted` property value is inherited from the snapshot, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", @@ -35934,7 +36140,7 @@ "SourceRegion": "The ID of the region that contains the source DB instance for the read replica.", "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB instance is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB instance to be encrypted, then don't set this property or set it to `false` .\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "StorageThroughput": "Specifies the storage throughput value for the DB instance. 
This setting applies only to the `gp3` storage type.\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora.", - "StorageType": "Specifies the storage type to be associated with the DB instance.\n\nValid values: `gp2 | gp3 | io1 | standard`\n\nThe `standard` value is also known as magnetic.\n\nIf you specify `io1` or `gp3` , you must also include a value for the `Iops` parameter.\n\nDefault: `io1` if the `Iops` parameter is specified, otherwise `gp2`\n\nFor more information, see [Amazon RDS DB Instance Storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs).", + "StorageType": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", "Tags": "An optional array of key-value pairs to apply to this DB instance.", "Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by [Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) .", "UseDefaultProcessorFeatures": "Specifies whether the DB instance class of the DB instance uses its default processor features.\n\nThis setting doesn't apply to RDS Custom DB instances.", @@ -36197,7 +36403,7 @@ "NodeType": "The node type to be provisioned for the cluster. For information about node types, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nValid Values: `ds2.xlarge` | `ds2.8xlarge` | `dc1.large` | `dc1.8xlarge` | `dc2.large` | `dc2.8xlarge` | `ra3.xlplus` | `ra3.4xlarge` | `ra3.16xlarge`", "NumberOfNodes": "The number of compute nodes in the cluster. This parameter is required when the *ClusterType* parameter is specified as `multi-node` .\n\nFor information about determining how many nodes you need, go to [Working with Clusters](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#how-many-nodes) in the *Amazon Redshift Cluster Management Guide* .\n\nIf you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster.\n\nDefault: `1`\n\nConstraints: Value must be at least 1 and no more than 100.", "OwnerAccount": "The AWS account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.", - "Port": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values: `1150-65535`", + "Port": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. 
Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with ds2 or dc2 nodes - Select a port within the range `1150-65535` .", "PreferredMaintenanceWindow": "The weekly time range (in UTC) during which automated cluster maintenance can occur.\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nDefault: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see [Maintenance Windows](https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-windows) in Amazon Redshift Cluster Management Guide.\n\nValid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun\n\nConstraints: Minimum 30-minute window.", "PubliclyAccessible": "If `true` , the cluster can be accessed from a public network.", "ResourceAction": "The Amazon Redshift operation to be performed. Supported operations are `pause-cluster` , `resume-cluster` , and `failover-primary-compute` .", @@ -36346,7 +36552,6 @@ "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "NamespaceResourcePolicy": "The resource policy that will be attached to the namespace.", "RedshiftIdcApplicationArn": "The ARN for the Redshift application that integrates with IAM Identity Center.", - "SnapshotCopyConfigurations": "", "Tags": "The map of the key-value pairs used to tag the namespace." }, "AWS::RedshiftServerless::Namespace Namespace": { @@ -36364,11 +36569,6 @@ "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "Status": "The status of the namespace." }, - "AWS::RedshiftServerless::Namespace SnapshotCopyConfiguration": { - "DestinationKmsKeyId": "The ID of the KMS key to use to encrypt your snapshots in the destination AWS Region .", - "DestinationRegion": "The destination AWS Region to copy snapshots to.", - "SnapshotRetentionPeriod": "The retention period of snapshots that are copied to the destination AWS Region ." - }, "AWS::RedshiftServerless::Namespace Tag": { "Key": "The key to use in the tag.", "Value": "The value of the tag." @@ -36608,6 +36808,12 @@ "RpoInSecs": "Recovery Point Objective (RPO) in seconds.", "RtoInSecs": "Recovery Time Objective (RTO) in seconds." }, + "AWS::ResilienceHub::ResiliencyPolicy PolicyMap": { + "AZ": "Defines the RTO and RPO targets for Availability Zone disruption.", + "Hardware": "Defines the RTO and RPO targets for hardware disruption.", + "Region": "Defines the RTO and RPO targets for Regional disruption.", + "Software": "Defines the RTO and RPO targets for software disruption." + }, "AWS::ResourceExplorer2::DefaultViewAssociation": { "ViewArn": "The ARN of the view to set as the default for the AWS Region and AWS account in which you call this operation. 
The specified view must already exist in the specified Region." }, @@ -38229,7 +38435,7 @@ "OperatingSystem": "Defines the operating system the patch baseline applies to. The default value is `WINDOWS` .", "PatchGroups": "The name of the patch group to be registered with the patch baseline.", "RejectedPatches": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", - "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *`BLOCK`* : Packages in the `RejectedPatches` list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as `InstalledRejected` .", + "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", "Sources": "Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only.", "Tags": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to." }, @@ -38951,7 +39157,7 @@ "AWS::SageMaker::EndpointConfig ServerlessConfig": { "MaxConcurrency": "The maximum number of concurrent invocations your serverless endpoint can process.", "MemorySizeInMB": "The memory size of your serverless endpoint. Valid values are in 1 GB increments: 1024 MB, 2048 MB, 3072 MB, 4096 MB, 5120 MB, or 6144 MB.", - "ProvisionedConcurrency": "" + "ProvisionedConcurrency": "The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to `MaxConcurrency` .\n\n> This field is not supported for serverless endpoint recommendations for Inference Recommender jobs. 
For more information about creating an Inference Recommender job, see [CreateInferenceRecommendationsJobs](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateInferenceRecommendationsJob.html) ." }, "AWS::SageMaker::EndpointConfig Tag": { "Key": "The tag key. Tag keys must be unique per resource.", @@ -39149,7 +39355,7 @@ }, "AWS::SageMaker::Model ContainerDefinition": { "ContainerHostname": "This parameter is ignored for models that contain only a `PrimaryContainer` .\n\nWhen a `ContainerDefinition` is part of an inference pipeline, the value of the parameter uniquely identifies the container for the purposes of logging and metrics. For information, see [Use Logs and Metrics to Monitor an Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-logs-metrics.html) . If you don't specify a value for this parameter for a `ContainerDefinition` that is part of an inference pipeline, a unique name is automatically assigned based on the position of the `ContainerDefinition` in the pipeline. If you specify a value for the `ContainerHostName` for any `ContainerDefinition` that is part of an inference pipeline, you must specify a value for the `ContainerHostName` parameter of every `ContainerDefinition` in that pipeline.", - "Environment": "The environment variables to set in the Docker container. Each key and value in the `Environment` string to string map can have length of up to 1024. We support up to 16 entries in the map.", + "Environment": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "Image": "The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both `registry/repository[:tag]` and `registry/repository[@digest]` image path formats. For more information, see [Using Your Own Algorithms with Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) .\n\n> The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container Registry must be in the same region as the model or endpoint you are creating.", "ImageConfig": "Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). 
For information about storing containers in a private Docker registry, see [Use a Private Docker Registry for Real-Time Inference Containers](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-containers-inference-private.html) .\n\n> The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container Registry must be in the same region as the model or endpoint you are creating.", "InferenceSpecificationName": "The inference specification name in the model package version.", @@ -40776,7 +40982,7 @@ }, "AWS::Shield::DRTAccess": { "LogBucketList": "Authorizes the Shield Response Team (SRT) to access the specified Amazon S3 bucket containing log data such as Application Load Balancer access logs, CloudFront logs, or logs from third party sources. You can associate up to 10 Amazon S3 buckets with your subscription.\n\nUse this to share information with the SRT that's not available in AWS WAF logs.\n\nTo use the services of the SRT, you must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .", - "RoleArn": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- You must have the `iam:PassRole` permission. For more information, see [Granting a user permissions to pass a role to an AWS service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you." + "RoleArn": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. 
This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you." }, "AWS::Shield::ProactiveEngagement": { "EmergencyContactList": "The list of email addresses and phone numbers that the Shield Response Team (SRT) can use to contact you for escalations to the SRT and to initiate proactive customer support, plus any relevant notes.\n\nTo enable proactive engagement, the contact list must include at least one phone number.\n\nIf you provide more than one contact, in the notes, indicate the circumstances under which each contact should be used. Include primary and secondary contact designations, and provide the hours of availability and time zones for each contact.\n\nExample contact notes:\n\n- This is a hotline that's staffed 24x7x365. Please work with the responding analyst and they will get the appropriate person on the call.\n- Please contact the secondary phone number if the hotline doesn't respond within 5 minutes.", @@ -40807,7 +41013,7 @@ "Value": "Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive." }, "AWS::Shield::ProtectionGroup": { - "Aggregation": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. 
Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", + "Aggregation": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- `Sum` - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- `Mean` - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- `Max` - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", "Members": "The ARNs (Amazon Resource Names) of the resources to include in the protection group. You must set this when you set `Pattern` to `ARBITRARY` and you must not set it for any other `Pattern` setting.", "Pattern": "The criteria to use to choose the protected resources for inclusion in the group. You can include all resources that have protections, provide a list of resource ARNs (Amazon Resource Names), or include all resources of a specified resource type.", "ProtectionGroupId": "The name of the protection group. You use this to identify the protection group in lists and to manage the protection group, for example to update, delete, or describe it.", @@ -41166,7 +41372,7 @@ "AWS::Transfer::Connector As2Config": { "BasicAuthSecretId": "Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication, you must provide the name or Amazon Resource Name (ARN) of a secret in AWS Secrets Manager .\n\nThe default value for this parameter is `null` , which indicates that Basic authentication is not enabled for the connector.\n\nIf the connector should use Basic authentication, the secret needs to be in the following format:\n\n`{ \"Username\": \"user-name\", \"Password\": \"user-password\" }`\n\nReplace `user-name` and `user-password` with the credentials for the actual user that is being authenticated.\n\nNote the following:\n\n- You are storing these credentials in Secrets Manager, *not passing them directly* into this API.\n- If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication. However, if you are using the AWS management console, you can have the system create the secret for you.\n\nIf you have previously enabled Basic authentication for a connector, you can disable it by using the `UpdateConnector` API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:\n\n`update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'`", "Compression": "Specifies whether the AS2 file is compressed.", - "EncryptionAlgorithm": "The algorithm that is used to encrypt the file.\n\n> You can only specify `NONE` if the URL for your connector uses HTTPS. This ensures that no traffic is sent in clear text.", + "EncryptionAlgorithm": "The algorithm that is used to encrypt the file.\n\nNote the following:\n\n- Do not use the `DES_EDE3_CBC` algorithm unless you must support a legacy client that requires it, as it is a weak encryption algorithm.\n- You can only specify `NONE` if the URL for your connector uses HTTPS. 
Using HTTPS ensures that no traffic is sent in clear text.", "LocalProfileId": "A unique identifier for the AS2 local profile.", "MdnResponse": "Used for outbound requests (from an AWS Transfer Family server to a partner AS2 server) to determine whether the partner response for transfers is synchronous or asynchronous. Specify either of the following values:\n\n- `SYNC` : The system expects a synchronous MDN response, confirming that the file was transferred successfully (or not).\n- `NONE` : Specifies that no MDN response is required.", "MdnSigningAlgorithm": "The signing algorithm for the MDN response.\n\n> If set to DEFAULT (or not set at all), the value for `SigningAlgorithm` is used.", @@ -41801,7 +42007,7 @@ "AWS::WAFv2::LoggingConfiguration": { "LogDestinationConfigs": "The logging destination configuration that you want to associate with the web ACL.\n\n> You can associate one logging destination to a web ACL.", "LoggingFilter": "Filtering that specifies which web requests are kept in the logs and which are dropped. You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation.", - "RedactedFields": "The parts of the request that you want to keep out of the logs.\n\nFor example, if you redact the `SingleHeader` field, the `HEADER` field in the logs will be `REDACTED` for all rules that use the `SingleHeader` `FieldToMatch` setting.\n\nRedaction applies only to the component that's specified in the rule's `FieldToMatch` setting, so the `SingleHeader` redaction doesn't apply to rules that use the `Headers` `FieldToMatch` .\n\n> You can specify only the following fields for redaction: `UriPath` , `QueryString` , `SingleHeader` , and `Method` .", + "RedactedFields": "The parts of the request that you want to keep out of the logs.\n\nFor example, if you redact the `SingleHeader` field, the `HEADER` field in the logs will be `REDACTED` for all rules that use the `SingleHeader` `FieldToMatch` setting.\n\nRedaction applies only to the component that's specified in the rule's `FieldToMatch` setting, so the `SingleHeader` redaction doesn't apply to rules that use the `Headers` `FieldToMatch` .\n\n> You can specify only the following fields for redaction: `UriPath` , `QueryString` , `SingleHeader` , and `Method` . > This setting has no impact on request sampling. With request sampling, the only way to exclude fields is by disabling sampling in the web ACL visibility configuration.", "ResourceArn": "The Amazon Resource Name (ARN) of the web ACL that you want to associate with `LogDestinationConfigs` ." }, "AWS::WAFv2::LoggingConfiguration ActionCondition": { @@ -41865,7 +42071,7 @@ "CustomResponse": "Defines a custom response for the web request.\n\nFor information about customizing web requests and responses, see [Customizing web requests and responses in AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-custom-request-response.html) in the *AWS WAF Developer Guide* ." }, "AWS::WAFv2::RuleGroup Body": { - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. 
For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::RuleGroup ByteMatchStatement": { "FieldToMatch": "The part of the web request that you want AWS WAF to inspect.", @@ -41917,10 +42123,11 @@ }, "AWS::WAFv2::RuleGroup FieldToMatch": { "AllQueryArguments": "Inspect all query arguments.", - "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "Cookies": "Inspect the request cookies. You must configure scope and pattern matching filters in the `Cookies` object, to define the set of cookies and the parts of the cookies that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's cookies and only the first 200 cookies are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize cookie content in the `Cookies` object. AWS WAF applies the pattern matching filters to the cookies that it receives from the underlying host service.", "Headers": "Inspect the request headers. You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", - "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "JA3Fingerprint": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. 
For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "Method": "Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "QueryString": "Inspect the query string. This is the part of a URL that appears after a `?` character, if any.", "SingleHeader": "Inspect a single header. Provide the name of the header to inspect, for example, `User-Agent` or `Referer` . This setting isn't case sensitive.\n\nExample JSON: `\"SingleHeader\": { \"Name\": \"haystack\" }`\n\nAlternately, you can filter and inspect all headers with the `Headers` `FieldToMatch` setting.", @@ -41957,11 +42164,14 @@ "AWS::WAFv2::RuleGroup ImmunityTimeProperty": { "ImmunityTime": "The amount of time, in seconds, that a `CAPTCHA` or challenge timestamp is considered valid by AWS WAF . The default setting is 300.\n\nFor the Challenge action, the minimum setting is 300." }, + "AWS::WAFv2::RuleGroup JA3Fingerprint": { + "FallbackBehavior": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." + }, "AWS::WAFv2::RuleGroup JsonBody": { "InvalidFallbackBehavior": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", "MatchPattern": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", "MatchScope": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::RuleGroup JsonMatchPattern": { "All": "Match all of the elements. See also `MatchScope` in the `JsonBody` `FieldToMatch` specification.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", @@ -41986,6 +42196,7 @@ "AWS::WAFv2::RuleGroup RateBasedStatement": { "AggregateKeyType": "Setting that indicates how to aggregate the request counts.\n\n> Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling. \n\n- `CONSTANT` - Count and limit the requests that match the rate-based rule's scope-down statement. With this option, the counted requests aren't further aggregated. The scope-down statement is the only specification used. When the count of all requests that satisfy the scope-down statement goes over the limit, AWS WAF applies the rule action to all requests that satisfy the scope-down statement.\n\nWith this option, you must configure the `ScopeDownStatement` property.\n- `CUSTOM_KEYS` - Aggregate the request counts using one or more web request components as the aggregate keys.\n\nWith this option, you must specify the aggregate keys in the `CustomKeys` property.\n\nTo aggregate on only the IP address or only the forwarded IP address, don't use custom keys. Instead, set the aggregate key type to `IP` or `FORWARDED_IP` .\n- `FORWARDED_IP` - Aggregate the request counts on the first IP address in an HTTP header.\n\nWith this option, you must specify the header to use in the `ForwardedIPConfig` property.\n\nTo aggregate on a combination of the forwarded IP address with other aggregate keys, use `CUSTOM_KEYS` .\n- `IP` - Aggregate the request counts on the IP address from the web request origin.\n\nTo aggregate on a combination of the IP address with other aggregate keys, use `CUSTOM_KEYS` .", "CustomKeys": "Specifies the aggregate keys to use in a rate-base rule.", + "EvaluationWindowSec": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", "ForwardedIPConfig": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", "Limit": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. 
If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement." @@ -42077,7 +42288,7 @@ "RateBasedStatement": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. 
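As a rough illustration of how the newly added `EvaluationWindowSec` property fits into a rate-based statement, here is a minimal CloudFormation-style YAML sketch of a rule (for example in an `AWS::WAFv2::WebACL`). The rule name, limit, and action are illustrative assumptions, not values taken from the schema.

```yaml
# Hypothetical rule fragment showing EvaluationWindowSec on a rate-based statement.
Rules:
  - Name: rate-limit-per-ip            # assumed rule name
    Priority: 0
    Statement:
      RateBasedStatement:
        AggregateKeyType: IP           # aggregate request counts by origin IP address
        Limit: 1000                    # request-count threshold for a single aggregation instance
        EvaluationWindowSec: 120       # look back 2 minutes; valid settings are 60, 120, 300, 600
    Action:
      Block: {}
    VisibilityConfig:
      SampledRequestsEnabled: true
      CloudWatchMetricsEnabled: true
      MetricName: rate-limit-per-ip
```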
You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "RegexMatchStatement": "A rule statement used to search web request components for a match against a single regular expression.", "RegexPatternSetReferenceStatement": "A rule statement used to search web request components for matches with regular expressions. To use this, create a `RegexPatternSet` that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set.\n\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.", - "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. 
For example, the URI `/logo.jpg` is nine characters long.", + "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "SqliMatchStatement": "A rule statement that inspects for malicious SQL code. Attackers insert malicious SQL code into web requests to do things like modify your database or extract data from it.", "XssMatchStatement": "A rule statement that inspects for cross-site scripting (XSS) attacks. In XSS attacks, the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers." }, @@ -42092,14 +42303,14 @@ "AWS::WAFv2::RuleGroup VisibilityConfig": { "CloudWatchMetricsEnabled": "Indicates whether the associated resource sends metrics to Amazon CloudWatch. For the list of available metrics, see [AWS WAF Metrics](https://docs.aws.amazon.com/waf/latest/developerguide/monitoring-cloudwatch.html#waf-metrics) in the *AWS WAF Developer Guide* .\n\nFor web ACLs, the metrics are for web requests that have the web ACL default action applied. AWS WAF applies the default action to web requests that pass the inspection of all rules in the web ACL without being either allowed or blocked. For more information,\nsee [The web ACL default action](https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-default-action.html) in the *AWS WAF Developer Guide* .", "MetricName": "A name of the Amazon CloudWatch metric dimension. The name can contain only the characters: A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to 128 characters long. It can't contain whitespace or metric names that are reserved for AWS WAF , for example `All` and `Default_Action` .", - "SampledRequestsEnabled": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console." + "SampledRequestsEnabled": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\n> Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration." }, "AWS::WAFv2::RuleGroup XssMatchStatement": { "FieldToMatch": "The part of the web request that you want AWS WAF to inspect.", "TextTransformations": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. 
If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by `FieldToMatch` , starting from the lowest priority setting, before inspecting the content for a match." }, "AWS::WAFv2::WebACL": { - "AssociationConfig": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "AssociationConfig": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected resources forward to AWS WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "CaptchaConfig": "Specifies how AWS WAF should handle `CAPTCHA` evaluations for rules that don't have their own `CaptchaConfig` settings. If you don't specify this, AWS WAF uses its default settings for `CaptchaConfig` .", "ChallengeConfig": "Specifies how AWS WAF should handle challenge evaluations for rules that don't have their own `ChallengeConfig` settings. If you don't specify this, AWS WAF uses its default settings for `ChallengeConfig` .", "CustomResponseBodies": "A map of custom response keys and content bodies. When you create a rule with a block action, you can send a custom response to the web request. You define these for the web ACL, and then use them in the rules and default actions that you define in the web ACL.\n\nFor information about customizing web requests and responses, see [Customizing web requests and responses in AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-custom-request-response.html) in the *AWS WAF Developer Guide* .\n\nFor information about the limits on count and size for custom request and response settings, see [AWS WAF quotas](https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) in the *AWS WAF Developer Guide* .", @@ -42136,13 +42347,13 @@ "Statements": "The statements to combine with AND logic. You can use any statements that can be nested." }, "AWS::WAFv2::WebACL AssociationConfig": { - "RequestBody": "Customizes the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) ." + "RequestBody": "Customizes the maximum size of the request body that your protected CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access resources forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes). 
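A minimal sketch of how the web ACL `AssociationConfig` described here might look in a template, assuming the usual CloudFormation shape where `RequestBody` maps a resource type to a `RequestBodyAssociatedResourceTypeConfig`. The resource-type key and size value are illustrative; larger-than-default body sizes incur the additional fees noted in the description.

```yaml
# Hypothetical AWS::WAFv2::WebACL fragment raising the body inspection limit
# for protected API Gateway stages via AssociationConfig.
AssociationConfig:
  RequestBody:
    API_GATEWAY:
      DefaultSizeInspectionLimit: KB_48   # assumed value; default is KB_16 (16,384 bytes)
```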
You can change the setting for any of the available resource types.\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nExample JSON: `{ \"API_GATEWAY\": \"KB_48\", \"APP_RUNNER_SERVICE\": \"KB_32\" }`\n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes)." }, "AWS::WAFv2::WebACL BlockAction": { "CustomResponse": "Defines a custom response for the web request.\n\nFor information about customizing web requests and responses, see [Customizing web requests and responses in AWS WAF](https://docs.aws.amazon.com/waf/latest/developerguide/waf-custom-request-response.html) in the *AWS WAF Developer Guide* ." }, "AWS::WAFv2::WebACL Body": { - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::WebACL ByteMatchStatement": { "FieldToMatch": "The part of the web request that you want AWS WAF to inspect.", @@ -42204,10 +42415,11 @@ }, "AWS::WAFv2::WebACL FieldToMatch": { "AllQueryArguments": "Inspect all query arguments.", - "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "Body": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "Cookies": "Inspect the request cookies. You must configure scope and pattern matching filters in the `Cookies` object, to define the set of cookies and the parts of the cookies that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's cookies and only the first 200 cookies are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize cookie content in the `Cookies` object. AWS WAF applies the pattern matching filters to the cookies that it receives from the underlying host service.", "Headers": "Inspect the request headers. You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", - "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "JA3Fingerprint": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "JsonBody": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "Method": "Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "QueryString": "Inspect the query string. This is the part of a URL that appears after a `?` character, if any.", "SingleHeader": "Inspect a single header. Provide the name of the header to inspect, for example, `User-Agent` or `Referer` . This setting isn't case sensitive.\n\nExample JSON: `\"SingleHeader\": { \"Name\": \"haystack\" }`\n\nAlternately, you can filter and inspect all headers with the `Headers` `FieldToMatch` setting.", @@ -42244,11 +42456,14 @@ "AWS::WAFv2::WebACL ImmunityTimeProperty": { "ImmunityTime": "The amount of time, in seconds, that a `CAPTCHA` or challenge timestamp is considered valid by AWS WAF . The default setting is 300.\n\nFor the Challenge action, the minimum setting is 300." 
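A hedged sketch of a rule statement that uses the new `JA3Fingerprint` field. Per the description above, JA3 matching is only supported in a string match `ByteMatchStatement` with `PositionalConstraint` set to `EXACTLY`; the `SearchString` below is a placeholder for a fingerprint copied from your web ACL logs.

```yaml
# Hypothetical statement matching a specific JA3 fingerprint.
Statement:
  ByteMatchStatement:
    FieldToMatch:
      JA3Fingerprint:
        FallbackBehavior: NO_MATCH       # requests without a fingerprint do not match
    PositionalConstraint: EXACTLY
    SearchString: 0c9a0fdf43e046a107ab68659726dcd0   # placeholder 32-character JA3 hash
    TextTransformations:
      - Priority: 0
        Type: NONE
```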
}, + "AWS::WAFv2::WebACL JA3Fingerprint": { + "FallbackBehavior": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement." + }, "AWS::WAFv2::WebACL JsonBody": { "InvalidFallbackBehavior": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", "MatchPattern": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", "MatchScope": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" + "OversizeHandling": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`" }, "AWS::WAFv2::WebACL JsonMatchPattern": { "All": "Match all of the elements. See also `MatchScope` in the `JsonBody` `FieldToMatch` specification.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", @@ -42292,6 +42507,7 @@ "AWS::WAFv2::WebACL RateBasedStatement": { "AggregateKeyType": "Setting that indicates how to aggregate the request counts.\n\n> Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling. \n\n- `CONSTANT` - Count and limit the requests that match the rate-based rule's scope-down statement. With this option, the counted requests aren't further aggregated. The scope-down statement is the only specification used. When the count of all requests that satisfy the scope-down statement goes over the limit, AWS WAF applies the rule action to all requests that satisfy the scope-down statement.\n\nWith this option, you must configure the `ScopeDownStatement` property.\n- `CUSTOM_KEYS` - Aggregate the request counts using one or more web request components as the aggregate keys.\n\nWith this option, you must specify the aggregate keys in the `CustomKeys` property.\n\nTo aggregate on only the IP address or only the forwarded IP address, don't use custom keys. 
Instead, set the aggregate key type to `IP` or `FORWARDED_IP` .\n- `FORWARDED_IP` - Aggregate the request counts on the first IP address in an HTTP header.\n\nWith this option, you must specify the header to use in the `ForwardedIPConfig` property.\n\nTo aggregate on a combination of the forwarded IP address with other aggregate keys, use `CUSTOM_KEYS` .\n- `IP` - Aggregate the request counts on the IP address from the web request origin.\n\nTo aggregate on a combination of the IP address with other aggregate keys, use `CUSTOM_KEYS` .", "CustomKeys": "Specifies the aggregate keys to use in a rate-base rule.", + "EvaluationWindowSec": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", "ForwardedIPConfig": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", "Limit": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a `ScopeDownStatement` , this limit is applied only to the requests that match the statement.\n\nExamples:\n\n- If you aggregate on just the IP address, this is the limit on requests from any single IP address.\n- If you aggregate on the HTTP method and the query argument name \"city\", then this is the limit on requests for any single method, city pair.", "ScopeDownStatement": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement. When you use a scope-down statement, the rate-based rule only tracks and rate limits requests that match the scope-down statement. You can use any nestable `Statement` in the scope-down statement, and you can nest statements at any level, the same as you can for a rule statement." @@ -42339,7 +42555,7 @@ "TextTransformations": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by `FieldToMatch` , starting from the lowest priority setting, before inspecting the content for a match." }, "AWS::WAFv2::WebACL RequestBodyAssociatedResourceTypeConfig": { - "DefaultSizeInspectionLimit": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to AWS WAF for inspection. 
This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`" + "DefaultSizeInspectionLimit": "Specifies the maximum size of the web request body component that an associated CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resource should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`" }, "AWS::WAFv2::WebACL RequestInspection": { "PasswordField": "The name of the field in the request payload that contains your customer's password.\n\nHow you specify this depends on the request inspection payload type.\n\n- For JSON payloads, specify the field name in JSON pointer syntax. For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation [JavaScript Object Notation (JSON) Pointer](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6901) .\n\nFor example, for the JSON payload `{ \"form\": { \"password\": \"THE_PASSWORD\" } }` , the password field specification is `/form/password` .\n- For form encoded payload types, use the HTML form names.\n\nFor example, for an HTML form with the input element named `password1` , the password field specification is `password1` .", @@ -42435,7 +42651,7 @@ "RegexMatchStatement": "A rule statement used to search web request components for a match against a single regular expression.", "RegexPatternSetReferenceStatement": "A rule statement used to search web request components for matches with regular expressions. To use this, create a `RegexPatternSet` that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set.\n\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.", "RuleGroupReferenceStatement": "A rule statement used to run the rules that are defined in a `RuleGroup` . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\n\nYou cannot nest a `RuleGroupReferenceStatement` , for example for use inside a `NotStatement` or `OrStatement` . You cannot use a rule group reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.", - "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. 
If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", + "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "SqliMatchStatement": "A rule statement that inspects for malicious SQL code. Attackers insert malicious SQL code into web requests to do things like modify your database or extract data from it.", "XssMatchStatement": "A rule statement that inspects for cross-site scripting (XSS) attacks. In XSS attacks, the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers." }, @@ -42450,7 +42666,7 @@ "AWS::WAFv2::WebACL VisibilityConfig": { "CloudWatchMetricsEnabled": "Indicates whether the associated resource sends metrics to Amazon CloudWatch. For the list of available metrics, see [AWS WAF Metrics](https://docs.aws.amazon.com/waf/latest/developerguide/monitoring-cloudwatch.html#waf-metrics) in the *AWS WAF Developer Guide* .\n\nFor web ACLs, the metrics are for web requests that have the web ACL default action applied. AWS WAF applies the default action to web requests that pass the inspection of all rules in the web ACL without being either allowed or blocked. For more information,\nsee [The web ACL default action](https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-default-action.html) in the *AWS WAF Developer Guide* .", "MetricName": "A name of the Amazon CloudWatch metric dimension. The name can contain only the characters: A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to 128 characters long. It can't contain whitespace or metric names that are reserved for AWS WAF , for example `All` and `Default_Action` .", - "SampledRequestsEnabled": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console." + "SampledRequestsEnabled": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\n> Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration." 
}, "AWS::WAFv2::WebACL XssMatchStatement": { "FieldToMatch": "The part of the web request that you want AWS WAF to inspect.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index c2258435f..1344dfe80 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -2055,6 +2055,11 @@ "title": "Password", "type": "string" }, + "ReplicationUser": { + "markdownDescription": "Defines if this user is intended for CRDR replication purposes.", + "title": "ReplicationUser", + "type": "boolean" + }, "Username": { "markdownDescription": "The username of the broker user. For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibts using guest as a valid usename. This value must be 2-100 characters long.\n\n> Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other AWS services, including CloudWatch Logs . Broker usernames are not intended to be used for private or sensitive data.", "title": "Username", @@ -2365,7 +2370,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", + "markdownDescription": "The environment variables for the Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", "title": "EnvironmentVariables", "type": "array" }, @@ -2474,7 +2479,7 @@ "items": { "$ref": "#/definitions/AWS::Amplify::App.EnvironmentVariable" }, - "markdownDescription": "Environment variables for the auto created branch.", + "markdownDescription": "The environment variables for the autocreated branch.", "title": "EnvironmentVariables", "type": "array" }, @@ -2551,12 +2556,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "", + "markdownDescription": "The environment variable name.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "The environment variable value.", "title": "Value", "type": "string" } @@ -2815,6 +2820,16 @@ "title": "AutoSubDomainIAMRole", "type": "string" }, + "Certificate": { + "$ref": "#/definitions/AWS::Amplify::Domain.Certificate", + "markdownDescription": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. 
Otherwise, `Certificate` points to the current active certificate.", + "title": "Certificate" + }, + "CertificateSettings": { + "$ref": "#/definitions/AWS::Amplify::Domain.CertificateSettings", + "markdownDescription": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", + "title": "CertificateSettings" + }, "DomainName": { "markdownDescription": "The domain name for the domain association.", "title": "DomainName", @@ -2832,6 +2847,11 @@ "markdownDescription": "The setting for the subdomain.", "title": "SubDomainSettings", "type": "array" + }, + "UpdateStatus": { + "markdownDescription": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.", + "title": "UpdateStatus", + "type": "string" } }, "required": [ @@ -2862,6 +2882,43 @@ ], "type": "object" }, + "AWS::Amplify::Domain.Certificate": { + "additionalProperties": false, + "properties": { + "CertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for a custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CertificateArn", + "type": "string" + }, + "CertificateType": { + "markdownDescription": "The type of SSL/TLS certificate that you want to use.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). 
For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CertificateVerificationDNSRecord": { + "markdownDescription": "The DNS record for certificate verification.", + "title": "CertificateVerificationDNSRecord", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Amplify::Domain.CertificateSettings": { + "additionalProperties": false, + "properties": { + "CertificateType": { + "markdownDescription": "The certificate type.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "title": "CertificateType", + "type": "string" + }, + "CustomCertificateArn": { + "markdownDescription": "The Amazon resource name (ARN) for the custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "title": "CustomCertificateArn", + "type": "string" + } + }, + "type": "object" + }, "AWS::Amplify::Domain.SubDomainSetting": { "additionalProperties": false, "properties": { @@ -3024,14 +3081,6 @@ "type": "array" } }, - "required": [ - "BindingProperties", - "ComponentType", - "Name", - "Overrides", - "Properties", - "Variants" - ], "type": "object" }, "Type": { @@ -3050,8 +3099,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3169,6 +3217,11 @@ "title": "Predicates", "type": "array" }, + "SlotName": { + "markdownDescription": "The name of a component slot.", + "title": "SlotName", + "type": "string" + }, "UserAttribute": { "markdownDescription": "An authenticated user attribute.", "title": "UserAttribute", @@ -3219,6 +3272,11 @@ }, "title": "Properties", "type": "object" + }, + "SourceId": { + "markdownDescription": "The unique ID of the child component in its original source system, such as Figma.", + "title": "SourceId", + "type": "string" } }, "required": [ @@ -3312,6 +3370,11 @@ "title": "Action", "type": "string" }, + "BindingEvent": { + "markdownDescription": "Binds an event to an action on a component. 
When you specify a `bindingEvent` , the event is called when the action is performed.", + "title": "BindingEvent", + "type": "string" + }, "Parameters": { "$ref": "#/definitions/AWS::AmplifyUIBuilder::Component.ActionParameters", "markdownDescription": "Describes information about the action.", @@ -3518,6 +3581,11 @@ "title": "Operand", "type": "string" }, + "OperandType": { + "markdownDescription": "The type of value to use when performing the evaluation.", + "title": "OperandType", + "type": "string" + }, "Operator": { "markdownDescription": "The operator to use to perform the evaluation.", "title": "Operator", @@ -3668,15 +3736,6 @@ "type": "object" } }, - "required": [ - "DataType", - "Fields", - "FormActionType", - "Name", - "SchemaVersion", - "SectionalElements", - "Style" - ], "type": "object" }, "Type": { @@ -3695,8 +3754,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -3989,9 +4047,49 @@ ], "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue": { + "additionalProperties": false, + "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties", + "markdownDescription": "Describes the properties to customize with data at runtime.", + "title": "BindingProperties" + }, + "Type": { + "markdownDescription": "The property type.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValueProperties": { + "additionalProperties": false, + "properties": { + "Model": { + "markdownDescription": "An Amplify DataStore model.", + "title": "Model", + "type": "string" + } + }, + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormInputValueProperty": { "additionalProperties": false, "properties": { + "BindingProperties": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties", + "markdownDescription": "The information to bind fields to data at runtime.", + "title": "BindingProperties" + }, + "Concat": { + "items": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputValueProperty" + }, + "markdownDescription": "A list of form properties to concatenate to create the value to assign to this field property.", + "title": "Concat", + "type": "array" + }, "Value": { "markdownDescription": "The value to assign to the input field.", "title": "Value", @@ -4000,6 +4098,25 @@ }, "type": "object" }, + "AWS::AmplifyUIBuilder::Form.FormInputValuePropertyBindingProperties": { + "additionalProperties": false, + "properties": { + "Field": { + "markdownDescription": "The data field to bind the property to.", + "title": "Field", + "type": "string" + }, + "Property": { + "markdownDescription": "The form property to bind to the data field.", + "title": "Property", + "type": "string" + } + }, + "required": [ + "Property" + ], + "type": "object" + }, "AWS::AmplifyUIBuilder::Form.FormStyle": { "additionalProperties": false, "properties": { @@ -4098,6 +4215,17 @@ "AWS::AmplifyUIBuilder::Form.ValueMappings": { "additionalProperties": false, "properties": { + "BindingProperties": { + "additionalProperties": false, + "markdownDescription": "The information to bind fields to data at runtime.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::AmplifyUIBuilder::Form.FormInputBindingPropertiesValue" + } + }, + "title": "BindingProperties", + "type": "object" + }, "Values": { "items": { "$ref": 
"#/definitions/AWS::AmplifyUIBuilder::Form.ValueMapping" @@ -4190,10 +4318,6 @@ "type": "array" } }, - "required": [ - "Name", - "Values" - ], "type": "object" }, "Type": { @@ -4212,8 +4336,7 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, @@ -8874,6 +8997,14 @@ "title": "Description", "type": "string" }, + "DynamicExtensionParameters": { + "items": { + "$ref": "#/definitions/AWS::AppConfig::Deployment.DynamicExtensionParameters" + }, + "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "title": "DynamicExtensionParameters", + "type": "array" + }, "EnvironmentId": { "markdownDescription": "The environment ID.", "title": "EnvironmentId", @@ -8923,131 +9054,28 @@ ], "type": "object" }, - "AWS::AppConfig::Deployment.Tags": { + "AWS::AppConfig::Deployment.DynamicExtensionParameters": { "additionalProperties": false, "properties": { - "Key": { - "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", - "title": "Key", - "type": "string" - }, - "Value": { - "markdownDescription": "The tag value can be up to 256 characters.", - "title": "Value", - "type": "string" - } - }, - "type": "object" - }, - "AWS::AppConfig::DeploymentStrategy": { - "additionalProperties": false, - "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ExtensionReference": { + "markdownDescription": "", + "title": "ExtensionReference", "type": "string" }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { - "DeploymentDurationInMinutes": { - "markdownDescription": "Total amount of time for a deployment to last.", - "title": "DeploymentDurationInMinutes", - "type": "number" - }, - "Description": { - "markdownDescription": "A description of the deployment strategy.", - "title": "Description", - "type": "string" - }, - "FinalBakeTimeInMinutes": { - "markdownDescription": "Specifies the amount of time AWS AppConfig monitors for Amazon CloudWatch alarms after the configuration has been deployed to 100% of its targets, before considering the deployment to be complete. If an alarm is triggered during this time, AWS AppConfig rolls back the deployment. You must configure permissions for AWS AppConfig to roll back based on CloudWatch alarms. For more information, see [Configuring permissions for rollback based on Amazon CloudWatch alarms](https://docs.aws.amazon.com/appconfig/latest/userguide/getting-started-with-appconfig-cloudwatch-alarms-permissions.html) in the *AWS AppConfig User Guide* .", - "title": "FinalBakeTimeInMinutes", - "type": "number" - }, - "GrowthFactor": { - "markdownDescription": "The percentage of targets to receive a deployed configuration during each interval.", - "title": "GrowthFactor", - "type": "number" - }, - "GrowthType": { - "markdownDescription": "The algorithm used to define how percentage grows over time. 
AWS AppConfig supports the following growth types:\n\n*Linear* : For this type, AWS AppConfig processes the deployment by dividing the total number of targets by the value specified for `Step percentage` . For example, a linear deployment that uses a `Step percentage` of 10 deploys the configuration to 10 percent of the hosts. After those deployments are complete, the system deploys the configuration to the next 10 percent. This continues until 100% of the targets have successfully received the configuration.\n\n*Exponential* : For this type, AWS AppConfig processes the deployment exponentially using the following formula: `G*(2^N)` . In this formula, `G` is the growth factor specified by the user and `N` is the number of steps until the configuration is deployed to all targets. For example, if you specify a growth factor of 2, then the system rolls out the configuration as follows:\n\n`2*(2^0)`\n\n`2*(2^1)`\n\n`2*(2^2)`\n\nExpressed numerically, the deployment rolls out as follows: 2% of the targets, 4% of the targets, 8% of the targets, and continues until the configuration has been deployed to all targets.", - "title": "GrowthType", - "type": "string" - }, - "Name": { - "markdownDescription": "A name for the deployment strategy.", - "title": "Name", - "type": "string" - }, - "ReplicateTo": { - "markdownDescription": "Save the deployment strategy to a Systems Manager (SSM) document.", - "title": "ReplicateTo", - "type": "string" - }, - "Tags": { - "items": { - "$ref": "#/definitions/AWS::AppConfig::DeploymentStrategy.Tags" - }, - "markdownDescription": "Assigns metadata to an AWS AppConfig resource. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define. You can specify a maximum of 50 tags for a resource.", - "title": "Tags", - "type": "array" - } - }, - "required": [ - "DeploymentDurationInMinutes", - "GrowthFactor", - "Name", - "ReplicateTo" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::AppConfig::DeploymentStrategy" - ], + "ParameterName": { + "markdownDescription": "", + "title": "ParameterName", "type": "string" }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ParameterValue": { + "markdownDescription": "", + "title": "ParameterValue", "type": "string" } }, - "required": [ - "Type", - "Properties" - ], "type": "object" }, - "AWS::AppConfig::DeploymentStrategy.Tags": { + "AWS::AppConfig::Deployment.Tags": { "additionalProperties": false, "properties": { "Key": { @@ -9063,7 +9091,131 @@ }, "type": "object" }, - "AWS::AppConfig::Environment": { + "AWS::AppConfig::DeploymentStrategy": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DeploymentDurationInMinutes": { + "markdownDescription": "Total amount of time for a deployment to last.", + "title": "DeploymentDurationInMinutes", + "type": "number" + }, + "Description": { + "markdownDescription": "A description of the deployment strategy.", + "title": "Description", + "type": "string" + }, + "FinalBakeTimeInMinutes": { + "markdownDescription": "Specifies the 
amount of time AWS AppConfig monitors for Amazon CloudWatch alarms after the configuration has been deployed to 100% of its targets, before considering the deployment to be complete. If an alarm is triggered during this time, AWS AppConfig rolls back the deployment. You must configure permissions for AWS AppConfig to roll back based on CloudWatch alarms. For more information, see [Configuring permissions for rollback based on Amazon CloudWatch alarms](https://docs.aws.amazon.com/appconfig/latest/userguide/getting-started-with-appconfig-cloudwatch-alarms-permissions.html) in the *AWS AppConfig User Guide* .", + "title": "FinalBakeTimeInMinutes", + "type": "number" + }, + "GrowthFactor": { + "markdownDescription": "The percentage of targets to receive a deployed configuration during each interval.", + "title": "GrowthFactor", + "type": "number" + }, + "GrowthType": { + "markdownDescription": "The algorithm used to define how percentage grows over time. AWS AppConfig supports the following growth types:\n\n*Linear* : For this type, AWS AppConfig processes the deployment by dividing the total number of targets by the value specified for `Step percentage` . For example, a linear deployment that uses a `Step percentage` of 10 deploys the configuration to 10 percent of the hosts. After those deployments are complete, the system deploys the configuration to the next 10 percent. This continues until 100% of the targets have successfully received the configuration.\n\n*Exponential* : For this type, AWS AppConfig processes the deployment exponentially using the following formula: `G*(2^N)` . In this formula, `G` is the growth factor specified by the user and `N` is the number of steps until the configuration is deployed to all targets. For example, if you specify a growth factor of 2, then the system rolls out the configuration as follows:\n\n`2*(2^0)`\n\n`2*(2^1)`\n\n`2*(2^2)`\n\nExpressed numerically, the deployment rolls out as follows: 2% of the targets, 4% of the targets, 8% of the targets, and continues until the configuration has been deployed to all targets.", + "title": "GrowthType", + "type": "string" + }, + "Name": { + "markdownDescription": "A name for the deployment strategy.", + "title": "Name", + "type": "string" + }, + "ReplicateTo": { + "markdownDescription": "Save the deployment strategy to a Systems Manager (SSM) document.", + "title": "ReplicateTo", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/AWS::AppConfig::DeploymentStrategy.Tags" + }, + "markdownDescription": "Assigns metadata to an AWS AppConfig resource. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define. You can specify a maximum of 50 tags for a resource.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DeploymentDurationInMinutes", + "GrowthFactor", + "Name", + "ReplicateTo" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::AppConfig::DeploymentStrategy" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::AppConfig::DeploymentStrategy.Tags": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . 
The tag key can be up to 128 characters and must not start with `aws:` .", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The tag value can be up to 256 characters.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::AppConfig::Environment": { "additionalProperties": false, "properties": { "Condition": { @@ -9110,7 +9262,7 @@ }, "Monitors": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Monitors" + "$ref": "#/definitions/AWS::AppConfig::Environment.Monitor" }, "markdownDescription": "Amazon CloudWatch alarms to monitor during the deployment process.", "title": "Monitors", @@ -9123,7 +9275,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/AWS::AppConfig::Environment.Tags" + "$ref": "#/definitions/Tag" }, "markdownDescription": "Metadata to assign to the environment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define.", "title": "Tags", @@ -9157,28 +9309,23 @@ ], "type": "object" }, - "AWS::AppConfig::Environment.Monitors": { + "AWS::AppConfig::Environment.Monitor": { "additionalProperties": false, "properties": { "AlarmArn": { + "markdownDescription": "Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.", + "title": "AlarmArn", "type": "string" }, "AlarmRoleArn": { + "markdownDescription": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` .", + "title": "AlarmRoleArn", "type": "string" } }, - "type": "object" - }, - "AWS::AppConfig::Environment.Tags": { - "additionalProperties": false, - "properties": { - "Key": { - "type": "string" - }, - "Value": { - "type": "string" - } - }, + "required": [ + "AlarmArn" + ], "type": "object" }, "AWS::AppConfig::Extension": { @@ -9291,6 +9438,11 @@ "title": "Description", "type": "string" }, + "Dynamic": { + "markdownDescription": "Indicates whether this parameter's value can be supplied at the extension's action point instead of during extension association. 
Dynamic parameters can't be marked `Required` .", + "title": "Dynamic", + "type": "boolean" + }, "Required": { "markdownDescription": "A parameter value must be specified in the extension association.", "title": "Required", @@ -12244,6 +12396,135 @@ ], "type": "object" }, + "AWS::AppIntegrations::Application": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicationSourceConfig": { + "$ref": "#/definitions/AWS::AppIntegrations::Application.ApplicationSourceConfig", + "markdownDescription": "The configuration for where the application should be loaded from.", + "title": "ApplicationSourceConfig" + }, + "Description": { + "markdownDescription": "The description of the application.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the application.", + "title": "Name", + "type": "string" + }, + "Namespace": { + "markdownDescription": "The namespace of the application.", + "title": "Namespace", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ApplicationSourceConfig", + "Description", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::AppIntegrations::Application" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::AppIntegrations::Application.ApplicationSourceConfig": { + "additionalProperties": false, + "properties": { + "ExternalUrlConfig": { + "$ref": "#/definitions/AWS::AppIntegrations::Application.ExternalUrlConfig", + "markdownDescription": "The external URL source for the application.", + "title": "ExternalUrlConfig" + } + }, + "required": [ + "ExternalUrlConfig" + ], + "type": "object" + }, + "AWS::AppIntegrations::Application.ExternalUrlConfig": { + "additionalProperties": false, + "properties": { + "AccessUrl": { + "markdownDescription": "The URL to access the application.", + "title": "AccessUrl", + "type": "string" + }, + "ApprovedOrigins": { + "items": { + "type": "string" + }, + "markdownDescription": "Additional URLs to allow list if different than the access URL.", + "title": "ApprovedOrigins", + "type": "array" + } + }, + "required": [ + "AccessUrl", + "ApprovedOrigins" + ], + "type": "object" + }, "AWS::AppIntegrations::DataIntegration": { "additionalProperties": false, "properties": { @@ -12286,7 +12567,7 @@ }, "FileConfiguration": { "$ref": "#/definitions/AWS::AppIntegrations::DataIntegration.FileConfiguration", - "markdownDescription": "", + "markdownDescription": "The configuration for what files should be pulled from the source.", "title": "FileConfiguration" }, "KmsKey": { @@ -12300,7 +12581,7 @@ "type": "string" }, "ObjectConfiguration": { - "markdownDescription": "", + "markdownDescription": "The configuration for what data 
should be pulled from the source.", "title": "ObjectConfiguration", "type": "object" }, @@ -12355,7 +12636,7 @@ "additionalProperties": false, "properties": { "Filters": { - "markdownDescription": "", + "markdownDescription": "Restrictions for what files should be pulled from the source.", "title": "Filters", "type": "object" }, @@ -12363,7 +12644,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Identifiers for the source folders to pull all files from recursively.", "title": "Folders", "type": "array" } @@ -18526,6 +18807,11 @@ "title": "AtRestEncryptionEnabled", "type": "boolean" }, + "HealthMetricsConfig": { + "markdownDescription": "Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:\n\n- *NetworkBandwidthOutAllowanceExceeded* : The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.\n- *EngineCPUUtilization* : The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.\n\nMetrics will be recorded by API ID. You can set the value to `ENABLED` or `DISABLED` .", + "title": "HealthMetricsConfig", + "type": "string" + }, "TransitEncryptionEnabled": { "markdownDescription": "Transit encryption flag when connecting to cache. You cannot update this setting after creation.", "title": "TransitEncryptionEnabled", @@ -18721,6 +19007,11 @@ "markdownDescription": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", "title": "LambdaConfig" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` .", + "title": "MetricsConfig", + "type": "string" + }, "Name": { "markdownDescription": "Friendly name for you to identify your AppSync data source after creation.", "title": "Name", @@ -19406,6 +19697,16 @@ "title": "AuthenticationType", "type": "string" }, + "EnhancedMetricsConfig": { + "$ref": "#/definitions/AWS::AppSync::GraphQLApi.EnhancedMetricsConfig", + "markdownDescription": "Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.\n\nEnhanced metrics can be configured at the resolver, data source, and operation levels. For more information, see [Monitoring and logging](https://docs.aws.amazon.com//appsync/latest/devguide/monitoring.html#cw-metrics) in the *AWS AppSync User Guide* .", + "title": "EnhancedMetricsConfig" + }, + "EnvironmentVariables": { + "markdownDescription": "A map containing the list of resources with their properties and environment variables. 
For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .\n\n*Pattern* : `^[A-Za-z]+\\\\w*$\\\\`\n\n*Minimum* : 2\n\n*Maximum* : 64", + "title": "EnvironmentVariables", + "type": "object" + }, "IntrospectionConfig": { "markdownDescription": "Sets the value of the GraphQL API to enable ( `ENABLED` ) or disable ( `DISABLED` ) introspection. If no value is provided, the introspection configuration will be set to `ENABLED` by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled.\n\nFor more information about introspection, see [GraphQL introspection](https://docs.aws.amazon.com/https://graphql.org/learn/introspection/) .", "title": "IntrospectionConfig", @@ -19552,6 +19853,32 @@ }, "type": "object" }, + "AWS::AppSync::GraphQLApi.EnhancedMetricsConfig": { + "additionalProperties": false, + "properties": { + "DataSourceLevelMetricsBehavior": { + "markdownDescription": "Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:\n\n- *Requests* : The number of invocations that occured during a request.\n- *Latency* : The time to complete a data source invocation.\n- *Errors* : The number of errors that occurred during a data source invocation.\n\nThese metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. `dataSourceLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_DATA_SOURCE_METRICS` : Records and emits metric data for all data sources in the request.\n- `PER_DATA_SOURCE_METRICS` : Records and emits metric data for data sources that have the `MetricsConfig` value set to `ENABLED` .", + "title": "DataSourceLevelMetricsBehavior", + "type": "string" + }, + "OperationLevelMetricsConfig": { + "markdownDescription": "Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:\n\n- *Requests* : The number of times a specified GraphQL operation was called.\n- *GraphQL errors* : The number of GraphQL errors that occurred during a specified GraphQL operation.\n\nMetrics will be recorded by API ID and operation name. You can set the value to `ENABLED` or `DISABLED` .", + "title": "OperationLevelMetricsConfig", + "type": "string" + }, + "ResolverLevelMetricsBehavior": { + "markdownDescription": "Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:\n\n- *GraphQL errors* : The number of GraphQL errors that occurred.\n- *Requests* : The number of invocations that occurred during a request.\n- *Latency* : The time to complete a resolver invocation.\n- *Cache hits* : The number of cache hits during a request.\n- *Cache misses* : The number of cache misses during a request.\n\nThese metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. 
`resolverLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_RESOLVER_METRICS` : Records and emits metric data for all resolvers in the request.\n- `PER_RESOLVER_METRICS` : Records and emits metric data for resolvers that have the `MetricsConfig` value set to `ENABLED` .", + "title": "ResolverLevelMetricsBehavior", + "type": "string" + } + }, + "required": [ + "DataSourceLevelMetricsBehavior", + "OperationLevelMetricsConfig", + "ResolverLevelMetricsBehavior" + ], + "type": "object" + }, "AWS::AppSync::GraphQLApi.LambdaAuthorizerConfig": { "additionalProperties": false, "properties": { @@ -19798,6 +20125,11 @@ "title": "MaxBatchSize", "type": "number" }, + "MetricsConfig": { + "markdownDescription": "Enables or disables enhanced resolver metrics for specified resolvers. Note that `MetricsConfig` won't be used unless the `resolverLevelMetricsBehavior` value is set to `PER_RESOLVER_METRICS` . If the `resolverLevelMetricsBehavior` is set to `FULL_REQUEST_RESOLVER_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.", + "title": "MetricsConfig", + "type": "string" + }, "PipelineConfig": { "$ref": "#/definitions/AWS::AppSync::Resolver.PipelineConfig", "markdownDescription": "Functions linked with the pipeline resolver.", @@ -20093,7 +20425,7 @@ "type": "number" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -20279,7 +20611,7 @@ "type": "string" }, "ResourceId": { - "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/default/sample-webapp` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", + "markdownDescription": "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier.\n\n- ECS service - The resource type is `service` and the unique identifier is the cluster name and service name. Example: `service/my-cluster/my-service` .\n- Spot Fleet - The resource type is `spot-fleet-request` and the unique identifier is the Spot Fleet request ID. Example: `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE` .\n- EMR cluster - The resource type is `instancegroup` and the unique identifier is the cluster ID and instance group ID. Example: `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0` .\n- AppStream 2.0 fleet - The resource type is `fleet` and the unique identifier is the fleet name. Example: `fleet/sample-fleet` .\n- DynamoDB table - The resource type is `table` and the unique identifier is the table name. Example: `table/my-table` .\n- DynamoDB global secondary index - The resource type is `index` and the unique identifier is the index name. Example: `table/my-table/index/my-table-index` .\n- Aurora DB cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:my-db-cluster` .\n- SageMaker endpoint variant - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- Custom resources are not supported with a resource type. This parameter must specify the `OutputValue` from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our [GitHub repository](https://docs.aws.amazon.com/https://github.com/aws/aws-auto-scaling-custom-resource) .\n- Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. 
Example: `arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE` .\n- Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: `arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE` .\n- Lambda provisioned concurrency - The resource type is `function` and the unique identifier is the function name with a function version or alias name suffix that is not `$LATEST` . Example: `function:my-function:prod` or `function:my-function:1` .\n- Amazon Keyspaces table - The resource type is `table` and the unique identifier is the table name. Example: `keyspace/mykeyspace/table/mytable` .\n- Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: `arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5` .\n- Amazon ElastiCache replication group - The resource type is `replication-group` and the unique identifier is the replication group name. Example: `replication-group/mycluster` .\n- Neptune cluster - The resource type is `cluster` and the unique identifier is the cluster name. Example: `cluster:mycluster` .\n- SageMaker serverless endpoint - The resource type is `variant` and the unique identifier is the resource ID. Example: `endpoint/my-end-point/variant/KMeansClustering` .\n- SageMaker inference component - The resource type is `inference-component` and the unique identifier is the resource ID. Example: `inference-component/my-inference-component` .", "title": "ResourceId", "type": "string" }, @@ -20641,6 +20973,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AttachMissingPermission": { + "markdownDescription": "If set to true, the managed policies for SSM and CW will be attached to the instance roles if they are missing.", + "title": "AttachMissingPermission", + "type": "boolean" + }, "AutoConfigurationEnabled": { "markdownDescription": "If set to `true` , the application components will be configured with the monitoring configuration recommended by Application Insights.", "title": "AutoConfigurationEnabled", @@ -20864,6 +21201,24 @@ "title": "Logs", "type": "array" }, + "NetWeaverPrometheusExporter": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.NetWeaverPrometheusExporter", + "markdownDescription": "", + "title": "NetWeaverPrometheusExporter" + }, + "Processes": { + "items": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.Process" + }, + "markdownDescription": "", + "title": "Processes", + "type": "array" + }, + "SQLServerPrometheusExporter": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.SQLServerPrometheusExporter", + "markdownDescription": "", + "title": "SQLServerPrometheusExporter" + }, "WindowsEvents": { "items": { "$ref": "#/definitions/AWS::ApplicationInsights::Application.WindowsEvent" @@ -21050,6 +21405,77 @@ ], "type": "object" }, + "AWS::ApplicationInsights::Application.NetWeaverPrometheusExporter": { + "additionalProperties": false, + "properties": { + "InstanceNumbers": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "InstanceNumbers", + "type": "array" + }, + "PrometheusPort": { + "markdownDescription": "", + "title": "PrometheusPort", + "type": "string" + }, + "SAPSID": { + "markdownDescription": "", + "title": "SAPSID", + "type": "string" + } + }, + "required": [ + "InstanceNumbers", + "SAPSID" + ], + "type": "object" + }, + 
"AWS::ApplicationInsights::Application.Process": { + "additionalProperties": false, + "properties": { + "AlarmMetrics": { + "items": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.AlarmMetric" + }, + "markdownDescription": "", + "title": "AlarmMetrics", + "type": "array" + }, + "ProcessName": { + "markdownDescription": "", + "title": "ProcessName", + "type": "string" + } + }, + "required": [ + "AlarmMetrics", + "ProcessName" + ], + "type": "object" + }, + "AWS::ApplicationInsights::Application.SQLServerPrometheusExporter": { + "additionalProperties": false, + "properties": { + "PrometheusPort": { + "markdownDescription": "", + "title": "PrometheusPort", + "type": "string" + }, + "SQLSecretName": { + "markdownDescription": "", + "title": "SQLSecretName", + "type": "string" + } + }, + "required": [ + "PrometheusPort", + "SQLSecretName" + ], + "type": "object" + }, "AWS::ApplicationInsights::Application.SubComponentConfigurationDetails": { "additionalProperties": false, "properties": { @@ -21069,6 +21495,14 @@ "title": "Logs", "type": "array" }, + "Processes": { + "items": { + "$ref": "#/definitions/AWS::ApplicationInsights::Application.Process" + }, + "markdownDescription": "", + "title": "Processes", + "type": "array" + }, "WindowsEvents": { "items": { "$ref": "#/definitions/AWS::ApplicationInsights::Application.WindowsEvent" @@ -22424,6 +22858,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::AutoScaling::AutoScalingGroup.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU for an instance type, in GiB.\n\nDefault: No minimum or maximum limits", @@ -24724,9 +25163,6 @@ "title": "MappingTemplate", "type": "string" }, - "ModifiedAt": { - "type": "string" - }, "Name": { "markdownDescription": "Returns the descriptive name for the transformer.", "title": "Name", @@ -25040,6 +25476,11 @@ "markdownDescription": "Specifies the number of days after creation that a recovery point is moved to cold storage.", "title": "MoveToColdStorageAfterDays", "type": "number" + }, + "OptInToArchiveForSupportedResources": { + "markdownDescription": "Optional Boolean. If this is true, this setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings.", + "title": "OptInToArchiveForSupportedResources", + "type": "boolean" } }, "type": "object" @@ -25681,7 +26122,7 @@ "items": { "type": "string" }, - "markdownDescription": "These are the accounts to be included in the report.", + "markdownDescription": "These are the accounts to be included in the report.\n\nUse string value of `ROOT` to include all organizational units.", "title": "Accounts", "type": "array" }, @@ -25705,7 +26146,7 @@ "items": { "type": "string" }, - "markdownDescription": "These are the Regions to be included in the report.", + "markdownDescription": "These are the Regions to be included in the report.\n\nUse the wildcard as the string value to include all Regions.", "title": "Regions", "type": "array" }, @@ -25761,7 +26202,7 @@ "title": "RecoveryPointSelection" }, "RestoreTestingPlanName": { - "markdownDescription": "This is the restore testing plan name.", + "markdownDescription": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan. This cannot be changed after creation, and it must consist of only alphanumeric characters and underscores.", "title": "RestoreTestingPlanName", "type": "string" }, @@ -25932,7 +26373,7 @@ "type": "object" }, "RestoreTestingPlanName": { - "markdownDescription": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan.", + "markdownDescription": "Unique string that is the name of the restore testing plan.\n\nThe name cannot be changed after creation. The name must consist of only alphanumeric characters and underscores. Maximum length is 50.", "title": "RestoreTestingPlanName", "type": "string" }, @@ -26472,6 +26913,11 @@ "markdownDescription": "An object with properties specific to Amazon ECS-based jobs. 
When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", "title": "ContainerProperties" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "An object that contains the properties for the Amazon ECS resources of a job. When `ecsProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `eksProperties` , or `nodeProperties` .", + "title": "EcsProperties" + }, "EksProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksProperties", "markdownDescription": "An object with properties that are specific to Amazon EKS-based jobs. When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", @@ -26488,13 +26934,7 @@ "title": "NodeProperties" }, "Parameters": { - "additionalProperties": true, "markdownDescription": "Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a `SubmitJob` request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see [Job definition parameters](https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) in the *AWS Batch User Guide* .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Parameters", "type": "object" }, @@ -26522,18 +26962,12 @@ "type": "number" }, "Tags": { - "additionalProperties": true, "markdownDescription": "The tags that are applied to the job definition.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Tags", "type": "object" }, "Timeout": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.JobTimeout", + "$ref": "#/definitions/AWS::Batch::JobDefinition.Timeout", "markdownDescription": "The timeout time for jobs that are submitted with this job definition. After the amount of time you specify passes, AWS Batch terminates your jobs if they aren't finished.", "title": "Timeout" }, @@ -26569,6 +27003,22 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.AuthorizationConfig": { + "additionalProperties": false, + "properties": { + "AccessPointId": { + "markdownDescription": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", + "title": "AccessPointId", + "type": "string" + }, + "Iam": { + "markdownDescription": "Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints) in the *AWS Batch User Guide* . 
EFS IAM authorization requires that `TransitEncryption` be `ENABLED` and that a `JobRoleArn` is specified.", + "title": "Iam", + "type": "string" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.ContainerProperties": { "additionalProperties": false, "properties": { @@ -26635,7 +27085,7 @@ }, "MountPoints": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoint" + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" }, "markdownDescription": "The mount points for data volumes in your container. This parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .", "title": "MountPoints", @@ -26656,6 +27106,11 @@ "title": "ReadonlyRootFilesystem", "type": "boolean" }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, "ResourceRequirements": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" @@ -26697,7 +27152,7 @@ }, "Volumes": { "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Volume" + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" }, "markdownDescription": "A list of data volumes used in a job.", "title": "Volumes", @@ -26733,34 +27188,111 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSAuthorizationConfig": { + "AWS::Batch::JobDefinition.EcsProperties": { "additionalProperties": false, "properties": { - "AccessPointId": { + "TaskProperties": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsTaskProperties" + }, + "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element.", + "title": "TaskProperties", + "type": "array" + } + }, + "required": [ + "TaskProperties" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.EcsTaskProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerProperties" + }, + "markdownDescription": "This object is a list of containers.", + "title": "Containers", + "type": "array" + }, + "EphemeralStorage": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EphemeralStorage", + "markdownDescription": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", + "title": "EphemeralStorage" + }, + "ExecutionRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", + "title": "ExecutionRoleArn", "type": "string" }, - "Iam": { + "IpcMode": { + "markdownDescription": "The IPC resource namespace to use for the containers in the task. 
The valid values are `host` , `task` , or `none` .\n\nIf `host` is specified, all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified `task` share the same IPC resources.\n\nIf `none` is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.\n\nIf no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the Docker run reference.", + "title": "IpcMode", + "type": "string" + }, + "NetworkConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.NetworkConfiguration", + "markdownDescription": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", + "title": "NetworkConfiguration" + }, + "PidMode": { + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the Docker run reference.", + "title": "PidMode", "type": "string" + }, + "PlatformVersion": { + "markdownDescription": "The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. 
For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "PlatformVersion", + "type": "string" + }, + "RuntimePlatform": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RuntimePlatform", + "markdownDescription": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.", + "title": "RuntimePlatform" + }, + "TaskRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.\n\n> This is object is comparable to [ContainerProperties:jobRoleArn](https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerProperties.html) .", + "title": "TaskRoleArn", + "type": "string" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Volumes" + }, + "markdownDescription": "A list of volumes that are associated with the job.", + "title": "Volumes", + "type": "array" } }, "type": "object" }, - "AWS::Batch::JobDefinition.EFSVolumeConfiguration": { + "AWS::Batch::JobDefinition.EfsVolumeConfiguration": { "additionalProperties": false, "properties": { "AuthorizationConfig": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSAuthorizationConfig" + "$ref": "#/definitions/AWS::Batch::JobDefinition.AuthorizationConfig", + "markdownDescription": "The authorization configuration details for the Amazon EFS file system.", + "title": "AuthorizationConfig" }, "FileSystemId": { + "markdownDescription": "The Amazon EFS file system ID to use.", + "title": "FileSystemId", "type": "string" }, "RootDirectory": { + "markdownDescription": "The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying `/` has the same effect as omitting this parameter. The maximum length is 4,096 characters.\n\n> If an EFS access point is specified in the `authorizationConfig` , the root directory parameter must either be omitted or set to `/` , which enforces the path set on the Amazon EFS access point.", + "title": "RootDirectory", "type": "string" }, "TransitEncryption": { + "markdownDescription": "Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Encrypting data in transit](https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryption", "type": "string" }, "TransitEncryptionPort": { + "markdownDescription": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* .", + "title": "TransitEncryptionPort", "type": "number" } }, @@ -26858,24 +27390,12 @@ "additionalProperties": false, "properties": { "Limits": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to reserve for the container. 
The values vary based on the `name` that's specified. Resources can be requested using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> To maximize your resource utilization, provide your jobs with as much memory as possible for the specific instance type that you are using. To learn how, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that's reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both places, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that's reserved for the container. Values must be a whole integer. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both places, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Limits", "type": "object" }, "Requests": { - "additionalProperties": true, "markdownDescription": "The type and quantity of the resources to request for the container. The values vary based on the `name` that's specified. Resources can be requested by using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that are reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that are reserved for the container. Values must be a whole integer. `nvidia.com/gpu` can be specified in `limits` , `requests` , or both. 
If `nvidia.com/gpu` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Requests", "type": "object" } @@ -26961,56 +27481,11 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.EksMetadata": { - "additionalProperties": false, - "properties": { - "Labels": { - "additionalProperties": true, - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.EksPodProperties": { - "additionalProperties": false, - "properties": { - "Containers": { - "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" - }, - "type": "array" - }, - "DnsPolicy": { - "type": "string" - }, - "HostNetwork": { - "type": "boolean" - }, - "Metadata": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksMetadata" - }, - "ServiceAccountName": { - "type": "string" - }, - "Volumes": { - "items": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.EksProperties": { "additionalProperties": false, "properties": { "PodProperties": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EksPodProperties", + "$ref": "#/definitions/AWS::Batch::JobDefinition.PodProperties", "markdownDescription": "The properties for the Kubernetes pod resources of a job.", "title": "PodProperties" } @@ -27135,24 +27610,6 @@ }, "type": "object" }, - "AWS::Batch::JobDefinition.Host": { - "additionalProperties": false, - "properties": { - "SourcePath": { - "type": "string" - } - }, - "type": "object" - }, - "AWS::Batch::JobDefinition.JobTimeout": { - "additionalProperties": false, - "properties": { - "AttemptDurationSeconds": { - "type": "number" - } - }, - "type": "object" - }, "AWS::Batch::JobDefinition.LinuxParameters": { "additionalProperties": false, "properties": { @@ -27204,13 +27661,7 @@ "type": "string" }, "Options": { - "additionalProperties": true, "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "type": "string" - } - }, "title": "Options", "type": "object" }, @@ -27228,16 +27679,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.MountPoint": { + "AWS::Batch::JobDefinition.Metadata": { + "additionalProperties": false, + "properties": { + "Labels": { + "markdownDescription": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object.", + "title": "Labels", + "type": "object" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.MountPoints": { "additionalProperties": false, "properties": { "ContainerPath": { + "markdownDescription": "The path on the container where the host volume is mounted.", + "title": "ContainerPath", "type": "string" }, "ReadOnly": { + "markdownDescription": "If this value is `true` , the container has read-only access to the volume. 
Otherwise, the container can write to the volume. The default value is `false` .", + "title": "ReadOnly", "type": "boolean" }, "SourceVolume": { + "markdownDescription": "The name of the volume to mount.", + "title": "SourceVolume", "type": "string" } }, @@ -27291,6 +27759,19 @@ "markdownDescription": "The container details for the node range.", "title": "Container" }, + "EcsProperties": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsProperties", + "markdownDescription": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "title": "EcsProperties" + }, + "InstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", + "title": "InstanceTypes", + "type": "array" + }, "TargetNodes": { "markdownDescription": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties.", "title": "TargetNodes", @@ -27302,6 +27783,75 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.PodProperties": { + "additionalProperties": false, + "properties": { + "Containers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.", + "title": "Containers", + "type": "array" + }, + "DnsPolicy": { + "markdownDescription": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", + "title": "DnsPolicy", + "type": "string" + }, + "HostNetwork": { + "markdownDescription": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. 
For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "title": "HostNetwork", + "type": "boolean" + }, + "InitContainers": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" + }, + "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", + "title": "InitContainers", + "type": "array" + }, + "Metadata": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Metadata", + "markdownDescription": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", + "title": "Metadata" + }, + "ServiceAccountName": { + "markdownDescription": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "title": "ServiceAccountName", + "type": "string" + }, + "ShareProcessNamespace": { + "markdownDescription": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", + "title": "ShareProcessNamespace", + "type": "boolean" + }, + "Volumes": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.EksVolume" + }, + "markdownDescription": "Specifies the volumes for a job definition that uses Amazon EKS resources.", + "title": "Volumes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.RepositoryCredentials": { + "additionalProperties": false, + "properties": { + "CredentialsParameter": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret containing the private repository credentials.", + "title": "CredentialsParameter", + "type": "string" + } + }, + "required": [ + "CredentialsParameter" + ], + "type": "object" + }, "AWS::Batch::JobDefinition.ResourceRequirement": { "additionalProperties": false, "properties": { @@ -27373,6 +27923,147 @@ ], "type": "object" }, + "AWS::Batch::JobDefinition.TaskContainerDependency": { + "additionalProperties": false, + "properties": { + "Condition": { + "markdownDescription": "The dependency condition of the container. 
The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a zero status. This condition can't be set on an essential container.", + "title": "Condition", + "type": "string" + }, + "ContainerName": { + "markdownDescription": "A unique identifier for the container.", + "title": "ContainerName", + "type": "string" + } + }, + "required": [ + "Condition", + "ContainerName" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.TaskContainerProperties": { + "additionalProperties": false, + "properties": { + "Command": { + "items": { + "type": "string" + }, + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) .", + "title": "Command", + "type": "array" + }, + "DependsOn": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.TaskContainerDependency" + }, + "markdownDescription": "A list of containers that this container depends on.", + "title": "DependsOn", + "type": "array" + }, + "Environment": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Environment" + }, + "markdownDescription": "The environment variables to pass to a container. This parameter maps to Env in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with `AWS_BATCH` . This naming convention is reserved for variables that AWS Batch sets.", + "title": "Environment", + "type": "array" + }, + "Essential": { + "markdownDescription": "If the essential parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. 
For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "Essential", + "type": "boolean" + }, + "Image": { + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `repository-url/image:tag` or `repository-url/image@digest` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of the [*docker run*](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "title": "Image", + "type": "string" + }, + "LinuxParameters": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LinuxParameters", + "markdownDescription": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .", + "title": "LinuxParameters" + }, + "LogConfiguration": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.LogConfiguration", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the *Docker documentation* .\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "LogConfiguration" + }, + "MountPoints": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.MountPoints" + }, + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the [--volume](https://docs.aws.amazon.com/) option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "title": "MountPoints", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a container. The name can be used as a unique identifier to target your `dependsOn` and `Overrides` objects.", + "title": "Name", + "type": "string" + }, + "Privileged": { + "markdownDescription": "When this parameter is `true` , the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on Fargate.", + "title": "Privileged", + "type": "boolean" + }, + "ReadonlyRootFilesystem": { + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. 
This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "title": "ReadonlyRootFilesystem", + "type": "boolean" + }, + "RepositoryCredentials": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.RepositoryCredentials", + "markdownDescription": "The private repository authentication credentials to use.", + "title": "RepositoryCredentials" + }, + "ResourceRequirements": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.ResourceRequirement" + }, + "markdownDescription": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", + "title": "ResourceRequirements", + "type": "array" + }, + "Secrets": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Secret" + }, + "markdownDescription": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the Amazon Elastic Container Service Developer Guide.", + "title": "Secrets", + "type": "array" + }, + "Ulimits": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.Ulimit" + }, + "markdownDescription": "A list of `ulimits` to set in the container. If a `ulimit` value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nAmazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The `nofile` resource limit sets a restriction on the number of open files that a container can use. The default `nofile` soft limit is `1024` and the default hard limit is `65535` .\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version `--format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", + "title": "Ulimits", + "type": "array" + }, + "User": { + "markdownDescription": "The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the `root user (UID 0)` . We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. 
If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "title": "User", + "type": "string" + } + }, + "required": [ + "Image" + ], + "type": "object" + }, + "AWS::Batch::JobDefinition.Timeout": { + "additionalProperties": false, + "properties": { + "AttemptDurationSeconds": { + "markdownDescription": "The job timeout time (in seconds) that's measured from the job attempt's `startedAt` timestamp. After this time passes, AWS Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.\n\nFor array jobs, the timeout applies to the child jobs, not to the parent array job.\n\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.", + "title": "AttemptDurationSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::Batch::JobDefinition.Tmpfs": { "additionalProperties": false, "properties": { @@ -27427,16 +28118,33 @@ ], "type": "object" }, - "AWS::Batch::JobDefinition.Volume": { + "AWS::Batch::JobDefinition.Volumes": { "additionalProperties": false, "properties": { "EfsVolumeConfiguration": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.EFSVolumeConfiguration" + "$ref": "#/definitions/AWS::Batch::JobDefinition.EfsVolumeConfiguration", + "markdownDescription": "This is used when you're using an Amazon Elastic File System file system for job storage. For more information, see [Amazon EFS Volumes](https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the *AWS Batch User Guide* .", + "title": "EfsVolumeConfiguration" }, "Host": { - "$ref": "#/definitions/AWS::Batch::JobDefinition.Host" + "$ref": "#/definitions/AWS::Batch::JobDefinition.VolumesHost", + "markdownDescription": "The contents of the `host` parameter determine whether your data volume persists on the host container instance and where it's stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", + "title": "Host" }, "Name": { + "markdownDescription": "The name of the volume. It can be up to 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). This name is referenced in the `sourceVolume` parameter of container definition `mountPoints` .", + "title": "Name", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Batch::JobDefinition.VolumesHost": { + "additionalProperties": false, + "properties": { + "SourcePath": { + "markdownDescription": "The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.\n\n> This parameter isn't applicable to jobs that run on Fargate resources. 
Don't provide this for these jobs.", + "title": "SourcePath", "type": "string" } }, @@ -27490,6 +28198,14 @@ "title": "JobQueueName", "type": "string" }, + "JobStateTimeLimitActions": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobQueue.JobStateTimeLimitAction" + }, + "markdownDescription": "The set of actions that AWS Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. AWS Batch will perform each action after `maxTimeSeconds` has passed.", + "title": "JobStateTimeLimitActions", + "type": "array" + }, "Priority": { "markdownDescription": "The priority of the job queue. Job queues with a higher priority (or a higher integer value for the `priority` parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of `10` is given scheduling preference over a job queue with a priority value of `1` . All of the compute environments must be either EC2 ( `EC2` or `SPOT` ) or Fargate ( `FARGATE` or `FARGATE_SPOT` ); EC2 and Fargate compute environments can't be mixed.", "title": "Priority", @@ -27564,6 +28280,38 @@ ], "type": "object" }, + "AWS::Batch::JobQueue.JobStateTimeLimitAction": { + "additionalProperties": false, + "properties": { + "Action": { + "markdownDescription": "The action to take when a job is at the head of the job queue in the specified state for the specified period of time. The only supported value is `CANCEL` , which will cancel the job.", + "title": "Action", + "type": "string" + }, + "MaxTimeSeconds": { + "markdownDescription": "The approximate amount of time, in seconds, that must pass with the job in the specified state before the action is taken. The minimum value is 600 (10 minutes) and the maximum value is 86,400 (24 hours).", + "title": "MaxTimeSeconds", + "type": "number" + }, + "Reason": { + "markdownDescription": "The reason to log for the action being taken.", + "title": "Reason", + "type": "string" + }, + "State": { + "markdownDescription": "The state of the job needed to trigger the action. 
The only supported value is `RUNNABLE` .", + "title": "State", + "type": "string" + } + }, + "required": [ + "Action", + "MaxTimeSeconds", + "Reason", + "State" + ], + "type": "object" + }, "AWS::Batch::SchedulingPolicy": { "additionalProperties": false, "properties": { @@ -29482,6 +30230,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AutoScalingSpecifications": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSpecification", + "markdownDescription": "The optional auto scaling capacity settings for a table in provisioned capacity mode.", + "title": "AutoScalingSpecifications" + }, "BillingMode": { "$ref": "#/definitions/AWS::Cassandra::Table.BillingMode", "markdownDescription": "The billing mode for the table, which determines how you'll be charged for reads and writes:\n\n- *On-demand mode* (default) - You pay based on the actual reads and writes your application performs.\n- *Provisioned mode* - Lets you specify the number of reads and writes per second that you need for your application.\n\nIf you don't specify a value for this property, then the table will use on-demand mode.", @@ -29536,6 +30289,14 @@ "title": "RegularColumns", "type": "array" }, + "ReplicaSpecifications": { + "items": { + "$ref": "#/definitions/AWS::Cassandra::Table.ReplicaSpecification" + }, + "markdownDescription": "The AWS Region specific settings of a multi-Region table.\n\nFor a multi-Region table, you can configure the table's read capacity differently per AWS Region. You can do this by configuring the following parameters.\n\n- `region` : The Region where these settings are applied. (Required)\n- `readCapacityUnits` : The provisioned read capacity units. (Optional)\n- `readCapacityAutoScaling` : The read capacity auto scaling settings for the table. (Optional)", + "title": "ReplicaSpecifications", + "type": "array" + }, "TableName": { "markdownDescription": "The name of the table to be created. The table name is case sensitive. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the table name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> If you specify a name, you can't perform updates that require replacing this resource. You can perform updates that require no interruption or some interruption. If you must replace the resource, specify a new name. \n\n*Length constraints:* Minimum length of 3. Maximum length of 255.\n\n*Pattern:* `^[a-zA-Z0-9][a-zA-Z0-9_]{1,47}$`", "title": "TableName", @@ -29577,6 +30338,48 @@ ], "type": "object" }, + "AWS::Cassandra::Table.AutoScalingSetting": { + "additionalProperties": false, + "properties": { + "AutoScalingDisabled": { + "markdownDescription": "This optional parameter enables auto scaling for the table if set to `false` .", + "title": "AutoScalingDisabled", + "type": "boolean" + }, + "MaximumUnits": { + "markdownDescription": "Manage costs by specifying the maximum amount of throughput to provision. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MaximumUnits", + "type": "number" + }, + "MinimumUnits": { + "markdownDescription": "The minimum level of throughput the table should always be ready to support. 
The value must be between 1 and the max throughput per second quota for your account (40,000 by default).", + "title": "MinimumUnits", + "type": "number" + }, + "ScalingPolicy": { + "$ref": "#/definitions/AWS::Cassandra::Table.ScalingPolicy", + "markdownDescription": "Amazon Keyspaces supports the `target tracking` auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. You define the target value as a percentage between 20 and 90.", + "title": "ScalingPolicy" + } + }, + "type": "object" + }, + "AWS::Cassandra::Table.AutoScalingSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's read capacity.", + "title": "ReadCapacityAutoScaling" + }, + "WriteCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The auto scaling settings for the table's write capacity.", + "title": "WriteCapacityAutoScaling" + } + }, + "type": "object" + }, "AWS::Cassandra::Table.BillingMode": { "additionalProperties": false, "properties": { @@ -29674,6 +30477,70 @@ ], "type": "object" }, + "AWS::Cassandra::Table.ReplicaSpecification": { + "additionalProperties": false, + "properties": { + "ReadCapacityAutoScaling": { + "$ref": "#/definitions/AWS::Cassandra::Table.AutoScalingSetting", + "markdownDescription": "The read capacity auto scaling settings for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityAutoScaling" + }, + "ReadCapacityUnits": { + "markdownDescription": "The provisioned read capacity units for the multi-Region table in the specified AWS Region.", + "title": "ReadCapacityUnits", + "type": "number" + }, + "Region": { + "markdownDescription": "The AWS Region.", + "title": "Region", + "type": "string" + } + }, + "required": [ + "Region" + ], + "type": "object" + }, + "AWS::Cassandra::Table.ScalingPolicy": { + "additionalProperties": false, + "properties": { + "TargetTrackingScalingPolicyConfiguration": { + "$ref": "#/definitions/AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration", + "markdownDescription": "The auto scaling policy that scales a table based on the ratio of consumed to provisioned capacity.", + "title": "TargetTrackingScalingPolicyConfiguration" + } + }, + "type": "object" + }, + "AWS::Cassandra::Table.TargetTrackingScalingPolicyConfiguration": { + "additionalProperties": false, + "properties": { + "DisableScaleIn": { + "markdownDescription": "Specifies if `scale-in` is enabled.\n\nWhen auto scaling automatically decreases capacity for a table, the table *scales in* . 
When scaling policies are set, they can't scale in the table lower than its minimum capacity.", + "title": "DisableScaleIn", + "type": "boolean" + }, + "ScaleInCooldown": { + "markdownDescription": "Specifies a `scale-in` cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleInCooldown", + "type": "number" + }, + "ScaleOutCooldown": { + "markdownDescription": "Specifies a scale out cool down period.\n\nA cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.", + "title": "ScaleOutCooldown", + "type": "number" + }, + "TargetValue": { + "markdownDescription": "Specifies the target value for the target tracking auto scaling policy.\n\nAmazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define `targetValue` as a percentage. An `integer` between 20 and 90.", + "title": "TargetValue", + "type": "number" + } + }, + "required": [ + "TargetValue" + ], + "type": "object" + }, "AWS::CertificateManager::Account": { "additionalProperties": false, "properties": { @@ -33288,7 +34155,7 @@ "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and .15.", + "markdownDescription": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution.", "title": "Weight", "type": "number" } @@ -36246,7 +37113,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen 
`resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36569,7 +37436,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type.\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs. \n\nThe `resources.ARN` field can be set one of the following.\n\nIf resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSM::ManagedNode` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats:\n\n- `arn::ssm:::managed-instance/`\n- `arn::ec2:::instance/`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen 
`resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -37258,6 +38125,9 @@ "AWS::CloudWatch::AnomalyDetector.SingleMetricAnomalyDetector": { "additionalProperties": false, "properties": { + "AccountId": { + "type": "string" + }, "Dimensions": { "items": { "$ref": "#/definitions/AWS::CloudWatch::AnomalyDetector.Dimension" @@ -37838,6 +38708,164 @@ ], "type": "object" }, + "AWS::CodeArtifact::PackageGroup": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ContactInfo": { + "markdownDescription": "The contact information of the package group.", + "title": "ContactInfo", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the package group.", + "title": "Description", + "type": "string" + }, + "DomainName": { + "markdownDescription": "The domain that contains the package group.", + "title": "DomainName", + "type": "string" + }, + "DomainOwner": { + "markdownDescription": "The 12-digit account number of the AWS account that owns the domain. It does not include dashes or spaces.", + "title": "DomainOwner", + "type": "string" + }, + "OriginConfiguration": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.OriginConfiguration", + "markdownDescription": "Details about the package origin configuration of a package group.", + "title": "OriginConfiguration" + }, + "Pattern": { + "markdownDescription": "The pattern of the package group. 
The pattern determines which packages are associated with the package group.", + "title": "Pattern", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tags to be applied to the package group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DomainName", + "Pattern" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeArtifact::PackageGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::CodeArtifact::PackageGroup.OriginConfiguration": { + "additionalProperties": false, + "properties": { + "Restrictions": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.Restrictions", + "markdownDescription": "The origin configuration settings that determine how package versions can enter repositories.", + "title": "Restrictions" + } + }, + "required": [ + "Restrictions" + ], + "type": "object" + }, + "AWS::CodeArtifact::PackageGroup.RestrictionType": { + "additionalProperties": false, + "properties": { + "Repositories": { + "items": { + "type": "string" + }, + "markdownDescription": "The repositories to add to the allowed repositories list. The allowed repositories list is used when the `RestrictionMode` is set to `ALLOW_SPECIFIC_REPOSITORIES` .", + "title": "Repositories", + "type": "array" + }, + "RestrictionMode": { + "markdownDescription": "The package group origin restriction setting. When the value is `INHERIT` , the value is set to the value of the first parent package group which does not have a value of `INHERIT` .", + "title": "RestrictionMode", + "type": "string" + } + }, + "required": [ + "RestrictionMode" + ], + "type": "object" + }, + "AWS::CodeArtifact::PackageGroup.Restrictions": { + "additionalProperties": false, + "properties": { + "ExternalUpstream": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.RestrictionType", + "markdownDescription": "The package group origin restriction setting for external, upstream repositories.", + "title": "ExternalUpstream" + }, + "InternalUpstream": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.RestrictionType", + "markdownDescription": "The package group origin restriction setting for internal, upstream repositories.", + "title": "InternalUpstream" + }, + "Publish": { + "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup.RestrictionType", + "markdownDescription": "The package group origin restriction setting for publishing packages.", + "title": "Publish" + } + }, + "type": "object" + }, "AWS::CodeArtifact::Repository": { "additionalProperties": false, "properties": { @@ -37890,7 +38918,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of external connections associated with the repository.", + "markdownDescription": "An array of external connections associated with the repository. 
For more information, see [Supported external connection repositories](https://docs.aws.amazon.com/codeartifact/latest/ug/external-connection.html#supported-public-repositories) in the *CodeArtifact user guide* .", "title": "ExternalConnections", "type": "array" }, @@ -37948,6 +38976,92 @@ ], "type": "object" }, + "AWS::CodeBuild::Fleet": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "BaseCapacity": { + "markdownDescription": "The initial number of machines allocated to the compute \ufb02eet, which de\ufb01nes the number of builds that can run in parallel.", + "title": "BaseCapacity", + "type": "number" + }, + "ComputeType": { + "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "title": "ComputeType", + "type": "string" + }, + "EnvironmentType": { + "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "title": "EnvironmentType", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the compute fleet.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeBuild::Fleet" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::CodeBuild::Project": { "additionalProperties": false, "properties": { @@ -38282,6 +39396,9 @@ "title": "EnvironmentVariables", "type": "array" }, + "Fleet": { + "$ref": "#/definitions/AWS::CodeBuild::Project.ProjectFleet" + }, "Image": { "markdownDescription": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:\n\n- For an image tag: `/:` . For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be `aws/codebuild/standard:4.0` .\n- For an image digest: `/@` . For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use `/@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf` .\n\nFor more information, see [Docker images provided by CodeBuild](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-available.html) in the *AWS CodeBuild user guide* .", "title": "Image", @@ -38309,7 +39426,9 @@ } }, "required": [ - "Image" + "ComputeType", + "Image", + "Type" ], "type": "object" }, @@ -38685,7 +39804,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of six event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. 
For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.", + "markdownDescription": "The type of webhook filter. There are eight webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , and `RELEASE_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of eight event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , and `PRERELEASED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` and `PRERELEASED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. 
Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.", "title": "Type", "type": "string" } @@ -40530,6 +41649,11 @@ "title": "DisableInboundStageTransitions", "type": "array" }, + "ExecutionMode": { + "markdownDescription": "The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.", + "title": "ExecutionMode", + "type": "string" + }, "Name": { "markdownDescription": "The name of the pipeline.", "title": "Name", @@ -40663,6 +41787,11 @@ "markdownDescription": "The order in which actions are run.", "title": "RunOrder", "type": "number" + }, + "TimeoutInMinutes": { + "markdownDescription": "A timeout duration in minutes that can be applied against the ActionType\u2019s default timeout value specified in [Quotas for AWS CodePipeline](https://docs.aws.amazon.com/codepipeline/latest/userguide/limits.html) . This attribute is available only to the manual approval ActionType.", + "title": "TimeoutInMinutes", + "type": "number" } }, "required": [ @@ -40788,9 +41917,39 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitBranchFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitConfiguration": { "additionalProperties": false, "properties": { + "PullRequest": { + "items": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPullRequestFilter" + }, + "markdownDescription": "The field where the repository event that will start the pipeline is specified as pull requests.", + "title": "PullRequest", + "type": "array" + }, "Push": { "items": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitPushFilter" @@ -40810,9 +41969,65 @@ ], "type": "object" }, + "AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria": { + "additionalProperties": false, + "properties": { + "Excludes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.", + "title": "Excludes", + "type": "array" + }, + "Includes": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline.", + "title": "Includes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::CodePipeline::Pipeline.GitPullRequestFilter": { + "additionalProperties": false, + "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + "markdownDescription": "The field that specifies to filter on branches for the pull request 
trigger configuration.", + "title": "Branches" + }, + "Events": { + "items": { + "type": "string" + }, + "markdownDescription": "The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.", + "title": "Events", + "type": "array" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the pull request trigger configuration.", + "title": "FilePaths" + } + }, + "type": "object" + }, "AWS::CodePipeline::Pipeline.GitPushFilter": { "additionalProperties": false, "properties": { + "Branches": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitBranchFilterCriteria", + "markdownDescription": "The field that specifies to filter on branches for the push trigger configuration.", + "title": "Branches" + }, + "FilePaths": { + "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitFilePathFilterCriteria", + "markdownDescription": "The field that specifies to filter on file paths for the push trigger configuration.", + "title": "FilePaths" + }, "Tags": { "$ref": "#/definitions/AWS::CodePipeline::Pipeline.GitTagFilterCriteria", "markdownDescription": "The field that contains the details for the Git tags trigger configuration.", @@ -41819,6 +43034,10 @@ "type": "boolean" } }, + "required": [ + "ClientId", + "ProviderName" + ], "type": "object" }, "AWS::Cognito::IdentityPool.CognitoStreams": { @@ -41985,12 +43204,24 @@ "type": "string" }, "RoleMappings": { + "additionalProperties": false, "markdownDescription": "How users for a specific identity provider are mapped to roles. This is a string to the `RoleMapping` object map. The string identifies the identity provider. For example: `graph.facebook.com` or `cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id` .\n\nIf the `IdentityProvider` field isn't provided in this object, the string is used as the identity provider name.\n\nFor more information, see the [RoleMapping property](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-identitypoolroleattachment-rolemapping.html) .", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::Cognito::IdentityPoolRoleAttachment.RoleMapping" + } + }, "title": "RoleMappings", "type": "object" }, "Roles": { + "additionalProperties": true, "markdownDescription": "The map of the roles associated with this pool. For a given role, the key is either \"authenticated\" or \"unauthenticated\". The value is the role ARN.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, "title": "Roles", "type": "object" } @@ -42268,7 +43499,7 @@ "type": "array" }, "DeletionProtection": { - "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. 
Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "title": "DeletionProtection", "type": "string" }, @@ -43337,7 +44568,7 @@ "type": "array" }, "ProviderDetails": { - "markdownDescription": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", + "markdownDescription": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata 
document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", "title": "ProviderDetails", "type": "object" }, @@ -48343,6 +49574,22 @@ "title": "AssignContactCategoryActions", "type": "array" }, + "CreateCaseActions": { + 
"items": { + "$ref": "#/definitions/AWS::Connect::Rule.CreateCaseAction" + }, + "markdownDescription": "", + "title": "CreateCaseActions", + "type": "array" + }, + "EndAssociatedTasksActions": { + "items": { + "type": "object" + }, + "markdownDescription": "", + "title": "EndAssociatedTasksActions", + "type": "array" + }, "EventBridgeActions": { "items": { "$ref": "#/definitions/AWS::Connect::Rule.EventBridgeAction" @@ -48366,8 +49613,39 @@ "markdownDescription": "Information about the task action. This field is required if `TriggerEventSource` is one of the following values: `OnZendeskTicketCreate` | `OnZendeskTicketStatusUpdate` | `OnSalesforceCaseCreate`", "title": "TaskActions", "type": "array" + }, + "UpdateCaseActions": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.UpdateCaseAction" + }, + "markdownDescription": "", + "title": "UpdateCaseActions", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Connect::Rule.CreateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + }, + "TemplateId": { + "markdownDescription": "", + "title": "TemplateId", + "type": "string" } }, + "required": [ + "Fields", + "TemplateId" + ], "type": "object" }, "AWS::Connect::Rule.EventBridgeAction": { @@ -48384,6 +49662,52 @@ ], "type": "object" }, + "AWS::Connect::Rule.Field": { + "additionalProperties": false, + "properties": { + "Id": { + "markdownDescription": "", + "title": "Id", + "type": "string" + }, + "Value": { + "$ref": "#/definitions/AWS::Connect::Rule.FieldValue", + "markdownDescription": "", + "title": "Value" + } + }, + "required": [ + "Id", + "Value" + ], + "type": "object" + }, + "AWS::Connect::Rule.FieldValue": { + "additionalProperties": false, + "properties": { + "BooleanValue": { + "markdownDescription": "", + "title": "BooleanValue", + "type": "boolean" + }, + "DoubleValue": { + "markdownDescription": "", + "title": "DoubleValue", + "type": "number" + }, + "EmptyValue": { + "markdownDescription": "", + "title": "EmptyValue", + "type": "object" + }, + "StringValue": { + "markdownDescription": "", + "title": "StringValue", + "type": "string" + } + }, + "type": "object" + }, "AWS::Connect::Rule.NotificationRecipientType": { "additionalProperties": false, "properties": { @@ -48521,6 +49845,23 @@ ], "type": "object" }, + "AWS::Connect::Rule.UpdateCaseAction": { + "additionalProperties": false, + "properties": { + "Fields": { + "items": { + "$ref": "#/definitions/AWS::Connect::Rule.Field" + }, + "markdownDescription": "", + "title": "Fields", + "type": "array" + } + }, + "required": [ + "Fields" + ], + "type": "object" + }, "AWS::Connect::SecurityKey": { "additionalProperties": false, "properties": { @@ -48629,6 +49970,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AllowedAccessControlHierarchyGroupId": { + "markdownDescription": "The identifier of the hierarchy group that a security profile uses to restrict access to resources in Amazon Connect.", + "title": "AllowedAccessControlHierarchyGroupId", + "type": "string" + }, "AllowedAccessControlTags": { "items": { "$ref": "#/definitions/Tag" @@ -48637,11 +49983,27 @@ "title": "AllowedAccessControlTags", "type": "array" }, + "Applications": { + "items": { + "$ref": "#/definitions/AWS::Connect::SecurityProfile.Application" + }, + "markdownDescription": "", + "title": "Applications", + "type": "array" + }, "Description": { 
"markdownDescription": "The description of the security profile.", "title": "Description", "type": "string" }, + "HierarchyRestrictedResources": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of resources that a security profile applies hierarchy restrictions to in Amazon Connect. Following are acceptable ResourceNames: `User` .", + "title": "HierarchyRestrictedResources", + "type": "array" + }, "InstanceArn": { "markdownDescription": "The identifier of the Amazon Connect instance.", "title": "InstanceArn", @@ -48704,6 +50066,29 @@ ], "type": "object" }, + "AWS::Connect::SecurityProfile.Application": { + "additionalProperties": false, + "properties": { + "ApplicationPermissions": { + "items": { + "type": "string" + }, + "markdownDescription": "The permissions that the agent is granted on the application. Only the `ACCESS` permission is supported.", + "title": "ApplicationPermissions", + "type": "array" + }, + "Namespace": { + "markdownDescription": "Namespace of the application that you want to give access to.", + "title": "Namespace", + "type": "string" + } + }, + "required": [ + "ApplicationPermissions", + "Namespace" + ], + "type": "object" + }, "AWS::Connect::TaskTemplate": { "additionalProperties": false, "properties": { @@ -49736,6 +51121,117 @@ ], "type": "object" }, + "AWS::ControlTower::EnabledBaseline": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "BaselineIdentifier": { + "markdownDescription": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", + "title": "BaselineIdentifier", + "type": "string" + }, + "BaselineVersion": { + "markdownDescription": "The enabled version of the `Baseline` .", + "title": "BaselineVersion", + "type": "string" + }, + "Parameters": { + "items": { + "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline.Parameter" + }, + "markdownDescription": "Parameters that are applied when enabling this `Baseline` . 
These parameters configure the behavior of the baseline.", + "title": "Parameters", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags associated with input to `EnableBaseline` .", + "title": "Tags", + "type": "array" + }, + "TargetIdentifier": { + "markdownDescription": "The target on which to enable the `Baseline` .", + "title": "TargetIdentifier", + "type": "string" + } + }, + "required": [ + "BaselineIdentifier", + "BaselineVersion", + "TargetIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::ControlTower::EnabledBaseline" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::ControlTower::EnabledBaseline.Parameter": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "A string denoting the parameter key.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "A low-level `Document` object of any type (for example, a Java Object).", + "title": "Value", + "type": "object" + } + }, + "type": "object" + }, "AWS::ControlTower::EnabledControl": { "additionalProperties": false, "properties": { @@ -49784,6 +51280,14 @@ "title": "Parameters", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "Tags to be applied to the enabled control.", + "title": "Tags", + "type": "array" + }, "TargetIdentifier": { "markdownDescription": "The ARN of the organizational unit. For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", "title": "TargetIdentifier", @@ -50199,6 +51703,7 @@ } }, "required": [ + "DefaultExpirationDays", "DomainName" ], "type": "object" @@ -51193,7 +52698,9 @@ } }, "required": [ - "DomainName" + "Description", + "DomainName", + "ObjectTypeName" ], "type": "object" }, @@ -51776,7 +53283,7 @@ "additionalProperties": false, "properties": { "CronExpression": { - "markdownDescription": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see [Cron expressions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) in the *Amazon CloudWatch User Guide* .", + "markdownDescription": "The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see the [Cron expressions reference](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) in the *Amazon EventBridge User Guide* .", "title": "CronExpression", "type": "string" }, @@ -52569,28 +54076,18 @@ "additionalProperties": false, "properties": { "CertificateArn": { - "markdownDescription": "", - "title": "CertificateArn", "type": "string" }, "DatabaseName": { - "markdownDescription": "Database name for the endpoint.", - "title": "DatabaseName", "type": "string" }, "Port": { - "markdownDescription": "Endpoint TCP port.", - "title": "Port", "type": "number" }, "ServerName": { - "markdownDescription": "Fully qualified domain name of the endpoint. 
For an Amazon RDS SQL Server instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.", - "title": "ServerName", "type": "string" }, "SslMode": { - "markdownDescription": "", - "title": "SslMode", "type": "string" } }, @@ -52600,23 +54097,15 @@ "additionalProperties": false, "properties": { "CertificateArn": { - "markdownDescription": "", - "title": "CertificateArn", "type": "string" }, "Port": { - "markdownDescription": "Endpoint TCP port.", - "title": "Port", "type": "number" }, "ServerName": { - "markdownDescription": "The host name of the endpoint database.\n\nFor an Amazon RDS MySQL instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.\n\nFor an Aurora MySQL instance, this is the output of [DescribeDBClusters](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusters.html) , in the `Endpoint` field.", - "title": "ServerName", "type": "string" }, "SslMode": { - "markdownDescription": "", - "title": "SslMode", "type": "string" } }, @@ -52626,53 +54115,33 @@ "additionalProperties": false, "properties": { "AsmServer": { - "markdownDescription": "For an Oracle source endpoint, your ASM server address. You can set this value from the `asm_server` value. You set `asm_server` as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see [Configuration for change data capture (CDC) on an Oracle source database](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration) .", - "title": "AsmServer", "type": "string" }, "CertificateArn": { - "markdownDescription": "", - "title": "CertificateArn", "type": "string" }, "DatabaseName": { - "markdownDescription": "Database name for the endpoint.", - "title": "DatabaseName", "type": "string" }, "Port": { - "markdownDescription": "Endpoint TCP port.", - "title": "Port", "type": "number" }, "SecretsManagerOracleAsmAccessRoleArn": { - "markdownDescription": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN of the IAM role that specifies AWS DMS as the trusted entity and grants the required permissions to access the `SecretsManagerOracleAsmSecret` . This `SecretsManagerOracleAsmSecret` has the secret value that allows access to the Oracle ASM of the endpoint.\n\n> You can specify one of two sets of values for these permissions. You can specify the values for this setting and `SecretsManagerOracleAsmSecretId` . Or you can specify clear-text values for `AsmUser` , `AsmPassword` , and `AsmServerName` . You can't specify both. 
For more information on creating this `SecretsManagerOracleAsmSecret` and the `SecretsManagerOracleAsmAccessRoleArn` and `SecretsManagerOracleAsmSecretId` required to access it, see [Using secrets to access AWS Database Migration Service resources](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#security-iam-secretsmanager) in the *AWS Database Migration Service User Guide* .", - "title": "SecretsManagerOracleAsmAccessRoleArn", "type": "string" }, "SecretsManagerOracleAsmSecretId": { - "markdownDescription": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN, partial ARN, or friendly name of the `SecretsManagerOracleAsmSecret` that contains the Oracle ASM connection details for the Oracle endpoint.", - "title": "SecretsManagerOracleAsmSecretId", "type": "string" }, "SecretsManagerSecurityDbEncryptionAccessRoleArn": { - "markdownDescription": "", - "title": "SecretsManagerSecurityDbEncryptionAccessRoleArn", "type": "string" }, "SecretsManagerSecurityDbEncryptionSecretId": { - "markdownDescription": "", - "title": "SecretsManagerSecurityDbEncryptionSecretId", "type": "string" }, "ServerName": { - "markdownDescription": "Fully qualified domain name of the endpoint.\n\nFor an Amazon RDS Oracle instance, this is the output of [DescribeDBInstances](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html) , in the `[Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Endpoint.html) .Address` field.", - "title": "ServerName", "type": "string" }, "SslMode": { - "markdownDescription": "", - "title": "SslMode", "type": "string" } }, @@ -52713,19 +54182,13 @@ "additionalProperties": false, "properties": { "MicrosoftSqlServerSettings": { - "$ref": "#/definitions/AWS::DMS::DataProvider.MicrosoftSqlServerSettings", - "markdownDescription": "", - "title": "MicrosoftSqlServerSettings" + "$ref": "#/definitions/AWS::DMS::DataProvider.MicrosoftSqlServerSettings" }, "MySqlSettings": { - "$ref": "#/definitions/AWS::DMS::DataProvider.MySqlSettings", - "markdownDescription": "", - "title": "MySqlSettings" + "$ref": "#/definitions/AWS::DMS::DataProvider.MySqlSettings" }, "OracleSettings": { - "$ref": "#/definitions/AWS::DMS::DataProvider.OracleSettings", - "markdownDescription": "", - "title": "OracleSettings" + "$ref": "#/definitions/AWS::DMS::DataProvider.OracleSettings" }, "PostgreSqlSettings": { "$ref": "#/definitions/AWS::DMS::DataProvider.PostgreSqlSettings", @@ -59296,6 +60759,11 @@ "title": "Includes", "type": "array" }, + "ManifestConfig": { + "$ref": "#/definitions/AWS::DataSync::Task.ManifestConfig", + "markdownDescription": "The configuration of the manifest that lists the files or objects to transfer. For more information, see [Specifying what DataSync transfers by using a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html) .", + "title": "ManifestConfig" + }, "Name": { "markdownDescription": "The name of a task. 
This value is a text reference that is used to identify the task in the console.", "title": "Name", @@ -59372,7 +60840,7 @@ "additionalProperties": false, "properties": { "S3": { - "$ref": "#/definitions/AWS::DataSync::Task.S3", + "$ref": "#/definitions/AWS::DataSync::Task.TaskReportConfigDestinationS3", "markdownDescription": "Specifies the Amazon S3 bucket where DataSync uploads your task report.", "title": "S3" } @@ -59395,6 +60863,56 @@ }, "type": "object" }, + "AWS::DataSync::Task.ManifestConfig": { + "additionalProperties": false, + "properties": { + "Action": { + "markdownDescription": "Specifies what DataSync uses the manifest for.", + "title": "Action", + "type": "string" + }, + "Format": { + "markdownDescription": "Specifies the file format of your manifest. For more information, see [Creating a manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-create) .", + "title": "Format", + "type": "string" + }, + "Source": { + "$ref": "#/definitions/AWS::DataSync::Task.Source", + "markdownDescription": "Specifies the manifest that you want DataSync to use and where it's hosted.\n\n> You must specify this parameter if you're configuring a new manifest on or after February 7, 2024.\n> \n> If you don't, you'll get a 400 status code and `ValidationException` error stating that you're missing the IAM role for DataSync to access the S3 bucket where you're hosting your manifest. For more information, see [Providing DataSync access to your manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-access) .", + "title": "Source" + } + }, + "required": [ + "Source" + ], + "type": "object" + }, + "AWS::DataSync::Task.ManifestConfigSourceS3": { + "additionalProperties": false, + "properties": { + "BucketAccessRoleArn": { + "markdownDescription": "Specifies the AWS Identity and Access Management (IAM) role that allows DataSync to access your manifest. For more information, see [Providing DataSync access to your manifest](https://docs.aws.amazon.com/datasync/latest/userguide/transferring-with-manifest.html#transferring-with-manifest-access) .", + "title": "BucketAccessRoleArn", + "type": "string" + }, + "ManifestObjectPath": { + "markdownDescription": "Specifies the Amazon S3 object key of your manifest. This can include a prefix (for example, `prefix/my-manifest.csv` ).", + "title": "ManifestObjectPath", + "type": "string" + }, + "ManifestObjectVersionId": { + "markdownDescription": "Specifies the object version ID of the manifest that you want DataSync to use. If you don't set this, DataSync uses the latest version of the object.", + "title": "ManifestObjectVersionId", + "type": "string" + }, + "S3BucketArn": { + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the S3 bucket where you're hosting your manifest.", + "title": "S3BucketArn", + "type": "string" + } + }, + "type": "object" + }, "AWS::DataSync::Task.Options": { "additionalProperties": false, "properties": { @@ -59506,18 +61024,12 @@ "additionalProperties": false, "properties": { "BucketAccessRoleArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. 
For more information, see [Allowing DataSync to upload a task report to an Amazon S3 bucket](https://docs.aws.amazon.com/datasync/latest/userguide/creating-task-reports.html) .", - "title": "BucketAccessRoleArn", "type": "string" }, "S3BucketArn": { - "markdownDescription": "Specifies the ARN of the S3 bucket where DataSync uploads your report.", - "title": "S3BucketArn", "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a bucket prefix for your report.", - "title": "Subdirectory", "type": "string" } }, @@ -59534,6 +61046,17 @@ }, "type": "object" }, + "AWS::DataSync::Task.Source": { + "additionalProperties": false, + "properties": { + "S3": { + "$ref": "#/definitions/AWS::DataSync::Task.ManifestConfigSourceS3", + "markdownDescription": "Specifies the S3 bucket where you're hosting your manifest.", + "title": "S3" + } + }, + "type": "object" + }, "AWS::DataSync::Task.TaskReportConfig": { "additionalProperties": false, "properties": { @@ -59569,6 +61092,27 @@ ], "type": "object" }, + "AWS::DataSync::Task.TaskReportConfigDestinationS3": { + "additionalProperties": false, + "properties": { + "BucketAccessRoleArn": { + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see [Allowing DataSync to upload a task report to an Amazon S3 bucket](https://docs.aws.amazon.com/datasync/latest/userguide/task-reports.html) .", + "title": "BucketAccessRoleArn", + "type": "string" + }, + "S3BucketArn": { + "markdownDescription": "Specifies the ARN of the S3 bucket where DataSync uploads your report.", + "title": "S3BucketArn", + "type": "string" + }, + "Subdirectory": { + "markdownDescription": "Specifies a bucket prefix for your report.", + "title": "Subdirectory", + "type": "string" + } + }, + "type": "object" + }, "AWS::DataSync::Task.TaskSchedule": { "additionalProperties": false, "properties": { @@ -59605,7 +61149,7 @@ }, "type": "object" }, - "AWS::Detective::Graph": { + "AWS::DataZone::DataSource": { "additionalProperties": false, "properties": { "Condition": { @@ -59640,25 +61184,82 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoEnableMembers": { - "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. 
For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", - "title": "AutoEnableMembers", - "type": "boolean" - }, - "Tags": { + "AssetFormsInput": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::DataZone::DataSource.FormInput" }, - "markdownDescription": "The tag values to assign to the new behavior graph.", - "title": "Tags", + "markdownDescription": "The metadata forms attached to the assets that the data source works with.", + "title": "AssetFormsInput", "type": "array" + }, + "Configuration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.DataSourceConfigurationInput", + "markdownDescription": "The configuration of the data source.", + "title": "Configuration" + }, + "Description": { + "markdownDescription": "The description of the data source.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain where the data source is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnableSetting": { + "markdownDescription": "Specifies whether the data source is enabled.", + "title": "EnableSetting", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The unique identifier of the Amazon DataZone environment to which the data source publishes assets.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the data source.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which you want to add this data source.", + "title": "ProjectIdentifier", + "type": "string" + }, + "PublishOnImport": { + "markdownDescription": "Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog.", + "title": "PublishOnImport", + "type": "boolean" + }, + "Recommendation": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RecommendationConfiguration", + "markdownDescription": "Specifies whether the business name generation is to be enabled for this data source.", + "title": "Recommendation" + }, + "Schedule": { + "$ref": "#/definitions/AWS::DataZone::DataSource.ScheduleConfiguration", + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule" + }, + "Type": { + "markdownDescription": "The type of the data source.", + "title": "Type", + "type": "string" } }, + "required": [ + "DomainIdentifier", + "EnvironmentIdentifier", + "Name", + "ProjectIdentifier", + "Type" + ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::Graph" + "AWS::DataZone::DataSource" ], "type": "string" }, @@ -59672,11 +61273,1027 @@ } }, "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.DataSourceConfigurationInput": { + "additionalProperties": false, + "properties": { + "GlueRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.GlueRunConfigurationInput", + "markdownDescription": "The configuration of the AWS Glue data source.", + "title": "GlueRunConfiguration" + }, + "RedshiftRunConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftRunConfigurationInput", + "markdownDescription": "The configuration of the Amazon Redshift data source.", + "title": "RedshiftRunConfiguration" + } + }, + 
"type": "object" + }, + "AWS::DataZone::DataSource.FilterExpression": { + "additionalProperties": false, + "properties": { + "Expression": { + "markdownDescription": "The search filter expression.", + "title": "Expression", + "type": "string" + }, + "Type": { + "markdownDescription": "The search filter explresison type.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Expression", "Type" ], "type": "object" }, - "AWS::Detective::MemberInvitation": { + "AWS::DataZone::DataSource.FormInput": { + "additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the metadata form.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The name of the metadata form.", + "title": "FormName", + "type": "string" + }, + "TypeIdentifier": { + "markdownDescription": "The ID of the metadata form type.", + "title": "TypeIdentifier", + "type": "string" + }, + "TypeRevision": { + "markdownDescription": "The revision of the metadata form type.", + "title": "TypeRevision", + "type": "string" + } + }, + "required": [ + "FormName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.GlueRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the AWS Glue data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RelationalFilterConfigurations" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RecommendationConfiguration": { + "additionalProperties": false, + "properties": { + "EnableBusinessNameGeneration": { + "markdownDescription": "Specifies whether automatic business name generation is to be enabled or not as part of the recommendation configuration.", + "title": "EnableBusinessNameGeneration", + "type": "boolean" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftClusterStorage": { + "additionalProperties": false, + "properties": { + "ClusterName": { + "markdownDescription": "The name of an Amazon Redshift cluster.", + "title": "ClusterName", + "type": "string" + } + }, + "required": [ + "ClusterName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftCredentialConfiguration": { + "additionalProperties": false, + "properties": { + "SecretManagerArn": { + "markdownDescription": "The ARN of a secret manager for an Amazon Redshift cluster.", + "title": "SecretManagerArn", + "type": "string" + } + }, + "required": [ + "SecretManagerArn" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftRunConfigurationInput": { + "additionalProperties": false, + "properties": { + "DataAccessRole": { + "markdownDescription": "The data access role included in the configuration details of the Amazon Redshift data source.", + "title": "DataAccessRole", + "type": "string" + }, + "RedshiftCredentialConfiguration": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftCredentialConfiguration", + "markdownDescription": "The details of the credentials required to access an Amazon Redshift cluster.", + "title": "RedshiftCredentialConfiguration" + }, + "RedshiftStorage": { + "$ref": 
"#/definitions/AWS::DataZone::DataSource.RedshiftStorage", + "markdownDescription": "The details of the Amazon Redshift storage as part of the configuration of an Amazon Redshift data source run.", + "title": "RedshiftStorage" + }, + "RelationalFilterConfigurations": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RelationalFilterConfiguration" + }, + "markdownDescription": "The relational filter configurations included in the configuration details of the AWS Glue data source.", + "title": "RelationalFilterConfigurations", + "type": "array" + } + }, + "required": [ + "RedshiftCredentialConfiguration", + "RedshiftStorage", + "RelationalFilterConfigurations" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftServerlessStorage": { + "additionalProperties": false, + "properties": { + "WorkgroupName": { + "markdownDescription": "The name of the Amazon Redshift Serverless workgroup.", + "title": "WorkgroupName", + "type": "string" + } + }, + "required": [ + "WorkgroupName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.RedshiftStorage": { + "additionalProperties": false, + "properties": { + "RedshiftClusterSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftClusterStorage", + "markdownDescription": "The details of the Amazon Redshift cluster source.", + "title": "RedshiftClusterSource" + }, + "RedshiftServerlessSource": { + "$ref": "#/definitions/AWS::DataZone::DataSource.RedshiftServerlessStorage", + "markdownDescription": "The details of the Amazon Redshift Serverless workgroup source.", + "title": "RedshiftServerlessSource" + } + }, + "type": "object" + }, + "AWS::DataZone::DataSource.RelationalFilterConfiguration": { + "additionalProperties": false, + "properties": { + "DatabaseName": { + "markdownDescription": "The database name specified in the relational filter configuration for the data source.", + "title": "DatabaseName", + "type": "string" + }, + "FilterExpressions": { + "items": { + "$ref": "#/definitions/AWS::DataZone::DataSource.FilterExpression" + }, + "markdownDescription": "The filter expressions specified in the relational filter configuration for the data source.", + "title": "FilterExpressions", + "type": "array" + }, + "SchemaName": { + "markdownDescription": "The schema name specified in the relational filter configuration for the data source.", + "title": "SchemaName", + "type": "string" + } + }, + "required": [ + "DatabaseName" + ], + "type": "object" + }, + "AWS::DataZone::DataSource.ScheduleConfiguration": { + "additionalProperties": false, + "properties": { + "Schedule": { + "markdownDescription": "The schedule of the data source runs.", + "title": "Schedule", + "type": "string" + }, + "Timezone": { + "markdownDescription": "The timezone of the data source run.", + "title": "Timezone", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Domain": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the Amazon DataZone domain.", + "title": "Description", + "type": "string" + }, + 
"DomainExecutionRole": { + "markdownDescription": "The domain execution role that is created when an Amazon DataZone domain is created. The domain execution role is created in the AWS account that houses the Amazon DataZone domain.", + "title": "DomainExecutionRole", + "type": "string" + }, + "KmsKeyIdentifier": { + "markdownDescription": "The identifier of the AWS Key Management Service (KMS) key that is used to encrypt the Amazon DataZone domain, metadata, and reporting data.", + "title": "KmsKeyIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone domain.", + "title": "Name", + "type": "string" + }, + "SingleSignOn": { + "$ref": "#/definitions/AWS::DataZone::Domain.SingleSignOn", + "markdownDescription": "The single sign-on details in Amazon DataZone.", + "title": "SingleSignOn" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags specified for the Amazon DataZone domain.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DomainExecutionRole", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Domain" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Domain.SingleSignOn": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The type of single sign-on in Amazon DataZone.", + "title": "Type", + "type": "string" + }, + "UserAssignment": { + "markdownDescription": "The single sign-on user assignment in Amazon DataZone.", + "title": "UserAssignment", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Environment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the environment.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentProfileIdentifier": { + "markdownDescription": "The identifier of the environment profile that is used to create this Amazon DataZone environment.", + "title": "EnvironmentProfileIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone environment.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of the Amazon DataZone environment.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone project in which this environment is created.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::Environment.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of 
this Amazon DataZone environment.", + "title": "UserParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnvironmentProfileIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Environment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::Environment.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment parameter.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which an environment blueprint exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnabledRegions": { + "items": { + "type": "string" + }, + "markdownDescription": "The enabled AWS Regions specified in a blueprint configuration.", + "title": "EnabledRegions", + "type": "array" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of the environment blueprint.\n\nIn the current release, only the following values are supported: `DefaultDataLake` and `DefaultDataWarehouse` .", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "ManageAccessRoleArn": { + "markdownDescription": "The ARN of the manage access role.", + "title": "ManageAccessRoleArn", + "type": "string" + }, + "ProvisioningRoleArn": { + "markdownDescription": "The ARN of the provisioning role.", + "title": "ProvisioningRoleArn", + "type": "string" + }, + "RegionalParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter" + }, + "markdownDescription": "The regional parameters of the environment blueprint.", + "title": "RegionalParameters", + "type": "array" + } + }, + "required": [ + "DomainIdentifier", + "EnabledRegions", + "EnvironmentBlueprintIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentBlueprintConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentBlueprintConfiguration.RegionalParameter": { + "additionalProperties": false, + "properties": { + "Parameters": { + "additionalProperties": true, + "markdownDescription": "A string to string map containing parameters for the region.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Parameters", + "type": "object" + }, + "Region": { + "markdownDescription": "The region specified in the 
environment parameter.", + "title": "Region", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AwsAccountId": { + "markdownDescription": "The identifier of an AWS account in which an environment profile exists.", + "title": "AwsAccountId", + "type": "string" + }, + "AwsAccountRegion": { + "markdownDescription": "The AWS Region in which an environment profile exists.", + "title": "AwsAccountRegion", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the environment profile.", + "title": "Description", + "type": "string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of the Amazon DataZone domain in which the environment profile exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentBlueprintIdentifier": { + "markdownDescription": "The identifier of a blueprint with which an environment profile is created.", + "title": "EnvironmentBlueprintIdentifier", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the environment profile.", + "title": "Name", + "type": "string" + }, + "ProjectIdentifier": { + "markdownDescription": "The identifier of a project in which an environment profile exists.", + "title": "ProjectIdentifier", + "type": "string" + }, + "UserParameters": { + "items": { + "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile.EnvironmentParameter" + }, + "markdownDescription": "The user parameters of this Amazon DataZone environment profile.", + "title": "UserParameters", + "type": "array" + } + }, + "required": [ + "AwsAccountId", + "AwsAccountRegion", + "DomainIdentifier", + "EnvironmentBlueprintIdentifier", + "Name", + "ProjectIdentifier" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::EnvironmentProfile" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::EnvironmentProfile.EnvironmentParameter": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name specified in the environment parameter.", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the environment profile.", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::DataZone::Project": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of a project.", + "title": "Description", + "type": 
"string" + }, + "DomainIdentifier": { + "markdownDescription": "The identifier of a Amazon DataZone domain where the project exists.", + "title": "DomainIdentifier", + "type": "string" + }, + "GlossaryTerms": { + "items": { + "type": "string" + }, + "markdownDescription": "The glossary terms that can be used in this Amazon DataZone project.", + "title": "GlossaryTerms", + "type": "array" + }, + "Name": { + "markdownDescription": "The name of a project.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "DomainIdentifier", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::Project" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicableAssetTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The asset types included in the subscription target.", + "title": "ApplicableAssetTypes", + "type": "array" + }, + "AuthorizedPrincipals": { + "items": { + "type": "string" + }, + "markdownDescription": "The authorized principals included in the subscription target.", + "title": "AuthorizedPrincipals", + "type": "array" + }, + "DomainIdentifier": { + "markdownDescription": "The ID of the Amazon DataZone domain in which subscription target is created.", + "title": "DomainIdentifier", + "type": "string" + }, + "EnvironmentIdentifier": { + "markdownDescription": "The ID of the environment in which subscription target is created.", + "title": "EnvironmentIdentifier", + "type": "string" + }, + "ManageAccessRole": { + "markdownDescription": "The manage access role that is used to create the subscription target.", + "title": "ManageAccessRole", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the subscription target.", + "title": "Name", + "type": "string" + }, + "Provider": { + "markdownDescription": "The provider of the subscription target.", + "title": "Provider", + "type": "string" + }, + "SubscriptionTargetConfig": { + "items": { + "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm" + }, + "markdownDescription": "The configuration of the subscription target.", + "title": "SubscriptionTargetConfig", + "type": "array" + }, + "Type": { + "markdownDescription": "The type of the subscription target.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "ApplicableAssetTypes", + "AuthorizedPrincipals", + "DomainIdentifier", + "EnvironmentIdentifier", + "ManageAccessRole", + "Name", + "SubscriptionTargetConfig", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::DataZone::SubscriptionTarget" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::DataZone::SubscriptionTarget.SubscriptionTargetForm": { + 
"additionalProperties": false, + "properties": { + "Content": { + "markdownDescription": "The content of the subscription target configuration.", + "title": "Content", + "type": "string" + }, + "FormName": { + "markdownDescription": "The form name included in the subscription target configuration.", + "title": "FormName", + "type": "string" + } + }, + "required": [ + "Content", + "FormName" + ], + "type": "object" + }, + "AWS::Detective::Graph": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoEnableMembers": { + "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", + "title": "AutoEnableMembers", + "type": "boolean" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tag values to assign to the new behavior graph.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::Graph" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Detective::MemberInvitation": { "additionalProperties": false, "properties": { "Condition": { @@ -61344,6 +63961,11 @@ "AWS::DynamoDB::GlobalTable.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis data stream.", "title": "StreamArn", @@ -61509,6 +64131,16 @@ "title": "Region", "type": "string" }, + "ReplicaStreamSpecification": { + "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ReplicaStreamSpecification", + "markdownDescription": "Represents the DynamoDB Streams configuration for a global table replica.", + "title": "ReplicaStreamSpecification" + }, + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ResourcePolicy", + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified replica of a DynamoDB global table. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . 
For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "ResourcePolicy" + }, "SSESpecification": { "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ReplicaSSESpecification", "markdownDescription": "Allows you to specify a customer-managed key for the replica. When using customer-managed keys for server-side encryption, this property must have a value in all replicas.", @@ -61533,6 +64165,34 @@ ], "type": "object" }, + "AWS::DynamoDB::GlobalTable.ReplicaStreamSpecification": { + "additionalProperties": false, + "properties": { + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::GlobalTable.ResourcePolicy", + "markdownDescription": "A resource-based policy document that contains the permissions for the specified stream of a DynamoDB global table replica. Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .\n\nYou can update the `ResourcePolicy` property if you've specified more than one table using the [AWS ::DynamoDB::GlobalTable](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-globaltable.html) resource.", + "title": "ResourcePolicy" + } + }, + "required": [ + "ResourcePolicy" + ], + "type": "object" + }, + "AWS::DynamoDB::GlobalTable.ResourcePolicy": { + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified DynamoDB table, its indexes, and stream. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "PolicyDocument", + "type": "object" + } + }, + "required": [ + "PolicyDocument" + ], + "type": "object" + }, "AWS::DynamoDB::GlobalTable.SSESpecification": { "additionalProperties": false, "properties": { @@ -61727,6 +64387,11 @@ "markdownDescription": "Throughput for the specified table, which consists of values for `ReadCapacityUnits` and `WriteCapacityUnits` . For more information about the contents of a provisioned throughput structure, see [Amazon DynamoDB Table ProvisionedThroughput](https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ProvisionedThroughput.html) .\n\nIf you set `BillingMode` as `PROVISIONED` , you must specify this property. 
If you set `BillingMode` as `PAY_PER_REQUEST` , you cannot specify this property.", "title": "ProvisionedThroughput" }, + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::Table.ResourcePolicy", + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified table. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .\n\nWhen you attach a resource-based policy while creating a table, the policy creation is *strongly consistent* . For information about the considerations that you should keep in mind while attaching a resource-based policy, see [Resource-based policy considerations](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) .", + "title": "ResourcePolicy" + }, "SSESpecification": { "$ref": "#/definitions/AWS::DynamoDB::Table.SSESpecification", "markdownDescription": "Specifies the settings to enable server-side encryption.", @@ -61943,6 +64608,11 @@ "AWS::DynamoDB::Table.KinesisStreamSpecification": { "additionalProperties": false, "properties": { + "ApproximateCreationDateTimePrecision": { + "markdownDescription": "The precision for the time and date that the stream was created.", + "title": "ApproximateCreationDateTimePrecision", + "type": "string" + }, "StreamArn": { "markdownDescription": "The ARN for a specific Kinesis data stream.\n\nLength Constraints: Minimum length of 37. Maximum length of 1024.", "title": "StreamArn", @@ -62033,6 +64703,20 @@ ], "type": "object" }, + "AWS::DynamoDB::Table.ResourcePolicy": { + "additionalProperties": false, + "properties": { + "PolicyDocument": { + "markdownDescription": "A resource-based policy document that contains permissions to add to the specified DynamoDB table, index, or both. In a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "PolicyDocument", + "type": "object" + } + }, + "required": [ + "PolicyDocument" + ], + "type": "object" + }, "AWS::DynamoDB::Table.S3BucketSource": { "additionalProperties": false, "properties": { @@ -62084,6 +64768,11 @@ "AWS::DynamoDB::Table.StreamSpecification": { "additionalProperties": false, "properties": { + "ResourcePolicy": { + "$ref": "#/definitions/AWS::DynamoDB::Table.ResourcePolicy", + "markdownDescription": "Creates or updates a resource-based policy document that contains the permissions for DynamoDB resources, such as a table's streams. 
Resource-based policies let you define access permissions by specifying who has access to each resource, and the actions they are allowed to perform on each resource.\n\nIn a CloudFormation template, you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to DynamoDB . For more information about resource-based policies, see [Using resource-based policies for DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/access-control-resource-based.html) and [Resource-based policy examples](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-examples.html) .", + "title": "ResourcePolicy" + }, "StreamViewType": { "markdownDescription": "When an item in the table is modified, `StreamViewType` determines what information is written to the stream for this table. Valid values for `StreamViewType` are:\n\n- `KEYS_ONLY` - Only the key attributes of the modified item are written to the stream.\n- `NEW_IMAGE` - The entire item, as it appears after it was modified, is written to the stream.\n- `OLD_IMAGE` - The entire item, as it appeared before it was modified, is written to the stream.\n- `NEW_AND_OLD_IMAGES` - Both the new and the old item images of the item are written to the stream.", "title": "StreamViewType", @@ -63213,6 +65902,11 @@ "title": "DomainNameServers", "type": "array" }, + "Ipv6AddressPreferredLeaseTime": { + "markdownDescription": "A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed.", + "title": "Ipv6AddressPreferredLeaseTime", + "type": "number" + }, "NetbiosNameServers": { "items": { "type": "string" @@ -63655,6 +66349,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::EC2Fleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -64341,13 +67040,13 @@ "type": "string" }, "DeliverLogsPermissionArn": { - "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nThis parameter is required if the destination type is `cloud-watch-logs` and unsupported otherwise.", + "markdownDescription": "The ARN of the IAM role that allows Amazon EC2 to publish flow logs to the log destination.\n\nThis parameter is required if the destination type is `cloud-watch-logs` , or if the destination type is `kinesis-data-firehose` and the delivery stream and the resources to monitor are in different accounts.", "title": "DeliverLogsPermissionArn", "type": "string" }, "DestinationOptions": { "$ref": "#/definitions/AWS::EC2::FlowLog.DestinationOptions", - "markdownDescription": "The destination options. The following options are supported:\n\n- `FileFormat` - The format for the flow log ( `plain-text` | `parquet` ). The default is `plain-text` .\n- `HiveCompatiblePartitions` - Indicates whether to use Hive-compatible prefixes for flow logs stored in Amazon S3 ( `true` | `false` ). The default is `false` .\n- `PerHourPartition` - Indicates whether to partition the flow log per hour ( `true` | `false` ). The default is `false` .", + "markdownDescription": "The destination options.", "title": "DestinationOptions" }, "LogDestination": { @@ -65667,9 +68366,9 @@ "title": "Ebs" }, "NoDevice": { - "$ref": "#/definitions/AWS::EC2::Instance.NoDevice", "markdownDescription": "To omit the device from the block device mapping, specify an empty string.\n\n> After the instance is running, modifying this parameter results in instance [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", - "title": "NoDevice" + "title": "NoDevice", + "type": "object" }, "VirtualName": { "markdownDescription": "The virtual device name ( `ephemeral` N). The name must be in the form `ephemeral` *X* where *X* is a number starting from zero (0). For example, an instance type with 2 available instance store volumes can specify mappings for `ephemeral0` and `ephemeral1` . The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.\n\nNVMe instance store volumes are automatically enumerated and assigned a device name. Including them in your block device mapping has no effect.\n\n*Constraints* : For M3 instances, you must specify instance store volumes in the block device mapping for the instance. 
When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.\n\n> After the instance is running, modifying this parameter results in instance [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", @@ -65940,11 +68639,6 @@ ], "type": "object" }, - "AWS::EC2::Instance.NoDevice": { - "additionalProperties": false, - "properties": {}, - "type": "object" - }, "AWS::EC2::Instance.PrivateDnsNameOptions": { "additionalProperties": false, "properties": { @@ -66560,7 +69254,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -66756,6 +69450,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.MemoryGiBPerVCpu", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -69449,7 +72148,7 @@ "items": { "type": "string" }, - "markdownDescription": "The security group IDs associated with this network interface.", + "markdownDescription": "The IDs of the security groups associated with this network interface.", "title": "GroupSet", "type": "array" }, @@ -69472,7 +72171,7 @@ "type": "array" }, "Ipv6AddressCount": { - "markdownDescription": "The number of IPv6 addresses to assign to a network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. 
To specify specific IPv6 addresses, use the `Ipv6Addresses` property and don't specify this property.\n\nWhen creating a network interface, you can't specify a count of IPv6 addresses if you've specified one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", + "markdownDescription": "The number of IPv6 addresses to assign to the network interface. Amazon EC2 automatically selects the IPv6 addresses from the subnet range. To specify specific IPv6 addresses, use the `Ipv6Addresses` property and don't specify this property.\n\nWhen creating a network interface, you can't specify a count of IPv6 addresses if you've specified one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", "title": "Ipv6AddressCount", "type": "number" }, @@ -69480,7 +72179,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::NetworkInterface.InstanceIpv6Address" }, - "markdownDescription": "One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet to associate with the network interface. If you're specifying a number of IPv6 addresses, use the `Ipv6AddressCount` property and don't specify this property.\n\nWhen creating a network interface, you can't specify IPv6 addresses if you've specified one of the following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", + "markdownDescription": "The IPv6 addresses from the IPv6 CIDR block range of your subnet to assign to the network interface. If you're specifying a number of IPv6 addresses, use the `Ipv6AddressCount` property and don't specify this property.\n\nWhen creating a network interface, you can't specify IPv6 addresses if you've specified one of the following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.", "title": "Ipv6Addresses", "type": "array" }, @@ -69498,7 +72197,7 @@ "type": "array" }, "PrivateIpAddress": { - "markdownDescription": "Assigns a single private IP address to the network interface, which is used as the primary private IP address. If you want to specify multiple private IP address, use the `PrivateIpAddresses` property.", + "markdownDescription": "The private IPv4 address to assign to the network interface as the primary private IP address. If you want to specify multiple private IP addresses, use the `PrivateIpAddresses` property.", "title": "PrivateIpAddress", "type": "string" }, @@ -69506,7 +72205,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::NetworkInterface.PrivateIpAddressSpecification" }, - "markdownDescription": "Assigns private IP addresses to the network interface. You can specify a primary private IP address by setting the value of the `Primary` property to `true` in the `PrivateIpAddressSpecification` property. If you want EC2 to automatically assign private IP addresses, use the `SecondaryPrivateIpAddressCount` property and do not specify this property.\n\nWhen creating a network interface, you can't specify private IPv4 addresses if you've specified one of the following: a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", + "markdownDescription": "The private IPv4 addresses to assign to the network interface. You can specify a primary private IP address by setting the value of the `Primary` property to `true` in the `PrivateIpAddressSpecification` property. 
If you want EC2 to automatically assign private IP addresses, use the `SecondaryPrivateIpAddressCount` property and do not specify this property.\n\nWhen creating a network interface, you can't specify private IPv4 addresses if you've specified one of the following: a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.", "title": "PrivateIpAddresses", "type": "array" }, @@ -69529,7 +72228,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An arbitrary set of tags (key-value pairs) for this network interface.", + "markdownDescription": "The tags to apply to the network interface.", "title": "Tags", "type": "array" } @@ -69688,6 +72387,11 @@ "title": "DeviceIndex", "type": "string" }, + "EnaSrdSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification", + "markdownDescription": "Configures ENA Express for the network interface that this action attaches to the instance.", + "title": "EnaSrdSpecification" + }, "InstanceId": { "markdownDescription": "The ID of the instance to which you will attach the ENI.", "title": "InstanceId", @@ -69727,6 +72431,33 @@ ], "type": "object" }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdEnabled": { + "markdownDescription": "Indicates whether ENA Express is enabled for the network interface.", + "title": "EnaSrdEnabled", + "type": "boolean" + }, + "EnaSrdUdpSpecification": { + "$ref": "#/definitions/AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification", + "markdownDescription": "Configures ENA Express for UDP network traffic.", + "title": "EnaSrdUdpSpecification" + } + }, + "type": "object" + }, + "AWS::EC2::NetworkInterfaceAttachment.EnaSrdUdpSpecification": { + "additionalProperties": false, + "properties": { + "EnaSrdUdpEnabled": { + "markdownDescription": "Indicates whether UDP traffic to and from the instance uses ENA Express. 
To specify this setting, you must first enable ENA Express.", + "title": "EnaSrdUdpEnabled", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::EC2::NetworkInterfacePermission": { "additionalProperties": false, "properties": { @@ -70041,7 +72772,6 @@ }, "required": [ "AddressFamily", - "MaxEntries", "PrefixListName" ], "type": "object" @@ -70403,12 +73133,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70418,12 +73148,12 @@ "type": "string" }, "DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for the destination AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for the destination AWS service. 
This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70437,6 +73167,11 @@ "title": "IpProtocol", "type": "string" }, + "SourceSecurityGroupId": { + "markdownDescription": "", + "title": "SourceSecurityGroupId", + "type": "string" + }, "ToPort": { "markdownDescription": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).", "title": "ToPort", @@ -70452,12 +73187,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70543,12 +73278,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a destination 
security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -70558,12 +73293,12 @@ "type": "string" }, "DestinationPrefixListId": { - "markdownDescription": "The prefix list IDs for an AWS service. This is the AWS service that you want to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The prefix list IDs for an AWS service. 
This is the AWS service to access through a VPC endpoint from instances associated with the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationPrefixListId", "type": "string" }, "DestinationSecurityGroupId": { - "markdownDescription": "The ID of the security group.\n\nYou must specify a destination security group ( `DestinationPrefixListId` or `DestinationSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).", + "markdownDescription": "The ID of the security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "title": "DestinationSecurityGroupId", "type": "string" }, @@ -70651,12 +73386,12 @@ "additionalProperties": false, "properties": { "CidrIp": { - "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv4 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIp", "type": "string" }, "CidrIpv6": { - "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify a source security group ( `SourcePrefixListId` or `SourceSecurityGroupId` ) or a CIDR range ( `CidrIp` or `CidrIpv6` ).\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The IPv6 address range, in CIDR format.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `SourcePrefixListId` , or `SourceSecurityGroupId` .\n\nFor examples of rules that you can add to security groups for specific access scenarios, see [Security group rules for different use cases](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules-reference.html) in the *Amazon EC2 User Guide* .", "title": "CidrIpv6", "type": "string" }, @@ -71008,7 +73743,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The volume type. 
For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .", "title": "VolumeType", "type": "string" } @@ -71245,6 +73980,11 @@ "title": "LocalStorageTypes", "type": "array" }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": { + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", + "title": "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice", + "type": "number" + }, "MemoryGiBPerVCpu": { "$ref": "#/definitions/AWS::EC2::SpotFleet.MemoryGiBPerVCpuRequest", "markdownDescription": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", @@ -75796,7 +78536,7 @@ "type": "string" }, "Encrypted": { - "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) in the *Amazon Elastic Compute Cloud User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) .", + "markdownDescription": "Indicates whether the volume should be encrypted. The effect of setting the encryption state to `true` depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see [Encryption by default](https://docs.aws.amazon.com/ebs/latest/userguide/work-with-ebs-encr.html#encryption-by-default) in the *Amazon EBS User Guide* .\n\nEncrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption. 
For more information, see [Supported instance types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances) .", "title": "Encrypted", "type": "boolean" }, @@ -75844,7 +78584,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nDefault: `gp2`", + "markdownDescription": "The volume type. This parameter can be one of the following values:\n\n- General Purpose SSD: `gp2` | `gp3`\n- Provisioned IOPS SSD: `io1` | `io2`\n- Throughput Optimized HDD: `st1`\n- Cold HDD: `sc1`\n- Magnetic: `standard`\n\nFor more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) .\n\nDefault: `gp2`", "title": "VolumeType", "type": "string" } @@ -76967,7 +79707,7 @@ "type": "string" }, "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` will not be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that is run using *capacityProviderA* , four tasks would use *capacityProviderB* .", + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. 
Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", "title": "Weight", "type": "number" } @@ -77174,7 +79914,7 @@ "type": "string" }, "PropagateTags": { - "markdownDescription": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n\nThe default is `NONE` .", + "markdownDescription": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n\nYou must set this to a value other than `NONE` when you use Cost Explorer. For more information, see [Amazon ECS usage reports](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/usage-reports.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe default is `NONE` .", "title": "PropagateTags", "type": "string" }, @@ -77501,7 +80241,7 @@ "additionalProperties": false, "properties": { "Field": { - "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `CPU` and `MEMORY` . For the `random` placement strategy, this field is not used.", + "markdownDescription": "The field to apply the placement strategy against. For the `spread` placement strategy, valid values are `instanceId` (or `host` , which has the same effect), or any platform or custom attribute that's applied to a container instance, such as `attribute:ecs.availability-zone` . For the `binpack` placement strategy, valid values are `cpu` and `memory` . 
For the `random` placement strategy, this field is not used.", "title": "Field", "type": "string" }, @@ -77612,6 +80352,16 @@ "markdownDescription": "The `portName` must match the name of one of the `portMappings` from all the containers in the task definition of this Amazon ECS service.", "title": "PortName", "type": "string" + }, + "Timeout": { + "$ref": "#/definitions/AWS::ECS::Service.TimeoutConfiguration", + "markdownDescription": "A reference to an object that represents the configured timeouts for Service Connect.", + "title": "Timeout" + }, + "Tls": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsConfiguration", + "markdownDescription": "A reference to an object that represents a Transport Layer Security (TLS) configuration.", + "title": "Tls" } }, "required": [ @@ -77619,6 +80369,41 @@ ], "type": "object" }, + "AWS::ECS::Service.ServiceConnectTlsCertificateAuthority": { + "additionalProperties": false, + "properties": { + "AwsPcaAuthorityArn": { + "markdownDescription": "The ARN of the AWS Private Certificate Authority certificate.", + "title": "AwsPcaAuthorityArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Service.ServiceConnectTlsConfiguration": { + "additionalProperties": false, + "properties": { + "IssuerCertificateAuthority": { + "$ref": "#/definitions/AWS::ECS::Service.ServiceConnectTlsCertificateAuthority", + "markdownDescription": "The signer certificate authority.", + "title": "IssuerCertificateAuthority" + }, + "KmsKey": { + "markdownDescription": "The AWS Key Management Service key.", + "title": "KmsKey", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "IssuerCertificateAuthority" + ], + "type": "object" + }, "AWS::ECS::Service.ServiceManagedEBSVolumeConfiguration": { "additionalProperties": false, "properties": { @@ -77726,6 +80511,22 @@ ], "type": "object" }, + "AWS::ECS::Service.TimeoutConfiguration": { + "additionalProperties": false, + "properties": { + "IdleTimeoutSeconds": { + "markdownDescription": "The amount of time in seconds a connection will stay active while idle. A value of `0` can be set to disable `idleTimeout` .\n\nThe `idleTimeout` default for `HTTP` / `HTTP2` / `GRPC` is 5 minutes.\n\nThe `idleTimeout` default for `TCP` is 1 hour.", + "title": "IdleTimeoutSeconds", + "type": "number" + }, + "PerRequestTimeoutSeconds": { + "markdownDescription": "The amount of time waiting for the upstream to respond with a complete response per request. A value of `0` can be set to disable `perRequestTimeout` . `perRequestTimeout` can only be set if Service Connect `appProtocol` isn't `TCP` . Only `idleTimeout` is allowed for `TCP` `appProtocol` .", + "title": "PerRequestTimeoutSeconds", + "type": "number" + } + }, + "type": "object" + }, "AWS::ECS::TaskDefinition": { "additionalProperties": false, "properties": { @@ -77919,6 +80720,14 @@ "title": "Cpu", "type": "number" }, + "CredentialSpecs": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . 
The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide an ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", + "title": "CredentialSpecs", + "type": "array" + }, "DependsOn": { "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.ContainerDependency" @@ -78286,7 +81095,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The file type to use. The only supported value is `s3` .", + "markdownDescription": "The file type to use. Environment files are objects in Amazon S3. The only supported value is `s3` .", "title": "Type", "type": "string" }, @@ -78302,7 +81111,7 @@ "additionalProperties": false, "properties": { "SizeInGiB": { - "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB.", + "markdownDescription": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB.", "title": "SizeInGiB", "type": "number" } @@ -78898,6 +81707,14 @@ "title": "ServiceRegistries", "type": "array" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. 
You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + }, "TaskDefinition": { "markdownDescription": "The task definition for the tasks in the task set to use. If a revision isn't specified, the latest `ACTIVE` revision is used.", "title": "TaskDefinition", @@ -80433,7 +83250,7 @@ "additionalProperties": false, "properties": { "AmiType": { - "markdownDescription": "The AMI type for your node group. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add `eks:kube-proxy-windows` to your Windows nodes `rolearn` in the `aws-auth` `ConfigMap` . For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The AMI type for your node group. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add `eks:kube-proxy-windows` to your Windows nodes `rolearn` in the `aws-auth` `ConfigMap` . For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "AmiType", "type": "string" }, @@ -80448,7 +83265,7 @@ "type": "string" }, "DiskSize": { - "markdownDescription": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "DiskSize", "type": "number" }, @@ -80461,7 +83278,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. 
If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the `amiType` parameter. If you specify `launchTemplate` , then you can specify zero or one instance type in your launch template *or* you can specify 0-20 instance types for `instanceTypes` . If however, you specify an instance type in your launch template *and* specify any `instanceTypes` , the node group deployment will fail. If you don't specify an instance type in a launch template or for `instanceTypes` , then `t3.medium` is used, by default. If you specify `Spot` for `capacityType` , then we recommend specifying multiple values for `instanceTypes` . For more information, see [Managed node group capacity types](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types) and [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "InstanceTypes", "type": "array" }, @@ -80478,11 +83295,11 @@ }, "LaunchTemplate": { "$ref": "#/definitions/AWS::EKS::Nodegroup.LaunchTemplateSpecification", - "markdownDescription": "An object representing a node group's launch template specification. If specified, then do not specify `instanceTypes` , `diskSize` , or `remoteAccess` and make sure that the launch template meets the requirements in `launchTemplateSpecification` .", + "markdownDescription": "An object representing a node group's launch template specification. When using this object, don't directly specify `instanceTypes` , `diskSize` , or `remoteAccess` . Make sure that the launch template meets the requirements in `launchTemplateSpecification` . Also refer to [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "LaunchTemplate" }, "NodeRole": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. 
The Amazon EKS worker node `kubelet` daemon makes calls to AWS APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see [Amazon EKS node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) in the **Amazon EKS User Guide** . If you specify `launchTemplate` , then don't specify `[IamInstanceProfile](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "NodeRole", "type": "string" }, @@ -80498,7 +83315,7 @@ }, "RemoteAccess": { "$ref": "#/definitions/AWS::EKS::Nodegroup.RemoteAccess", - "markdownDescription": "The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify `launchTemplate` , then don't specify `remoteAccess` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify `launchTemplate` , then don't specify `remoteAccess` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "RemoteAccess" }, "ScalingConfig": { @@ -80510,7 +83327,7 @@ "items": { "type": "string" }, - "markdownDescription": "The subnets to use for the Auto Scaling group that is created for your node group. If you specify `launchTemplate` , then don't specify `[SubnetId](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "markdownDescription": "The subnets to use for the Auto Scaling group that is created for your node group. If you specify `launchTemplate` , then don't specify `[SubnetId](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html)` in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Customizing managed nodes with launch templates](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "title": "Subnets", "type": "array" }, @@ -80891,7 +83708,7 @@ "title": "ManagedScalingPolicy" }, "Name": { - "markdownDescription": "The name of the cluster.", + "markdownDescription": "The name of the cluster. 
This parameter can't contain the characters <, >, $, |, or ` (backtick).", "title": "Name", "type": "string" }, @@ -81534,6 +84351,11 @@ "markdownDescription": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", "title": "TerminationProtected", "type": "boolean" + }, + "UnhealthyNodeReplacement": { + "markdownDescription": "Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.", + "title": "UnhealthyNodeReplacement", + "type": "boolean" } }, "type": "object" @@ -85041,7 +87863,7 @@ "Port": { "markdownDescription": "The port number that the cache engine is listening on.", "title": "Port", - "type": "number" + "type": "string" } }, "type": "object" @@ -87434,6 +90256,11 @@ "Properties": { "additionalProperties": false, "properties": { + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic": { + "markdownDescription": "Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through AWS PrivateLink .", + "title": "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic", + "type": "string" + }, "IpAddressType": { "markdownDescription": "The IP address type. The possible values are `ipv4` (for IPv4 addresses) and `dualstack` (for IPv4 and IPv6 addresses). You can\u2019t specify `dualstack` for a load balancer with a UDP or TCP_UDP listener.", "title": "IpAddressType", @@ -87521,7 +90348,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. 
The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. 
The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", + "markdownDescription": "The name of the attribute.\n\nThe following attributes are supported by all load balancers:\n\n- `deletion_protection.enabled` - Indicates whether deletion protection is enabled. The value is `true` or `false` . The default is `false` .\n- `load_balancing.cross_zone.enabled` - Indicates whether cross-zone load balancing is enabled. The possible values are `true` and `false` . The default for Network Load Balancers and Gateway Load Balancers is `false` . The default for Application Load Balancers is `true` , and cannot be changed.\n\nThe following attributes are supported by both Application Load Balancers and Network Load Balancers:\n\n- `access_logs.s3.enabled` - Indicates whether access logs are enabled. The value is `true` or `false` . The default is `false` .\n- `access_logs.s3.bucket` - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `access_logs.s3.prefix` - The prefix for the location in the S3 bucket for the access logs.\n- `ipv6.deny_all_igw_traffic` - Blocks internet gateway (IGW) access to the load balancer. It is set to `false` for internet-facing load balancers and `true` for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.\n\nThe following attributes are supported by only Application Load Balancers:\n\n- `idle_timeout.timeout_seconds` - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.\n- `client_keep_alive.seconds` - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.\n- `connection_logs.s3.enabled` - Indicates whether connection logs are enabled. The value is `true` or `false` . The default is `false` .\n- `connection_logs.s3.bucket` - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.\n- `connection_logs.s3.prefix` - The prefix for the location in the S3 bucket for the connection logs.\n- `routing.http.desync_mitigation_mode` - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are `monitor` , `defensive` , and `strictest` . The default is `defensive` .\n- `routing.http.drop_invalid_header_fields.enabled` - Indicates whether HTTP headers with invalid header fields are removed by the load balancer ( `true` ) or routed to targets ( `false` ). The default is `false` .\n- `routing.http.preserve_host_header.enabled` - Indicates whether the Application Load Balancer should preserve the `Host` header in the HTTP request and send it to the target without any change. The possible values are `true` and `false` . 
The default is `false` .\n- `routing.http.x_amzn_tls_version_and_cipher_suite.enabled` - Indicates whether the two headers ( `x-amzn-tls-version` and `x-amzn-tls-cipher-suite` ), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The `x-amzn-tls-version` header has information about the TLS protocol version negotiated with the client, and the `x-amzn-tls-cipher-suite` header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are `true` and `false` . The default is `false` .\n- `routing.http.xff_client_port.enabled` - Indicates whether the `X-Forwarded-For` header should preserve the source port that the client used to connect to the load balancer. The possible values are `true` and `false` . The default is `false` .\n- `routing.http.xff_header_processing.mode` - Enables you to modify, preserve, or remove the `X-Forwarded-For` header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are `append` , `preserve` , and `remove` . The default is `append` .\n\n- If the value is `append` , the Application Load Balancer adds the client IP address (of the last hop) to the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- If the value is `preserve` the Application Load Balancer preserves the `X-Forwarded-For` header in the HTTP request, and sends it to targets without any change.\n- If the value is `remove` , the Application Load Balancer removes the `X-Forwarded-For` header in the HTTP request before it sends it to targets.\n- `routing.http2.enabled` - Indicates whether HTTP/2 is enabled. The possible values are `true` and `false` . The default is `true` . Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.\n- `waf.fail_open.enabled` - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. The possible values are `true` and `false` . The default is `false` .\n\nThe following attributes are supported by only Network Load Balancers:\n\n- `dns_record.client_routing_policy` - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are `availability_zone_affinity` with 100 percent zonal affinity, `partial_availability_zone_affinity` with 85 percent zonal affinity, and `any_availability_zone` with 0 percent zonal affinity.", "title": "Key", "type": "string" }, @@ -93138,7 +95965,7 @@ "type": "number" }, "Mode": { - "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, , or if it using a `USER_PROVISIONED` value.", + "markdownDescription": "Specifies whether the file system is using the `AUTOMATIC` setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it using a `USER_PROVISIONED` value.", "title": "Mode", "type": "string" } @@ -93259,7 +96086,7 @@ "type": "string" }, "HAPairs": { - "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file system are powered by up to six HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . 
For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 6.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", + "markdownDescription": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of `StorageCapacity` , `Iops` , and `ThroughputCapacity` . For more information, see [High-availability (HA) pairs](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/HA-pairs.html) in the FSx for ONTAP user guide.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `HAPairs` is less than 1 or greater than 12.\n- The value of `HAPairs` is greater than 1 and the value of `DeploymentType` is `SINGLE_AZ_1` or `MULTI_AZ_1` .", "title": "HAPairs", "type": "number" }, @@ -93282,7 +96109,7 @@ "type": "number" }, "ThroughputCapacityPerHAPair": { - "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 6).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", + "markdownDescription": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.\n\nYou can define either the `ThroughputCapacityPerHAPair` or the `ThroughputCapacity` when creating a file system, but not both.\n\nThis field and `ThroughputCapacity` are the same for scale-up file systems powered by one HA pair.\n\n- For `SINGLE_AZ_1` and `MULTI_AZ_1` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.\n- For `SINGLE_AZ_2` file systems, valid values are 3072 or 6144 MBps.\n\nAmazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:\n\n- The value of `ThroughputCapacity` and `ThroughputCapacityPerHAPair` are not the same value for file systems with one HA pair.\n- The value of deployment type is `SINGLE_AZ_2` and `ThroughputCapacity` / `ThroughputCapacityPerHAPair` is a valid HA pair (a value between 2 and 12).\n- The value of `ThroughputCapacityPerHAPair` is not a valid value.", "title": "ThroughputCapacityPerHAPair", "type": "number" }, @@ -98112,6 +100939,11 @@ "title": "Description", "type": "string" }, + "LakeFormationConfiguration": { + "$ref": 
"#/definitions/AWS::Glue::Crawler.LakeFormationConfiguration", + "markdownDescription": "Specifies whether the crawler should use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.", + "title": "LakeFormationConfiguration" + }, "Name": { "markdownDescription": "The name of the crawler.", "title": "Name", @@ -98310,6 +101142,22 @@ }, "type": "object" }, + "AWS::Glue::Crawler.LakeFormationConfiguration": { + "additionalProperties": false, + "properties": { + "AccountId": { + "markdownDescription": "Required for cross account crawls. For same account crawls as the target data, this can be left as null.", + "title": "AccountId", + "type": "string" + }, + "UseLakeFormationCredentials": { + "markdownDescription": "Specifies whether to use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.", + "title": "UseLakeFormationCredentials", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::Glue::Crawler.MongoDBTarget": { "additionalProperties": false, "properties": { @@ -98659,6 +101507,11 @@ "title": "CatalogEncryptionMode", "type": "string" }, + "CatalogEncryptionServiceRole": { + "markdownDescription": "The role that AWS Glue assumes to encrypt and decrypt the Data Catalog objects on the caller's behalf.", + "title": "CatalogEncryptionServiceRole", + "type": "string" + }, "SseAwsKmsKeyId": { "markdownDescription": "The ID of the AWS KMS key to use for encryption at rest.", "title": "SseAwsKmsKeyId", @@ -100864,6 +103717,117 @@ }, "type": "object" }, + "AWS::Glue::TableOptimizer": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CatalogId": { + "markdownDescription": "The catalog ID of the table.", + "title": "CatalogId", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.", + "title": "DatabaseName", + "type": "string" + }, + "TableName": { + "markdownDescription": "The table name. For Hive compatibility, this must be entirely lowercase.", + "title": "TableName", + "type": "string" + }, + "TableOptimizerConfiguration": { + "$ref": "#/definitions/AWS::Glue::TableOptimizer.TableOptimizerConfiguration", + "markdownDescription": "Specifies configuration details of a table optimizer.", + "title": "TableOptimizerConfiguration" + }, + "Type": { + "markdownDescription": "The type of table optimizer. 
Currently, the only valid value is compaction.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "CatalogId", + "DatabaseName", + "TableName", + "TableOptimizerConfiguration", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Glue::TableOptimizer" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Glue::TableOptimizer.TableOptimizerConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Whether the table optimization is enabled.", + "title": "Enabled", + "type": "boolean" + }, + "RoleArn": { + "markdownDescription": "A role passed by the caller which gives the service permission to update the resources associated with the optimizer on the caller's behalf.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "Enabled", + "RoleArn" + ], + "type": "object" + }, "AWS::Glue::Trigger": { "additionalProperties": false, "properties": { @@ -105772,7 +108736,7 @@ }, "Tags": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::GuardDuty::Filter.TagItem" }, "markdownDescription": "The tags to be added to a new filter resource. Each tag consists of a key and an optional value, both of which you define.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "title": "Tags", @@ -105780,12 +108744,7 @@ } }, "required": [ - "Action", - "Description", - "DetectorId", - "FindingCriteria", - "Name", - "Rank" + "FindingCriteria" ], "type": "object" }, @@ -105892,14 +108851,37 @@ "additionalProperties": false, "properties": { "Criterion": { + "additionalProperties": false, "markdownDescription": "Represents a map of finding properties that match specified conditions and values when querying findings.\n\nFor information about JSON criterion mapping to their console equivalent, see [Finding criteria](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_filter-findings.html#filter_criteria) . 
The following are the available criterion:\n\n- accountId\n- id\n- region\n- severity\n\nTo filter on the basis of severity, API and CFN use the following input list for the condition:\n\n- *Low* : `[\"1\", \"2\", \"3\"]`\n- *Medium* : `[\"4\", \"5\", \"6\"]`\n- *High* : `[\"7\", \"8\", \"9\"]`\n\nFor more information, see [Severity levels for GuardDuty findings](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity) .\n- type\n- updatedAt\n\nType: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.\n- resource.accessKeyDetails.accessKeyId\n- resource.accessKeyDetails.principalId\n- resource.accessKeyDetails.userName\n- resource.accessKeyDetails.userType\n- resource.instanceDetails.iamInstanceProfile.id\n- resource.instanceDetails.imageId\n- resource.instanceDetails.instanceId\n- resource.instanceDetails.tags.key\n- resource.instanceDetails.tags.value\n- resource.instanceDetails.networkInterfaces.ipv6Addresses\n- resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress\n- resource.instanceDetails.networkInterfaces.publicDnsName\n- resource.instanceDetails.networkInterfaces.publicIp\n- resource.instanceDetails.networkInterfaces.securityGroups.groupId\n- resource.instanceDetails.networkInterfaces.securityGroups.groupName\n- resource.instanceDetails.networkInterfaces.subnetId\n- resource.instanceDetails.networkInterfaces.vpcId\n- resource.instanceDetails.outpostArn\n- resource.resourceType\n- resource.s3BucketDetails.publicAccess.effectivePermissions\n- resource.s3BucketDetails.name\n- resource.s3BucketDetails.tags.key\n- resource.s3BucketDetails.tags.value\n- resource.s3BucketDetails.type\n- service.action.actionType\n- service.action.awsApiCallAction.api\n- service.action.awsApiCallAction.callerType\n- service.action.awsApiCallAction.errorCode\n- service.action.awsApiCallAction.remoteIpDetails.city.cityName\n- service.action.awsApiCallAction.remoteIpDetails.country.countryName\n- service.action.awsApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.awsApiCallAction.remoteIpDetails.organization.asn\n- service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg\n- service.action.awsApiCallAction.serviceName\n- service.action.dnsRequestAction.domain\n- service.action.networkConnectionAction.blocked\n- service.action.networkConnectionAction.connectionDirection\n- service.action.networkConnectionAction.localPortDetails.port\n- service.action.networkConnectionAction.protocol\n- service.action.networkConnectionAction.remoteIpDetails.city.cityName\n- service.action.networkConnectionAction.remoteIpDetails.country.countryName\n- service.action.networkConnectionAction.remoteIpDetails.ipAddressV4\n- service.action.networkConnectionAction.remoteIpDetails.organization.asn\n- service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg\n- service.action.networkConnectionAction.remotePortDetails.port\n- service.action.awsApiCallAction.remoteAccountDetails.affiliated\n- service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4\n- service.action.kubernetesApiCallAction.requestUri\n- service.action.networkConnectionAction.localIpDetails.ipAddressV4\n- service.action.networkConnectionAction.protocol\n- service.action.awsApiCallAction.serviceName\n- service.action.awsApiCallAction.remoteAccountDetails.accountId\n- service.additionalInfo.threatListName\n- service.resourceRole\n- resource.eksClusterDetails.name\n- 
resource.kubernetesDetails.kubernetesWorkloadDetails.name\n- resource.kubernetesDetails.kubernetesWorkloadDetails.namespace\n- resource.kubernetesDetails.kubernetesUserDetails.username\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image\n- resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix\n- service.ebsVolumeScanDetails.scanId\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity\n- service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash\n- resource.ecsClusterDetails.name\n- resource.ecsClusterDetails.taskDetails.containers.image\n- resource.ecsClusterDetails.taskDetails.definitionArn\n- resource.containerDetails.image\n- resource.rdsDbInstanceDetails.dbInstanceIdentifier\n- resource.rdsDbInstanceDetails.dbClusterIdentifier\n- resource.rdsDbInstanceDetails.engine\n- resource.rdsDbUserDetails.user\n- resource.rdsDbInstanceDetails.tags.key\n- resource.rdsDbInstanceDetails.tags.value\n- service.runtimeDetails.process.executableSha256\n- service.runtimeDetails.process.name\n- service.runtimeDetails.process.name\n- resource.lambdaDetails.functionName\n- resource.lambdaDetails.functionArn\n- resource.lambdaDetails.tags.key\n- resource.lambdaDetails.tags.value", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + } + }, "title": "Criterion", "type": "object" + } + }, + "type": "object" + }, + "AWS::GuardDuty::Filter.TagItem": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "", + "title": "Key", + "type": "string" }, - "ItemType": { - "$ref": "#/definitions/AWS::GuardDuty::Filter.Condition" + "Value": { + "markdownDescription": "", + "title": "Value", + "type": "string" } }, + "required": [ + "Key", + "Value" + ], "type": "object" }, "AWS::GuardDuty::IPSet": { @@ -106163,9 +109145,7 @@ } }, "required": [ - "DetectorId", - "Email", - "MemberId" + "Email" ], "type": "object" }, @@ -108403,6 +111383,77 @@ }, "type": "object" }, + "AWS::IVS::Stage": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Stage name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-stage-tag.html) .", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::IVS::Stage" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, "AWS::IVS::StreamKey": { "additionalProperties": false, "properties": { @@ -110911,7 +113962,8 @@ } }, "required": [ - "Name" + "Name", + "SemanticVersion" ], "type": "object" }, @@ -111253,23 +114305,113 @@ 
"Properties": { "additionalProperties": false, "properties": { - "ResourceGroupTags": { - "items": { - "$ref": "#/definitions/Tag" + "ResourceGroupTags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", + "title": "ResourceGroupTags", + "type": "array" + } + }, + "required": [ + "ResourceGroupTags" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Inspector::ResourceGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ScanName": { + "markdownDescription": "The name of the CIS scan configuration.", + "title": "ScanName", + "type": "string" + }, + "Schedule": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Schedule", + "markdownDescription": "The CIS scan configuration's schedule.", + "title": "Schedule" + }, + "SecurityLevel": { + "markdownDescription": "The CIS scan configuration's CIS Benchmark level.", + "title": "SecurityLevel", + "type": "string" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "The CIS scan configuration's tags.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } }, - "markdownDescription": "The tags (key and value pairs) that will be associated with the resource group.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", - "title": "ResourceGroupTags", - "type": "array" + "title": "Tags", + "type": "object" + }, + "Targets": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.CisTargets", + "markdownDescription": "The CIS scan configuration's targets.", + "title": "Targets" } }, - "required": [ - "ResourceGroupTags" - ], "type": "object" }, "Type": { "enum": [ - "AWS::Inspector::ResourceGroup" + "AWS::InspectorV2::CisScanConfiguration" ], "type": "string" }, @@ -111283,8 +114425,132 @@ } }, "required": [ - "Type", - "Properties" + "Type" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.CisTargets": { + "additionalProperties": false, + "properties": { + "AccountIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The CIS target account ids.", + "title": "AccountIds", + "type": "array" + }, + "TargetResourceTags": { + "markdownDescription": "The CIS target resource tags.", + "title": "TargetResourceTags", + "type": "object" + } + }, + "required": [ + "AccountIds" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.DailySchedule": { + "additionalProperties": false, + "properties": { + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The schedule start 
time.", + "title": "StartTime" + } + }, + "required": [ + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.MonthlySchedule": { + "additionalProperties": false, + "properties": { + "Day": { + "markdownDescription": "The monthly schedule's day.", + "title": "Day", + "type": "string" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The monthly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Day", + "StartTime" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Schedule": { + "additionalProperties": false, + "properties": { + "Daily": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.DailySchedule", + "markdownDescription": "A daily schedule.", + "title": "Daily" + }, + "Monthly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.MonthlySchedule", + "markdownDescription": "A monthly schedule.", + "title": "Monthly" + }, + "OneTime": { + "markdownDescription": "A one time schedule.", + "title": "OneTime", + "type": "object" + }, + "Weekly": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.WeeklySchedule", + "markdownDescription": "A weekly schedule.", + "title": "Weekly" + } + }, + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.Time": { + "additionalProperties": false, + "properties": { + "TimeOfDay": { + "markdownDescription": "The time of day in 24-hour format (00:00).", + "title": "TimeOfDay", + "type": "string" + }, + "TimeZone": { + "markdownDescription": "The timezone.", + "title": "TimeZone", + "type": "string" + } + }, + "required": [ + "TimeOfDay", + "TimeZone" + ], + "type": "object" + }, + "AWS::InspectorV2::CisScanConfiguration.WeeklySchedule": { + "additionalProperties": false, + "properties": { + "Days": { + "items": { + "type": "string" + }, + "markdownDescription": "The weekly schedule's days.", + "title": "Days", + "type": "array" + }, + "StartTime": { + "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration.Time", + "markdownDescription": "The weekly schedule's start time.", + "title": "StartTime" + } + }, + "required": [ + "Days", + "StartTime" ], "type": "object" }, @@ -111794,11 +115060,21 @@ "markdownDescription": "A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.\n\nDefines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.\n\nYou can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.\n\nIf you don't set a health event threshold, the default value is 95%.\n\nFor local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. 
In addition, you can disable local thresholds, for performance scores, availability scores, or both.\n\nFor more information, see [Change health event thresholds](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-IM-overview.html#IMUpdateThresholdFromOverview) in the Internet Monitor section of the *CloudWatch User Guide* .", "title": "HealthEventsConfig" }, + "IncludeLinkedAccounts": { + "markdownDescription": "", + "title": "IncludeLinkedAccounts", + "type": "boolean" + }, "InternetMeasurementsLogDelivery": { "$ref": "#/definitions/AWS::InternetMonitor::Monitor.InternetMeasurementsLogDelivery", "markdownDescription": "Publish internet measurements for a monitor for all city-networks (up to the 500,000 service limit) to another location, such as an Amazon S3 bucket. Measurements are also published to Amazon CloudWatch Logs for the first 500 (by traffic volume) city-networks (client locations and ASNs, typically internet service providers or ISPs).", "title": "InternetMeasurementsLogDelivery" }, + "LinkedAccountId": { + "markdownDescription": "", + "title": "LinkedAccountId", + "type": "string" + }, "MaxCityNetworksToMonitor": { "markdownDescription": "The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network, such as an internet service provider, that clients access the resources through.\n\nFor more information, see [Choosing a city-network maximum value](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/IMCityNetworksMaximum.html) in *Using Amazon CloudWatch Internet Monitor* .", "title": "MaxCityNetworksToMonitor", @@ -113168,6 +116444,11 @@ "title": "ServerCertificateArns", "type": "array" }, + "ServerCertificateConfig": { + "$ref": "#/definitions/AWS::IoT::DomainConfiguration.ServerCertificateConfig", + "markdownDescription": "The server certificate configuration.\n\nFor more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "ServerCertificateConfig" + }, "ServiceType": { "markdownDescription": "The type of service delivered by the endpoint.\n\n> AWS IoT Core currently supports only the `DATA` service type.", "title": "ServiceType", @@ -113230,6 +116511,17 @@ }, "type": "object" }, + "AWS::IoT::DomainConfiguration.ServerCertificateConfig": { + "additionalProperties": false, + "properties": { + "EnableOCSPCheck": { + "markdownDescription": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide.", + "title": "EnableOCSPCheck", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::IoT::DomainConfiguration.ServerCertificateSummary": { "additionalProperties": false, "properties": { @@ -121182,7 +124474,7 @@ "title": "AccessPolicyIdentity" }, "AccessPolicyPermission": { - "markdownDescription": "The permission level for this access policy. Choose either a `ADMINISTRATOR` or `VIEWER` . Note that a project `ADMINISTRATOR` is also known as a project owner.", + "markdownDescription": "The permission level for this access policy. 
Note that a project `ADMINISTRATOR` is also known as a project owner.", "title": "AccessPolicyPermission", "type": "string" }, @@ -121235,7 +124527,7 @@ }, "User": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.User", - "markdownDescription": "The IAM Identity Center user to which this access policy maps.", + "markdownDescription": "An IAM Identity Center user identity.", "title": "User" } }, @@ -121246,12 +124538,12 @@ "properties": { "Portal": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Portal", - "markdownDescription": "The AWS IoT SiteWise Monitor portal for this access policy.", + "markdownDescription": "Identifies an AWS IoT SiteWise Monitor portal.", "title": "Portal" }, "Project": { "$ref": "#/definitions/AWS::IoTSiteWise::AccessPolicy.Project", - "markdownDescription": "The AWS IoT SiteWise Monitor project for this access policy.", + "markdownDescription": "Identifies a specific AWS IoT SiteWise Monitor project.", "title": "Project" } }, @@ -121305,7 +124597,7 @@ "additionalProperties": false, "properties": { "id": { - "markdownDescription": "The ID of the user.", + "markdownDescription": "The IAM Identity Center ID of the user.", "title": "id", "type": "string" } @@ -121348,15 +124640,20 @@ "additionalProperties": false, "properties": { "AssetDescription": { - "markdownDescription": "A description for the asset.", + "markdownDescription": "The ID of the asset, in UUID format.", "title": "AssetDescription", "type": "string" }, + "AssetExternalId": { + "markdownDescription": "The external ID of the asset model composite model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "AssetExternalId", + "type": "string" + }, "AssetHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::Asset.AssetHierarchy" }, - "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyLogicalId` . A hierarchy specifies allowed parent/child asset relationships.", + "markdownDescription": "A list of asset hierarchies that each contain a `hierarchyId` . A hierarchy specifies allowed parent/child asset relationships.", "title": "AssetHierarchies", "type": "array" }, @@ -121366,7 +124663,7 @@ "type": "string" }, "AssetName": { - "markdownDescription": "A unique, friendly name for the asset.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A friendly name for the asset.", "title": "AssetName", "type": "string" }, @@ -121422,15 +124719,24 @@ "title": "ChildAssetId", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of the hierarchy, if it has one. When you update an asset hierarchy, you may assign an external ID if it doesn't already have one. You can't change the external ID of an asset hierarchy that already has one. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy. 
This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The ID of the hierarchy. This ID is a `hierarchyId` .", "title": "LogicalId", "type": "string" } }, "required": [ - "ChildAssetId", - "LogicalId" + "ChildAssetId" ], "type": "object" }, @@ -121438,17 +124744,27 @@ "additionalProperties": false, "properties": { "Alias": { - "markdownDescription": "The property alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .\n\nThe property alias must have 1-1000 characters.", + "markdownDescription": "The alias that identifies the property, such as an OPC-UA server data stream path (for example, `/company/windfarm/3/turbine/7/temperature` ). For more information, see [Mapping industrial data streams to asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html) in the *AWS IoT SiteWise User Guide* .", "title": "Alias", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of the property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the asset property.\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset property.\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset property.", "title": "LogicalId", "type": "string" }, "NotificationState": { - "markdownDescription": "The MQTT notification state ( `ENABLED` or `DISABLED` ) for this asset property. When the notification state is `ENABLED` , AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .\n\n> You must use all caps for the NotificationState parameter. If you use lower case letters, you will receive a schema validation error.", + "markdownDescription": "The MQTT notification state (enabled or disabled) for this asset property. When the notification state is enabled, AWS IoT SiteWise publishes property value updates to a unique MQTT topic. For more information, see [Interacting with other services](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html) in the *AWS IoT SiteWise User Guide* .\n\nIf you omit this parameter, the notification state is set to `DISABLED` .", "title": "NotificationState", "type": "string" }, @@ -121458,9 +124774,6 @@ "type": "string" } }, - "required": [ - "LogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::AssetModel": { @@ -121502,7 +124815,7 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel" }, - "markdownDescription": "The composite asset models that are part of this asset model. 
Composite asset models are asset models that contain specific properties. Each composite model has a type that defines the properties that the composite model supports. You can use composite asset models to define alarms on this asset model.", + "markdownDescription": "The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model.\n\n> When creating custom composite models, you need to use [CreateAssetModelCompositeModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModelCompositeModel.html) . For more information, see [Creating custom composite models (Components)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/create-custom-composite-models.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelCompositeModels", "type": "array" }, @@ -121511,16 +124824,21 @@ "title": "AssetModelDescription", "type": "string" }, + "AssetModelExternalId": { + "markdownDescription": "The external ID of the asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "AssetModelExternalId", + "type": "string" + }, "AssetModelHierarchies": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelHierarchy" }, - "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelHierarchies", "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -121528,10 +124846,15 @@ "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" }, - "markdownDescription": "The property definitions of the asset model. For more information, see [Defining data properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. 
For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "title": "AssetModelProperties", "type": "array" }, + "AssetModelType": { + "markdownDescription": "The type of asset model.\n\n- *ASSET_MODEL* \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.\n- *COMPONENT_MODEL* \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.", + "title": "AssetModelType", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -121570,6 +124893,11 @@ "AWS::IoTSiteWise::AssetModel.AssetModelCompositeModel": { "additionalProperties": false, "properties": { + "ComposedAssetModelId": { + "markdownDescription": "The ID of a component model which is reused to create this composite model.", + "title": "ComposedAssetModelId", + "type": "string" + }, "CompositeModelProperties": { "items": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.AssetModelProperty" @@ -121583,11 +124911,34 @@ "title": "Description", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of a composite model on this asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the asset model composite model.\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "Name": { "markdownDescription": "The name of the composite model.", "title": "Name", "type": "string" }, + "ParentAssetModelCompositeModelExternalId": { + "markdownDescription": "The external ID of the parent asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> If `ParentCompositeModelExternalId` is specified, this value overrides the value of `ExternalId` , if both are included.", + "title": "ParentAssetModelCompositeModelExternalId", + "type": "string" + }, + "Path": { + "items": { + "type": "string" + }, + "markdownDescription": "The structured path to the property from the root of the asset using property names. Path is used as the ID if the asset model is a derived composite model.", + "title": "Path", + "type": "array" + }, "Type": { "markdownDescription": "The type of the composite model. For alarm composite models, this type is `AWS/ALARM` .", "title": "Type", @@ -121604,24 +124955,33 @@ "additionalProperties": false, "properties": { "ChildAssetModelId": { - "markdownDescription": "The Id of the asset model.", + "markdownDescription": "The ID of the asset model, in UUID format. All assets in this hierarchy must be instances of the `childAssetModelId` asset model. AWS IoT SiteWise will always return the actual asset model ID for this value. 
However, when you are specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) , you may provide either the asset model ID or else `externalId:` followed by the asset model's external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "title": "ChildAssetModelId", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID (if any) provided in the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) operation. You can assign an external ID by specifying this value as part of a call to [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) . However, you can't change the external ID if one is already assigned. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the asset model hierarchy. This ID is a `hierarchyId` .\n\n> This is a return value and can't be set. \n\n- If you are callling [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) to create a *new* hierarchy: You can specify its ID here, if desired. AWS IoT SiteWise automatically generates a unique ID for you, so this parameter is never required. However, if you prefer to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique.\n- If you are calling UpdateAssetModel to modify an *existing* hierarchy: This can be either the actual ID in UUID format, or else `externalId:` followed by the external ID, if it has one. For more information, see [Referencing objects with external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-id-references) in the *AWS IoT SiteWise User Guide* .", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .\n\nThe maximum length is 256 characters, with the pattern `[^\\u0000-\\u001F\\u007F]+`", + "markdownDescription": "The `LogicalID` of the asset model hierarchy. This ID is a `hierarchyLogicalId` .", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model hierarchy.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation.", "title": "Name", "type": "string" } }, "required": [ "ChildAssetModelId", - "LogicalId", "Name" ], "type": "object" @@ -121630,7 +124990,7 @@ "additionalProperties": false, "properties": { "DataType": { - "markdownDescription": "The data type of the asset model property. 
The value can be `STRING` , `INTEGER` , `DOUBLE` , `BOOLEAN` , or `STRUCT` .", + "markdownDescription": "The data type of the asset model property.", "title": "DataType", "type": "string" }, @@ -121639,19 +124999,29 @@ "title": "DataTypeSpec", "type": "string" }, + "ExternalId": { + "markdownDescription": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "ExternalId", + "type": "string" + }, + "Id": { + "markdownDescription": "The ID of the property.\n\n> This is a return value and can't be set.", + "title": "Id", + "type": "string" + }, "LogicalId": { - "markdownDescription": "The `LogicalID` of the asset model property.\n\nThe maximum length is 256 characters, with the pattern `[^\\\\u0000-\\\\u001F\\\\u007F]+` .", + "markdownDescription": "The `LogicalID` of the asset model property.", "title": "LogicalId", "type": "string" }, "Name": { - "markdownDescription": "The name of the asset model property.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "The name of the asset model property.", "title": "Name", "type": "string" }, "Type": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyType", - "markdownDescription": "Contains a property type, which can be one of `Attribute` , `Measurement` , `Metric` , or `Transform` .", + "markdownDescription": "Contains a property type, which can be one of `attribute` , `measurement` , `metric` , or `transform` .", "title": "Type" }, "Unit": { @@ -121662,7 +125032,6 @@ }, "required": [ "DataType", - "LogicalId", "Name", "Type" ], @@ -121683,7 +125052,7 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The friendly name of the variable to be used in the expression.\n\nThe maximum length is 64 characters with the pattern `^[a-z][a-z0-9_]*$` .", + "markdownDescription": "The friendly name of the variable to be used in the expression.", "title": "Name", "type": "string" }, @@ -121739,22 +125108,36 @@ }, "type": "object" }, + "AWS::IoTSiteWise::AssetModel.PropertyPathDefinition": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the path segment.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, "AWS::IoTSiteWise::AssetModel.PropertyType": { "additionalProperties": false, "properties": { "Attribute": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Attribute", - "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [industrial IoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.\n\nThis is required if the `TypeName` is `Attribute` and has a `DefaultValue` .", + "markdownDescription": "Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an [IIoT](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/Internet_of_things#Industrial_applications) wind turbine.", "title": "Attribute" }, "Metric": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Metric", - "markdownDescription": "Specifies an asset metric property. 
A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.\n\nThis is required if the `TypeName` is `Metric` .", + "markdownDescription": "Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.", "title": "Metric" }, "Transform": { "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.Transform", - "markdownDescription": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.\n\nThis is required if the `TypeName` is `Transform` .", + "markdownDescription": "Specifies an asset transform property. A transform contains a mathematical expression that maps a property's data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.", "title": "Transform" }, "TypeName": { @@ -121813,20 +125196,45 @@ "AWS::IoTSiteWise::AssetModel.VariableValue": { "additionalProperties": false, "properties": { + "HierarchyExternalId": { + "markdownDescription": "The external ID of the hierarchy being referenced. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "HierarchyExternalId", + "type": "string" + }, + "HierarchyId": { + "markdownDescription": "The ID of the hierarchy to query for the property ID. You can use the hierarchy's name instead of the hierarchy's ID. If the hierarchy has an external ID, you can specify `externalId:` followed by the external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\nYou use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same `propertyId` . For example, you might have separately grouped assets that come from the same asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "title": "HierarchyId", + "type": "string" + }, "HierarchyLogicalId": { - "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. For more information, see [Defining relationships between assets](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", + "markdownDescription": "The `LogicalID` of the hierarchy to query for the `PropertyLogicalID` .\n\nYou use a `hierarchyLogicalID` instead of a model ID because you can have several hierarchies using the same model and therefore the same property. For example, you might have separately grouped assets that come from the same asset model. 
For more information, see [Defining relationships between asset models (hierarchies)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .", "title": "HierarchyLogicalId", "type": "string" }, + "PropertyExternalId": { + "markdownDescription": "The external ID of the property being referenced. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", + "title": "PropertyExternalId", + "type": "string" + }, + "PropertyId": { + "markdownDescription": "The ID of the property to use as the variable. You can use the property `name` if it's from the same asset model. If the property has an external ID, you can specify `externalId:` followed by the external ID. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> This is a return value and can't be set.", + "title": "PropertyId", + "type": "string" + }, "PropertyLogicalId": { - "markdownDescription": "The `LogicalID` of the property to use as the variable.", + "markdownDescription": "The `LogicalID` of the property that is being referenced.", "title": "PropertyLogicalId", "type": "string" + }, + "PropertyPath": { + "items": { + "$ref": "#/definitions/AWS::IoTSiteWise::AssetModel.PropertyPathDefinition" + }, + "markdownDescription": "The path of the property. Each step of the path is the name of the step. See the following example:\n\n`PropertyPath: Name: AssetModelName Name: Composite1 Name: NestedComposite`", + "title": "PropertyPath", + "type": "array" } }, - "required": [ - "PropertyLogicalId" - ], "type": "object" }, "AWS::IoTSiteWise::Dashboard": { @@ -121965,7 +125373,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.\n\nThe maximum length is 256 characters with the pattern `[^\\u0000-\\u001F\\u007F]+` .", + "markdownDescription": "A unique, friendly name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -122019,7 +125427,7 @@ "type": "string" }, "CapabilityNamespace": { - "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .\n\nThe maximum length is 512 characters with the pattern `^[a-zA-Z]+:[a-zA-Z]+:[0-9]+$` .", + "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .", "title": "CapabilityNamespace", "type": "string" } @@ -122041,6 +125449,11 @@ "$ref": "#/definitions/AWS::IoTSiteWise::Gateway.GreengrassV2", "markdownDescription": "A gateway that runs on AWS IoT Greengrass V2 .", "title": "GreengrassV2" + }, + "SiemensIE": { + "$ref": "#/definitions/AWS::IoTSiteWise::Gateway.SiemensIE", + "markdownDescription": "", + "title": "SiemensIE" } }, "type": "object" @@ -122049,7 +125462,7 @@ "additionalProperties": false, "properties": { "GroupArn": { - "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. 
For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/latest/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/latest/apireference/getgroup-get.html) in the *AWS IoT Greengrass API Reference* .", + "markdownDescription": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* .", "title": "GroupArn", "type": "string" } @@ -122073,6 +125486,20 @@ ], "type": "object" }, + "AWS::IoTSiteWise::Gateway.SiemensIE": { + "additionalProperties": false, + "properties": { + "IotCoreThingName": { + "markdownDescription": "", + "title": "IotCoreThingName", + "type": "string" + } + }, + "required": [ + "IotCoreThingName" + ], + "type": "object" + }, "AWS::IoTSiteWise::Portal": { "additionalProperties": false, "properties": { @@ -122119,7 +125546,7 @@ "type": "string" }, "PortalAuthMode": { - "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center , you must enable IAM Identity Center . For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management ( IAM ) to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", + "markdownDescription": "The service to use to authenticate users to the portal. Choose from the following options:\n\n- `SSO` \u2013 The portal uses AWS IAM Identity Center to authenticate users and manage user permissions. Before you can create a portal that uses IAM Identity Center, you must enable IAM Identity Center. For more information, see [Enabling IAM Identity Center](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) in the *AWS IoT SiteWise User Guide* . 
This option is only available in AWS Regions other than the China Regions.\n- `IAM` \u2013 The portal uses AWS Identity and Access Management to authenticate users and manage user permissions.\n\nYou can't change this value after you create a portal.\n\nDefault: `SSO`", "title": "PortalAuthMode", "type": "string" }, @@ -124806,6 +128233,11 @@ "title": "Name", "type": "string" }, + "Positioning": { + "markdownDescription": "FPort values for the GNSS, Stream, and ClockSync functions of the positioning information.", + "title": "Positioning", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -124892,6 +128324,41 @@ ], "type": "object" }, + "AWS::IoTWireless::WirelessDevice.Application": { + "additionalProperties": false, + "properties": { + "DestinationName": { + "markdownDescription": "The name of the position data destination that describes the IoT rule that processes the device's position data.", + "title": "DestinationName", + "type": "string" + }, + "FPort": { + "markdownDescription": "The name of the new destination for the device.", + "title": "FPort", + "type": "number" + }, + "Type": { + "markdownDescription": "Application type, which can be specified to obtain real-time position information of your LoRaWAN device.", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, + "AWS::IoTWireless::WirelessDevice.FPorts": { + "additionalProperties": false, + "properties": { + "Applications": { + "items": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.Application" + }, + "markdownDescription": "LoRaWAN application configuration, which can be used to perform geolocation.", + "title": "Applications", + "type": "array" + } + }, + "type": "object" + }, "AWS::IoTWireless::WirelessDevice.LoRaWANDevice": { "additionalProperties": false, "properties": { @@ -124915,6 +128382,11 @@ "title": "DeviceProfileId", "type": "string" }, + "FPorts": { + "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.FPorts", + "markdownDescription": "List of FPort assigned for different LoRaWAN application packages to use.", + "title": "FPorts" + }, "OtaaV10x": { "$ref": "#/definitions/AWS::IoTWireless::WirelessDevice.OtaaV10x", "markdownDescription": "OTAA device object for create APIs for v1.0.x", @@ -125645,6 +129117,14 @@ "title": "ServiceExecutionRoleArn", "type": "string" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + }, "WorkerConfiguration": { "$ref": "#/definitions/AWS::KafkaConnect::Connector.WorkerConfiguration", "markdownDescription": "The worker configurations that are in use with the connector.", @@ -126025,6 +129505,239 @@ }, "type": "object" }, + "AWS::KafkaConnect::CustomPlugin": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ContentType": { + "markdownDescription": "The format of the plugin file.", + "title": "ContentType", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the custom plugin.", + "title": "Description", + "type": "string" + }, + "Location": { + "$ref": 
"#/definitions/AWS::KafkaConnect::CustomPlugin.CustomPluginLocation", + "markdownDescription": "Information about the location of the custom plugin.", + "title": "Location" + }, + "Name": { + "markdownDescription": "The name of the custom plugin.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ContentType", + "Location", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::KafkaConnect::CustomPlugin" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::KafkaConnect::CustomPlugin.CustomPluginFileDescription": { + "additionalProperties": false, + "properties": { + "FileMd5": { + "markdownDescription": "The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.", + "title": "FileMd5", + "type": "string" + }, + "FileSize": { + "markdownDescription": "The size in bytes of the custom plugin file. You can use it to validate the file.", + "title": "FileSize", + "type": "number" + } + }, + "type": "object" + }, + "AWS::KafkaConnect::CustomPlugin.CustomPluginLocation": { + "additionalProperties": false, + "properties": { + "S3Location": { + "$ref": "#/definitions/AWS::KafkaConnect::CustomPlugin.S3Location", + "markdownDescription": "The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.", + "title": "S3Location" + } + }, + "required": [ + "S3Location" + ], + "type": "object" + }, + "AWS::KafkaConnect::CustomPlugin.S3Location": { + "additionalProperties": false, + "properties": { + "BucketArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of an S3 bucket.", + "title": "BucketArn", + "type": "string" + }, + "FileKey": { + "markdownDescription": "The file key for an object in an S3 bucket.", + "title": "FileKey", + "type": "string" + }, + "ObjectVersion": { + "markdownDescription": "The version of an object in an S3 bucket.", + "title": "ObjectVersion", + "type": "string" + } + }, + "required": [ + "BucketArn", + "FileKey" + ], + "type": "object" + }, + "AWS::KafkaConnect::WorkerConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of a worker configuration.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the worker configuration.", + "title": "Name", + "type": "string" + }, + "PropertiesFileContent": { + "markdownDescription": "Base64 encoded contents of the connect-distributed.properties file.", + "title": "PropertiesFileContent", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Name", + "PropertiesFileContent" + ], + "type": "object" + }, + "Type": { + "enum": [ + 
"AWS::KafkaConnect::WorkerConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::Kendra::DataSource": { "additionalProperties": false, "properties": { @@ -126996,7 +130709,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- */myapp/config/** \u2014All files inside config directory.\n- ***/*.png* \u2014All .png files in all directories.\n- ***/*.{png, ico, md}* \u2014All .png, .ico or .md files in all directories.\n- */myapp/src/**/*.ts* \u2014All .ts files inside src directory (and all its subdirectories).\n- ***/!(*.module).ts* \u2014All .ts files but not .module.ts\n- **.png , *.jpg* \u2014All PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** \u2014All files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** \u2014All internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "ExclusionPatterns", "type": "array" }, @@ -127004,7 +130717,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. 
Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precendence and the document is not indexed. Examples of glob patterns include:\n\n- */myapp/config/** \u2014All files inside config directory.\n- ***/*.png* \u2014All .png files in all directories.\n- ***/*.{png, ico, md}* \u2014All .png, .ico or .md files in all directories.\n- */myapp/src/**/*.ts* \u2014All .ts files inside src directory (and all its subdirectories).\n- ***/!(*.module).ts* \u2014All .ts files but not .module.ts\n- **.png , *.jpg* \u2014All PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** \u2014All files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** \u2014All internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "InclusionPatterns", "type": "array" }, @@ -127772,6 +131485,11 @@ "title": "IndexId", "type": "string" }, + "LanguageCode": { + "markdownDescription": "The code for a language. This shows a supported language for the FAQ document as part of the summary information for FAQs. English is supported by default. For more information on supported languages, including their codes, see [Adding documents in languages other than English](https://docs.aws.amazon.com/kendra/latest/dg/in-adding-languages.html) .", + "title": "LanguageCode", + "type": "string" + }, "Name": { "markdownDescription": "The name that you assigned the FAQ when you created or updated the FAQ.", "title": "Name", @@ -128091,7 +131809,7 @@ "type": "string" }, "Freshness": { - "markdownDescription": "Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. You can only set the `Freshness` field on one `DATE` type field. Only applies to `DATE` fields.", + "markdownDescription": "Indicates that this field determines how \"fresh\" a document is. For example, if document 1 was created on November 5, and document 2 was created on October 31, document 1 is \"fresher\" than document 2. 
Only applies to `DATE` fields.", "title": "Freshness", "type": "boolean" }, @@ -128101,7 +131819,7 @@ "type": "number" }, "RankOrder": { - "markdownDescription": "Determines how values should be interpreted.\n\nWhen the `RankOrder` field is `ASCENDING` , higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.\n\nWhen the `RankOrder` field is `DESCENDING` , lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.\n\nOnly applies to `LONG` and `DOUBLE` fields.", + "markdownDescription": "Determines how values should be interpreted.\n\nWhen the `RankOrder` field is `ASCENDING` , higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.\n\nWhen the `RankOrder` field is `DESCENDING` , lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.\n\nOnly applies to `LONG` fields.", "title": "RankOrder", "type": "string" }, @@ -129390,7 +133108,7 @@ }, "SqlApplicationConfiguration": { "$ref": "#/definitions/AWS::KinesisAnalyticsV2::Application.SqlApplicationConfiguration", - "markdownDescription": "The creation and update parameters for a SQL-based Managed Service for Apache Flink application.", + "markdownDescription": "The creation and update parameters for a SQL-based Kinesis Data Analytics application.", "title": "SqlApplicationConfiguration" }, "VpcConfigurations": { @@ -129661,7 +133379,7 @@ "title": "KinesisStreamsInput" }, "NamePrefix": { - "markdownDescription": "The name prefix to use when creating an in-application stream. Suppose that you specify a prefix \" `MyInApplicationStream` .\" Managed Service for Apache Flink then creates one or more (as per the `InputParallelism` count you specified) in-application streams with the names \" `MyInApplicationStream_001` ,\" \" `MyInApplicationStream_002` ,\" and so on.", + "markdownDescription": "The name prefix to use when creating an in-application stream. Suppose that you specify a prefix \" `MyInApplicationStream` .\" Kinesis Data Analytics then creates one or more (as per the `InputParallelism` count you specified) in-application streams with the names \" `MyInApplicationStream_001` ,\" \" `MyInApplicationStream_002` ,\" and so on.", "title": "NamePrefix", "type": "string" } @@ -130208,7 +133926,7 @@ }, "Output": { "$ref": "#/definitions/AWS::KinesisAnalyticsV2::ApplicationOutput.Output", - "markdownDescription": "Describes a SQL-based Managed Service for Apache Flink application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream.", + "markdownDescription": "Describes a SQL-based Kinesis Data Analytics application's output configuration, in which you identify an in-application stream and a destination where you want the in-application stream data to be written. 
The destination can be a Kinesis data stream or a Kinesis Data Firehose delivery stream.", "title": "Output" } }, @@ -130368,7 +134086,7 @@ }, "ReferenceDataSource": { "$ref": "#/definitions/AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource", - "markdownDescription": "For a SQL-based Managed Service for Apache Flink application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table.", + "markdownDescription": "For a SQL-based Kinesis Data Analytics application, describes the reference data source by providing the source information (Amazon S3 bucket name and object key name), the resulting in-application table name that is created, and the necessary schema to map the data elements in the Amazon S3 object to the in-application table.", "title": "ReferenceDataSource" } }, @@ -130660,6 +134378,11 @@ "markdownDescription": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "title": "S3DestinationConfiguration" }, + "SnowflakeDestinationConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration", + "markdownDescription": "Configure Snowflake destination", + "title": "SnowflakeDestinationConfiguration" + }, "SplunkDestinationConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SplunkDestinationConfiguration", "markdownDescription": "The configuration of a destination in Splunk for the delivery stream.", @@ -130669,7 +134392,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.", + "markdownDescription": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. 
For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", "title": "Tags", "type": "array" } @@ -131211,6 +134934,11 @@ "title": "CompressionFormat", "type": "string" }, + "CustomTimeZone": { + "markdownDescription": "The time zone you prefer. UTC is the default.", + "title": "CustomTimeZone", + "type": "string" + }, "DataFormatConversionConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.DataFormatConversionConfiguration", "markdownDescription": "The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.", @@ -131231,6 +134959,11 @@ "title": "ErrorOutputPrefix", "type": "string" }, + "FileExtension": { + "markdownDescription": "Specify a file extension. It will override the default file extension", + "title": "FileExtension", + "type": "string" + }, "Prefix": { "markdownDescription": "The `YYYY/MM/DD/HH` time format prefix is automatically used for delivered Amazon S3 files. For more information, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", "title": "Prefix", @@ -131853,6 +135586,153 @@ }, "type": "object" }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeDestinationConfiguration": { + "additionalProperties": false, + "properties": { + "AccountUrl": { + "markdownDescription": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) . 
Note that the protocol (https://) and port number are optional.", + "title": "AccountUrl", + "type": "string" + }, + "CloudWatchLoggingOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", + "markdownDescription": "", + "title": "CloudWatchLoggingOptions" + }, + "ContentColumnName": { + "markdownDescription": "The name of the record content column", + "title": "ContentColumnName", + "type": "string" + }, + "DataLoadingOption": { + "markdownDescription": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", + "title": "DataLoadingOption", + "type": "string" + }, + "Database": { + "markdownDescription": "All data in Snowflake is maintained in databases.", + "title": "Database", + "type": "string" + }, + "KeyPassphrase": { + "markdownDescription": "Passphrase to decrypt the private key when the key is encrypted. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "KeyPassphrase", + "type": "string" + }, + "MetaDataColumnName": { + "markdownDescription": "The name of the record metadata column", + "title": "MetaDataColumnName", + "type": "string" + }, + "PrivateKey": { + "markdownDescription": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", + "title": "PrivateKey", + "type": "string" + }, + "ProcessingConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", + "markdownDescription": "", + "title": "ProcessingConfiguration" + }, + "RetryOptions": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions", + "markdownDescription": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "RetryOptions" + }, + "RoleARN": { + "markdownDescription": "The Amazon Resource Name (ARN) of the Snowflake role", + "title": "RoleARN", + "type": "string" + }, + "S3BackupMode": { + "markdownDescription": "Choose an S3 backup mode", + "title": "S3BackupMode", + "type": "string" + }, + "S3Configuration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.S3DestinationConfiguration", + "markdownDescription": "", + "title": "S3Configuration" + }, + "Schema": { + "markdownDescription": "Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views", + "title": "Schema", + "type": "string" + }, + "SnowflakeRoleConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration", + "markdownDescription": "Optionally configure a Snowflake role. Otherwise the default user role will be used.", + "title": "SnowflakeRoleConfiguration" + }, + "SnowflakeVpcConfiguration": { + "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration", + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. 
For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "SnowflakeVpcConfiguration" + }, + "Table": { + "markdownDescription": "All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.", + "title": "Table", + "type": "string" + }, + "User": { + "markdownDescription": "User login name for the Snowflake account.", + "title": "User", + "type": "string" + } + }, + "required": [ + "AccountUrl", + "Database", + "PrivateKey", + "RoleARN", + "S3Configuration", + "Schema", + "Table", + "User" + ], + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRetryOptions": { + "additionalProperties": false, + "properties": { + "DurationInSeconds": { + "markdownDescription": "the time period where Firehose will retry sending data to the chosen HTTP endpoint.", + "title": "DurationInSeconds", + "type": "number" + } + }, + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeRoleConfiguration": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Enable Snowflake role", + "title": "Enabled", + "type": "boolean" + }, + "SnowflakeRole": { + "markdownDescription": "The Snowflake role you wish to configure", + "title": "SnowflakeRole", + "type": "string" + } + }, + "type": "object" + }, + "AWS::KinesisFirehose::DeliveryStream.SnowflakeVpcConfiguration": { + "additionalProperties": false, + "properties": { + "PrivateLinkVpceId": { + "markdownDescription": "The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see [Amazon PrivateLink & Snowflake](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-security-privatelink)", + "title": "PrivateLinkVpceId", + "type": "string" + } + }, + "required": [ + "PrivateLinkVpceId" + ], + "type": "object" + }, "AWS::KinesisFirehose::DeliveryStream.SplunkBufferingHints": { "additionalProperties": false, "properties": { @@ -133107,6 +136987,11 @@ "Properties": { "additionalProperties": false, "properties": { + "HybridAccessEnabled": { + "markdownDescription": "Indicates whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.", + "title": "HybridAccessEnabled", + "type": "boolean" + }, "ResourceArn": { "markdownDescription": "The Amazon Resource Name (ARN) of the resource.", "title": "ResourceArn", @@ -133493,7 +137378,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -133913,7 +137798,7 @@ "title": "FilterCriteria" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -134854,7 +138739,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -135094,7 +138979,7 @@ "type": "string" }, "FunctionName": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* - `MyFunction` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Partial ARN* - `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it is limited to 64 characters in length.", "title": "FunctionName", "type": "string" }, @@ -140414,6 +144299,14 @@ "AWS::Location::Map.MapConfiguration": { "additionalProperties": false, "properties": { + "CustomLayers": { + "items": { + "type": "string" + }, + "markdownDescription": "Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as the `POI` layer for the VectorEsriNavigation style.\n\n> Currenlty only `VectorEsriNavigation` supports CustomLayers. For more information, see [Custom Layers](https://docs.aws.amazon.com//location/latest/developerguide/map-concepts.html#map-custom-layers) .", + "title": "CustomLayers", + "type": "array" + }, "PoliticalView": { "markdownDescription": "Specifies the map political view selected from an available data provider.", "title": "PoliticalView", @@ -144316,6 +148209,17 @@ ], "type": "object" }, + "AWS::MSK::Replicator.ReplicationStartingPosition": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "", + "title": "Type", + "type": "string" + } + }, + "type": "object" + }, "AWS::MSK::Replicator.TopicReplication": { "additionalProperties": false, "properties": { @@ -144334,6 +148238,11 @@ "title": "DetectAndCopyNewTopics", "type": "boolean" }, + "StartingPosition": { + "$ref": "#/definitions/AWS::MSK::Replicator.ReplicationStartingPosition", + "markdownDescription": "", + "title": "StartingPosition" + }, "TopicsToExclude": { "items": { "type": "string" @@ -154648,6 +158557,10 @@ "type": "array" } }, + "required": [ + "ChannelGroupName", + "ChannelName" + ], "type": "object" }, "Type": { @@ -154666,7 +158579,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154740,6 +158654,9 @@ "type": "array" } }, + "required": [ + "ChannelGroupName" + ], "type": "object" }, "Type": { @@ -154758,7 +158675,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -154814,6 +158732,8 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", "Policy" ], "type": "object" @@ -154935,7 +158855,9 @@ } }, "required": [ - "ContainerType" + "ChannelGroupName", + "ChannelName", + "OriginEndpointName" ], "type": "object" }, @@ -155304,6 +159226,9 @@ } }, "required": [ + "ChannelGroupName", + "ChannelName", + "OriginEndpointName", "Policy" ], "type": "object" @@ -170145,6 +174070,11 @@ "title": "Identity", "type": "string" }, + "OrchestrationSendingRoleArn": { + "markdownDescription": "", + "title": "OrchestrationSendingRoleArn", + "type": "string" + }, "RoleArn": { "markdownDescription": "The ARN of the AWS Identity and Access Management (IAM) role that you want Amazon Pinpoint to use when it submits email-related event data for the channel.", "title": "RoleArn", @@ -186196,6 +190126,14 @@ "markdownDescription": "", "title": "Definition" }, + "LinkEntities": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of analysis Amazon Resource Names (ARNs) to be linked to the dashboard.", + "title": "LinkEntities", + "type": "array" + }, "LinkSharingConfiguration": { "$ref": "#/definitions/AWS::QuickSight::Dashboard.LinkSharingConfiguration", "markdownDescription": "A structure that contains the link sharing configurations that you want to apply overrides to.", @@ -214695,7 +218633,7 @@ "type": "boolean" }, "EnableHttpEndpoint": { - "markdownDescription": "A value that indicates whether to enable the HTTP endpoint for 
an Aurora Serverless DB cluster. By default, the HTTP endpoint is disabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless DB cluster. You can also query your database from inside the RDS console with the query editor.\n\nFor more information, see [Using the Data API for Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", + "markdownDescription": "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EnableHttpEndpoint", "type": "boolean" }, @@ -214710,7 +218648,7 @@ "type": "string" }, "EngineMode": { - "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only supports Aurora Serverless v1.\n\nLimitations and requirements apply to some DB engine modes. For more information, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n- [Limitations of parallel query](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n- [Limitations of Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n\nValid for: Aurora DB clusters only", + "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EngineMode", "type": "string" }, @@ -214850,7 +218788,7 @@ "type": "boolean" }, "StorageType": { - "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", + "markdownDescription": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", "title": "StorageType", "type": "string" }, @@ -215327,7 +219265,7 @@ "title": "Endpoint" }, "Engine": { - "markdownDescription": "The name of the database engine that you want to use for this DB instance.\n\nNot every database engine is available in every AWS Region.\n\n> When you are creating a DB instance, the `Engine` property is required. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "markdownDescription": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can change the architecture of an Oracle database from the non-container database (CDB) architecture to the CDB architecture by updating the `Engine` value in your templates from `oracle-ee` or `oracle-ee-cdb` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. 
\n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Engine", "type": "string" }, @@ -215495,7 +219433,7 @@ "type": "number" }, "StorageType": { - "markdownDescription": "Specifies the storage type to be associated with the DB instance.\n\nValid values: `gp2 | gp3 | io1 | standard`\n\nThe `standard` value is also known as magnetic.\n\nIf you specify `io1` or `gp3` , you must also include a value for the `Iops` parameter.\n\nDefault: `io1` if the `Iops` parameter is specified, otherwise `gp2`\n\nFor more information, see [Amazon RDS DB Instance Storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Aurora data is stored in the cluster volume, which is a single, virtual volume that uses solid state drives (SSDs).", + "markdownDescription": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", "title": "StorageType", "type": "string" }, @@ -216392,42 +220330,148 @@ "Properties": { "additionalProperties": false, "properties": { - "DBSubnetGroupDescription": { - "markdownDescription": "The description for the DB subnet group.", - "title": "DBSubnetGroupDescription", - "type": "string" + "DBSubnetGroupDescription": { + "markdownDescription": "The description for the DB subnet group.", + "title": "DBSubnetGroupDescription", + "type": "string" + }, + "DBSubnetGroupName": { + "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. 
Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "title": "DBSubnetGroupName", + "type": "string" + }, + "SubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The EC2 Subnet IDs for the DB subnet group.", + "title": "SubnetIds", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "DBSubnetGroupDescription", + "SubnetIds" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::RDS::DBSubnetGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::RDS::EventSubscription": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Enabled": { + "markdownDescription": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", + "title": "Enabled", + "type": "boolean" }, - "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", - "title": "DBSubnetGroupName", + "EventCategories": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", + "title": "EventCategories", + "type": "array" + }, + "SnsTopicArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", + "title": "SnsTopicArn", "type": "string" }, - "SubnetIds": { + "SourceIds": { "items": { "type": "string" }, - "markdownDescription": "The EC2 Subnet IDs for the DB subnet group.", - "title": "SubnetIds", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "title": "SourceIds", "type": "array" }, + "SourceType": { + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "title": "SourceType", + "type": "string" + }, + "SubscriptionName": { + "markdownDescription": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", + "title": "SubscriptionName", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "An optional array of key-value pairs to apply to this subscription.", "title": "Tags", "type": "array" } }, "required": [ - "DBSubnetGroupDescription", - "SubnetIds" + "SnsTopicArn" ], "type": "object" }, "Type": { "enum": [ - "AWS::RDS::DBSubnetGroup" + "AWS::RDS::EventSubscription" ], "type": "string" }, @@ -216446,7 +220490,7 @@ ], "type": "object" }, - "AWS::RDS::EventSubscription": { + "AWS::RDS::GlobalCluster": { "additionalProperties": false, "properties": { "Condition": { @@ -216481,59 +220525,42 @@ "Properties": { "additionalProperties": false, "properties": { - "Enabled": { - "markdownDescription": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", - "title": "Enabled", + "DeletionProtection": { + "markdownDescription": "Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.", + "title": "DeletionProtection", "type": "boolean" }, - "EventCategories": { - "items": { - "type": "string" - }, - "markdownDescription": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", - "title": "EventCategories", - "type": "array" - }, - "SnsTopicArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. 
SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", - "title": "SnsTopicArn", + "Engine": { + "markdownDescription": "The database engine to use for this global database cluster.\n\nValid Values: `aurora-mysql | aurora-postgresql`\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine of the source DB cluster.", + "title": "Engine", "type": "string" }, - "SourceIds": { - "items": { - "type": "string" - }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", - "title": "SourceIds", - "type": "array" + "EngineVersion": { + "markdownDescription": "The engine version to use for this global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.", + "title": "EngineVersion", + "type": "string" }, - "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", - "title": "SourceType", + "GlobalClusterIdentifier": { + "markdownDescription": "The cluster identifier for this global database cluster. 
This parameter is stored as a lowercase string.", + "title": "GlobalClusterIdentifier", "type": "string" }, - "SubscriptionName": { - "markdownDescription": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", - "title": "SubscriptionName", + "SourceDBClusterIdentifier": { + "markdownDescription": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`", + "title": "SourceDBClusterIdentifier", "type": "string" }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "An optional array of key-value pairs to apply to this subscription.", - "title": "Tags", - "type": "array" + "StorageEncrypted": { + "markdownDescription": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.", + "title": "StorageEncrypted", + "type": "boolean" } }, - "required": [ - "SnsTopicArn" - ], "type": "object" }, "Type": { "enum": [ - "AWS::RDS::EventSubscription" + "AWS::RDS::GlobalCluster" ], "type": "string" }, @@ -216547,12 +220574,11 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, - "AWS::RDS::GlobalCluster": { + "AWS::RDS::Integration": { "additionalProperties": false, "properties": { "Condition": { @@ -216587,42 +220613,55 @@ "Properties": { "additionalProperties": false, "properties": { - "DeletionProtection": { - "markdownDescription": "Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled.", - "title": "DeletionProtection", - "type": "boolean" + "AdditionalEncryptionContext": { + "additionalProperties": true, + "markdownDescription": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "AdditionalEncryptionContext", + "type": "object" }, - "Engine": { - "markdownDescription": "The database engine to use for this global database cluster.\n\nValid Values: `aurora-mysql | aurora-postgresql`\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine of the source DB cluster.", - "title": "Engine", + "IntegrationName": { + "markdownDescription": "The name of the integration.", + "title": "IntegrationName", "type": "string" }, - "EngineVersion": { - "markdownDescription": "The engine version to use for this global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.", - "title": "EngineVersion", + "KMSKeyId": { + "markdownDescription": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. 
If you don't specify an encryption key, RDS uses a default AWS owned key.", + "title": "KMSKeyId", "type": "string" }, - "GlobalClusterIdentifier": { - "markdownDescription": "The cluster identifier for this global database cluster. This parameter is stored as a lowercase string.", - "title": "GlobalClusterIdentifier", + "SourceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the database to use as the source for replication.", + "title": "SourceArn", "type": "string" }, - "SourceDBClusterIdentifier": { - "markdownDescription": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`", - "title": "SourceDBClusterIdentifier", - "type": "string" + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "title": "Tags", + "type": "array" }, - "StorageEncrypted": { - "markdownDescription": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.", - "title": "StorageEncrypted", - "type": "boolean" + "TargetArn": { + "markdownDescription": "The ARN of the Redshift data warehouse to use as the target for replication.", + "title": "TargetArn", + "type": "string" } }, + "required": [ + "SourceArn", + "TargetArn" + ], "type": "object" }, "Type": { "enum": [ - "AWS::RDS::GlobalCluster" + "AWS::RDS::Integration" ], "type": "string" }, @@ -216636,7 +220675,8 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, @@ -217296,7 +221336,7 @@ "type": "string" }, "Port": { - "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values: `1150-65535`", + "markdownDescription": "The port number on which the cluster accepts incoming connections.\n\nThe cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.\n\nDefault: `5439`\n\nValid Values:\n\n- For clusters with ra3 nodes - Select a port within the ranges `5431-5455` or `8191-8215` . (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)\n- For clusters with ds2 or dc2 nodes - Select a port within the range `1150-65535` .", "title": "Port", "type": "number" }, @@ -218359,6 +222399,11 @@ "Properties": { "additionalProperties": false, "properties": { + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret. 
You can only use this parameter if `ManageAdminPassword` is `true` .", + "title": "AdminPasswordSecretKmsKeyId", + "type": "string" + }, "AdminUserPassword": { "markdownDescription": "The password of the administrator for the primary database created in the namespace.", "title": "AdminUserPassword", @@ -218410,11 +222455,26 @@ "title": "LogExports", "type": "array" }, + "ManageAdminPassword": { + "markdownDescription": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials. You can't use `AdminUserPassword` if `ManageAdminPassword` is true. If `ManageAdminPassword` is `false` or not set, Amazon Redshift uses `AdminUserPassword` for the admin user account's password.", + "title": "ManageAdminPassword", + "type": "boolean" + }, "NamespaceName": { "markdownDescription": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "title": "NamespaceName", "type": "string" }, + "NamespaceResourcePolicy": { + "markdownDescription": "The resource policy that will be attached to the namespace.", + "title": "NamespaceResourcePolicy", + "type": "object" + }, + "RedshiftIdcApplicationArn": { + "markdownDescription": "The ARN for the Redshift application that integrates with IAM Identity Center.", + "title": "RedshiftIdcApplicationArn", + "type": "string" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -218453,6 +222513,16 @@ "AWS::RedshiftServerless::Namespace.Namespace": { "additionalProperties": false, "properties": { + "AdminPasswordSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for the namespace's admin user credentials secret.", + "title": "AdminPasswordSecretArn", + "type": "string" + }, + "AdminPasswordSecretKmsKeyId": { + "markdownDescription": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", + "title": "AdminPasswordSecretKmsKeyId", + "type": "string" + }, "AdminUsername": { "markdownDescription": "The username of the administrator for the first database created in the namespace.", "title": "AdminUsername", @@ -218570,6 +222640,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -218762,6 +222837,11 @@ "title": "EnhancedVpcRouting", "type": "boolean" }, + "MaxCapacity": { + "markdownDescription": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. 
The max capacity is specified in RPUs.", + "title": "MaxCapacity", + "type": "number" + }, "NamespaceName": { "markdownDescription": "The namespace the workgroup is associated with.", "title": "NamespaceName", @@ -220019,15 +224099,9 @@ "type": "string" }, "Policy": { - "additionalProperties": false, + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.PolicyMap", "markdownDescription": "The resiliency policy.", - "patternProperties": { - "^[a-zA-Z0-9]+$": { - "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy" - } - }, - "title": "Policy", - "type": "object" + "title": "Policy" }, "PolicyDescription": { "markdownDescription": "The description for the policy.", @@ -220104,6 +224178,37 @@ ], "type": "object" }, + "AWS::ResilienceHub::ResiliencyPolicy.PolicyMap": { + "additionalProperties": false, + "properties": { + "AZ": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for Availability Zone disruption.", + "title": "AZ" + }, + "Hardware": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for hardware disruption.", + "title": "Hardware" + }, + "Region": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for Regional disruption.", + "title": "Region" + }, + "Software": { + "$ref": "#/definitions/AWS::ResilienceHub::ResiliencyPolicy.FailurePolicy", + "markdownDescription": "Defines the RTO and RPO targets for software disruption.", + "title": "Software" + } + }, + "required": [ + "AZ", + "Hardware", + "Software" + ], + "type": "object" + }, "AWS::ResourceExplorer2::DefaultViewAssociation": { "additionalProperties": false, "properties": { @@ -222266,6 +226371,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . 
Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSet.GeoProximityLocation", + "markdownDescription": "*GeoproximityLocation resource record sets only:* A complex type that lets you control how Route\u00a053 responds to DNS queries based on the geographic origin of the query and your resources.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set that has `*` for `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -222397,6 +226507,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSet.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSet.GeoLocation": { "additionalProperties": false, "properties": { @@ -222418,6 +226548,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSet.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSet.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. 
For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup": { "additionalProperties": false, "properties": { @@ -222544,6 +226700,26 @@ ], "type": "object" }, + "AWS::Route53::RecordSetGroup.Coordinates": { + "additionalProperties": false, + "properties": { + "Latitude": { + "markdownDescription": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "title": "Latitude", + "type": "string" + }, + "Longitude": { + "markdownDescription": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180).", + "title": "Longitude", + "type": "string" + } + }, + "required": [ + "Latitude", + "Longitude" + ], + "type": "object" + }, "AWS::Route53::RecordSetGroup.GeoLocation": { "additionalProperties": false, "properties": { @@ -222565,6 +226741,32 @@ }, "type": "object" }, + "AWS::Route53::RecordSetGroup.GeoProximityLocation": { + "additionalProperties": false, + "properties": { + "AWSRegion": { + "markdownDescription": "The AWS Region the resource you are directing DNS traffic to, is in.", + "title": "AWSRegion", + "type": "string" + }, + "Bias": { + "markdownDescription": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "title": "Bias", + "type": "number" + }, + "Coordinates": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.Coordinates", + "markdownDescription": "Contains the longitude and latitude for a geographic region.", + "title": "Coordinates" + }, + "LocalZoneGroup": { + "markdownDescription": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. 
For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` .", + "title": "LocalZoneGroup", + "type": "string" + } + }, + "type": "object" + }, "AWS::Route53::RecordSetGroup.RecordSet": { "additionalProperties": false, "properties": { @@ -222588,6 +226790,11 @@ "markdownDescription": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. 
\n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", "title": "GeoLocation" }, + "GeoProximityLocation": { + "$ref": "#/definitions/AWS::Route53::RecordSetGroup.GeoProximityLocation", + "markdownDescription": "A complex type that contains information about a geographic location.", + "title": "GeoProximityLocation" + }, "HealthCheckId": { "markdownDescription": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set for which the value of `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "title": "HealthCheckId", @@ -223705,6 +227912,11 @@ "markdownDescription": "The priority of the rule in the rule group. This value must be unique within the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting.", "title": "Priority", "type": "number" + }, + "Qtype": { + "markdownDescription": "The DNS query type you want the rule to evaluate. Allowed values are:\n\n- A: Returns an IPv4 address.\n- AAAA: Returns an IPv6 address.\n- CAA: Restricts CAs that can create SSL/TLS certificates for the domain.\n- CNAME: Returns another domain name.\n- DS: Record that identifies the DNSSEC signing key of a delegated zone.\n- MX: Specifies mail servers.\n- NAPTR: Regular-expression-based rewriting of domain names.\n- NS: Authoritative name servers.\n- PTR: Maps an IP address to a domain name.\n- SOA: Start of authority record for the zone.\n- SPF: Lists the servers authorized to send emails from a domain.\n- SRV: Application-specific values that identify servers.\n- TXT: Verifies email senders and application-specific values.\n- A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be defined as TYPE NUMBER , where the NUMBER can be 1-65334, for example, TYPE28. For more information, see [List of DNS record types](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/List_of_DNS_record_types) .", + "title": "Qtype", + "type": "string" + } }, "required": [ @@ -231778,7 +235990,7 @@ "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *`BLOCK`* : Packages in the `RejectedPatches` list, and packages that include them as dependencies, aren't installed under any circumstances. If a package was installed before it was added to the Rejected patches list, it is considered non-compliant with the patch baseline, and its status is reported as `InstalledRejected` .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. 
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", "title": "RejectedPatchesAction", "type": "string" }, @@ -233843,6 +238055,11 @@ "title": "AppImageConfigName", "type": "string" }, + "JupyterLabAppImageConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig", + "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.", + "title": "JupyterLabAppImageConfig" + }, "KernelGatewayImageConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig", "markdownDescription": "The configuration for the file system and kernels in the SageMaker image.", @@ -233883,6 +238100,56 @@ ], "type": "object" }, + "AWS::SageMaker::AppImageConfig.ContainerConfig": { + "additionalProperties": false, + "properties": { + "ContainerArguments": { + "items": { + "type": "string" + }, + "markdownDescription": "The arguments for the container when you're running the application.", + "title": "ContainerArguments", + "type": "array" + }, + "ContainerEntrypoint": { + "items": { + "type": "string" + }, + "markdownDescription": "The entrypoint used to run the application in the container.", + "title": "ContainerEntrypoint", + "type": "array" + }, + "ContainerEnvironmentVariables": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable" + }, + "markdownDescription": "The environment variables to set in the container", + "title": "ContainerEnvironmentVariables", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SageMaker::AppImageConfig.CustomImageContainerEnvironmentVariable": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key that identifies a container environment variable.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of the container environment variable.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, "AWS::SageMaker::AppImageConfig.FileSystemConfig": { "additionalProperties": false, "properties": { @@ -233904,6 +238171,17 @@ }, "type": "object" }, + "AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig": { + "additionalProperties": false, + "properties": { + "ContainerConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.ContainerConfig", + "markdownDescription": "The configuration used to run the application image container.", + "title": "ContainerConfig" + } + }, + "type": "object" + }, "AWS::SageMaker::AppImageConfig.KernelGatewayImageConfig": { "additionalProperties": false, "properties": { @@ -235063,9 +239341,33 @@ }, "type": "object" }, + "AWS::SageMaker::Domain.DockerSettings": { + "additionalProperties": false, + "properties": { + "EnableDockerAccess": { + "markdownDescription": "Indicates whether the domain can access Docker.", + "title": "EnableDockerAccess", + "type": "string" + }, + "VpcOnlyTrustedAccounts": { + "items": { + "type": "string" + }, + "markdownDescription": "The list of AWS accounts that are trusted when the domain is created in VPC-only mode.", + "title": "VpcOnlyTrustedAccounts", + "type": "array" + } + }, + "type": "object" + }, "AWS::SageMaker::Domain.DomainSettings": { "additionalProperties": false, "properties": { + 
"DockerSettings": { + "$ref": "#/definitions/AWS::SageMaker::Domain.DockerSettings", + "markdownDescription": "A collection of settings that configure the domain's Docker interaction.", + "title": "DockerSettings" + }, "RStudioServerProDomainSettings": { "$ref": "#/definitions/AWS::SageMaker::Domain.RStudioServerProDomainSettings", "markdownDescription": "A collection of settings that configure the `RStudioServerPro` Domain-level app.", @@ -236209,7 +240511,7 @@ "type": "number" }, "ProvisionedConcurrency": { - "markdownDescription": "", + "markdownDescription": "The amount of provisioned concurrency to allocate for the serverless endpoint. Should be less than or equal to `MaxConcurrency` .\n\n> This field is not supported for serverless endpoint recommendations for Inference Recommender jobs. For more information about creating an Inference Recommender job, see [CreateInferenceRecommendationsJobs](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateInferenceRecommendationsJob.html) .", "title": "ProvisionedConcurrency", "type": "number" } @@ -236455,6 +240757,11 @@ "markdownDescription": "Option for different tiers of low latency storage for real-time data retrieval.\n\n- `Standard` : A managed low latency data store for feature groups.\n- `InMemory` : A managed data store for feature groups that supports very low latency retrieval.", "title": "StorageType", "type": "string" + }, + "TtlDuration": { + "$ref": "#/definitions/AWS::SageMaker::FeatureGroup.TtlDuration", + "markdownDescription": "Time to live duration, where the record is hard deleted after the expiration time is reached; `ExpiresAt` = `EventTime` + `TtlDuration` . For information on HardDelete, see the [DeleteRecord](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_feature_store_DeleteRecord.html) API in the Amazon SageMaker API Reference guide.", + "title": "TtlDuration" } }, "type": "object" @@ -236513,6 +240820,22 @@ ], "type": "object" }, + "AWS::SageMaker::FeatureGroup.TtlDuration": { + "additionalProperties": false, + "properties": { + "Unit": { + "markdownDescription": "`TtlDuration` time unit.", + "title": "Unit", + "type": "string" + }, + "Value": { + "markdownDescription": "`TtlDuration` time value.", + "title": "Value", + "type": "number" + } + }, + "type": "object" + }, "AWS::SageMaker::Image": { "additionalProperties": false, "properties": { @@ -237418,7 +241741,7 @@ "type": "string" }, "Environment": { - "markdownDescription": "The environment variables to set in the Docker container. Each key and value in the `Environment` string to string map can have length of up to 1024. We support up to 16 entries in the map.", + "markdownDescription": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. 
If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "title": "Environment", "type": "object" }, @@ -242278,6 +246601,16 @@ "title": "DomainId", "type": "string" }, + "OwnershipSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.OwnershipSettings", + "markdownDescription": "The collection of ownership settings for a space.", + "title": "OwnershipSettings" + }, + "SpaceDisplayName": { + "markdownDescription": "The name of the space that appears in the Studio UI.", + "title": "SpaceDisplayName", + "type": "string" + }, "SpaceName": { "markdownDescription": "The name of the space.", "title": "SpaceName", @@ -242288,6 +246621,11 @@ "markdownDescription": "A collection of space settings.", "title": "SpaceSettings" }, + "SpaceSharingSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceSharingSettings", + "markdownDescription": "A collection of space sharing settings.", + "title": "SpaceSharingSettings" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -242324,6 +246662,31 @@ ], "type": "object" }, + "AWS::SageMaker::Space.CodeRepository": { + "additionalProperties": false, + "properties": { + "RepositoryUrl": { + "markdownDescription": "The URL of the Git repository.", + "title": "RepositoryUrl", + "type": "string" + } + }, + "required": [ + "RepositoryUrl" + ], + "type": "object" + }, + "AWS::SageMaker::Space.CustomFileSystem": { + "additionalProperties": false, + "properties": { + "EFSFileSystem": { + "$ref": "#/definitions/AWS::SageMaker::Space.EFSFileSystem", + "markdownDescription": "A custom file system in Amazon EFS.", + "title": "EFSFileSystem" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.CustomImage": { "additionalProperties": false, "properties": { @@ -242349,6 +246712,34 @@ ], "type": "object" }, + "AWS::SageMaker::Space.EFSFileSystem": { + "additionalProperties": false, + "properties": { + "FileSystemId": { + "markdownDescription": "The ID of your Amazon EFS file system.", + "title": "FileSystemId", + "type": "string" + } + }, + "required": [ + "FileSystemId" + ], + "type": "object" + }, + "AWS::SageMaker::Space.EbsStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsVolumeSizeInGb": { + "markdownDescription": "The size of an EBS storage volume for a private space.", + "title": "EbsVolumeSizeInGb", + "type": "number" + } + }, + "required": [ + "EbsVolumeSizeInGb" + ], + "type": "object" + }, "AWS::SageMaker::Space.JupyterServerAppSettings": { "additionalProperties": false, "properties": { @@ -242379,6 +246770,20 @@ }, "type": "object" }, + "AWS::SageMaker::Space.OwnershipSettings": { + "additionalProperties": false, + "properties": { + "OwnerUserProfileName": { + "markdownDescription": "The user profile who is the owner of the private space.", + "title": "OwnerUserProfileName", + "type": "string" + } + }, + "required": [ + "OwnerUserProfileName" + ], + "type": "object" + }, "AWS::SageMaker::Space.ResourceSpec": { "additionalProperties": false, "properties": { @@ -242400,9 +246805,62 @@ }, "type": "object" }, + "AWS::SageMaker::Space.SpaceCodeEditorAppSettings": { + "additionalProperties": false, + "properties": { + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + "title": "DefaultResourceSpec" + } + }, + "type": "object" + }, + 
"AWS::SageMaker::Space.SpaceJupyterLabAppSettings": { + "additionalProperties": false, + "properties": { + "CodeRepositories": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CodeRepository" + }, + "markdownDescription": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", + "title": "CodeRepositories", + "type": "array" + }, + "DefaultResourceSpec": { + "$ref": "#/definitions/AWS::SageMaker::Space.ResourceSpec", + "markdownDescription": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on.", + "title": "DefaultResourceSpec" + } + }, + "type": "object" + }, "AWS::SageMaker::Space.SpaceSettings": { "additionalProperties": false, "properties": { + "AppType": { + "markdownDescription": "The type of app created within the space.", + "title": "AppType", + "type": "string" + }, + "CodeEditorAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceCodeEditorAppSettings", + "markdownDescription": "The Code Editor application settings.", + "title": "CodeEditorAppSettings" + }, + "CustomFileSystems": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Space.CustomFileSystem" + }, + "markdownDescription": "A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.", + "title": "CustomFileSystems", + "type": "array" + }, + "JupyterLabAppSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceJupyterLabAppSettings", + "markdownDescription": "The settings for the JupyterLab application.", + "title": "JupyterLabAppSettings" + }, "JupyterServerAppSettings": { "$ref": "#/definitions/AWS::SageMaker::Space.JupyterServerAppSettings", "markdownDescription": "The JupyterServer app settings.", @@ -242412,6 +246870,36 @@ "$ref": "#/definitions/AWS::SageMaker::Space.KernelGatewayAppSettings", "markdownDescription": "The KernelGateway app settings.", "title": "KernelGatewayAppSettings" + }, + "SpaceStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.SpaceStorageSettings", + "markdownDescription": "The storage settings for a private space.", + "title": "SpaceStorageSettings" + } + }, + "type": "object" + }, + "AWS::SageMaker::Space.SpaceSharingSettings": { + "additionalProperties": false, + "properties": { + "SharingType": { + "markdownDescription": "Specifies the sharing type of the space.", + "title": "SharingType", + "type": "string" + } + }, + "required": [ + "SharingType" + ], + "type": "object" + }, + "AWS::SageMaker::Space.SpaceStorageSettings": { + "additionalProperties": false, + "properties": { + "EbsStorageSettings": { + "$ref": "#/definitions/AWS::SageMaker::Space.EbsStorageSettings", + "markdownDescription": "A collection of EBS storage settings for a private space.", + "title": "EbsStorageSettings" } }, "type": "object" @@ -247530,7 +252018,7 @@ "type": "array" }, "RoleArn": { - "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. 
If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- You must have the `iam:PassRole` permission. For more information, see [Granting a user permissions to pass a role to an AWS service](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. The SRT takes these actions only if explicitly authorized by you.", + "markdownDescription": "Authorizes the Shield Response Team (SRT) using the specified role, to access your AWS account to assist with DDoS attack mitigation during potential attacks. This enables the SRT to inspect your AWS WAF configuration and logs and to create or update AWS WAF rules and web ACLs.\n\nYou can associate only one `RoleArn` with your subscription. If you submit this update for an account that already has an associated role, the new `RoleArn` will replace the existing `RoleArn` .\n\nThis change requires the following:\n\n- You must be subscribed to the [Business Support plan](https://docs.aws.amazon.com/premiumsupport/business-support/) or the [Enterprise Support plan](https://docs.aws.amazon.com/premiumsupport/enterprise-support/) .\n- The `AWSShieldDRTAccessPolicy` managed policy must be attached to the role that you specify in the request. You can access this policy in the IAM console at [AWSShieldDRTAccessPolicy](https://docs.aws.amazon.com/iam/home?#/policies/arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy) . For information, see [Adding and removing IAM identity permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) .\n- The role must trust the service principal `drt.shield.amazonaws.com` . For information, see [IAM JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) .\n\nThe SRT will have access only to your AWS WAF and Shield resources. By submitting this request, you provide permissions to the SRT to inspect your AWS WAF and Shield configuration and logs, and to create and update AWS WAF rules and web ACLs on your behalf. 
The SRT takes these actions only if explicitly authorized by you.", "title": "RoleArn", "type": "string" } @@ -247827,7 +252315,7 @@ "additionalProperties": false, "properties": { "Aggregation": { - "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- Sum - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- Mean - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- Max - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", + "markdownDescription": "Defines how AWS Shield combines resource data for the group in order to detect, mitigate, and report events.\n\n- `Sum` - Use the total traffic across the group. This is a good choice for most cases. Examples include Elastic IP addresses for EC2 instances that scale manually or automatically.\n- `Mean` - Use the average of the traffic across the group. This is a good choice for resources that share traffic uniformly. Examples include accelerators and load balancers.\n- `Max` - Use the highest traffic from each resource. This is useful for resources that don't share traffic and for resources that share that traffic in a non-uniform way. Examples include Amazon CloudFront distributions and origin resources for CloudFront distributions.", "title": "Aggregation", "type": "string" }, @@ -250486,7 +254974,7 @@ "type": "string" }, "EncryptionAlgorithm": { - "markdownDescription": "The algorithm that is used to encrypt the file.\n\n> You can only specify `NONE` if the URL for your connector uses HTTPS. This ensures that no traffic is sent in clear text.", + "markdownDescription": "The algorithm that is used to encrypt the file.\n\nNote the following:\n\n- Do not use the `DES_EDE3_CBC` algorithm unless you must support a legacy client that requires it, as it is a weak encryption algorithm.\n- You can only specify `NONE` if the URL for your connector uses HTTPS. 
Using HTTPS ensures that no traffic is sent in clear text.", "title": "EncryptionAlgorithm", "type": "string" }, @@ -251508,7 +255996,8 @@ } }, "required": [ - "Configuration" + "Configuration", + "PolicyStoreId" ], "type": "object" }, @@ -251904,6 +256393,7 @@ } }, "required": [ + "PolicyStoreId", "Statement" ], "type": "object" @@ -255433,7 +259923,7 @@ "items": { "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.FieldToMatch" }, - "markdownDescription": "The parts of the request that you want to keep out of the logs.\n\nFor example, if you redact the `SingleHeader` field, the `HEADER` field in the logs will be `REDACTED` for all rules that use the `SingleHeader` `FieldToMatch` setting.\n\nRedaction applies only to the component that's specified in the rule's `FieldToMatch` setting, so the `SingleHeader` redaction doesn't apply to rules that use the `Headers` `FieldToMatch` .\n\n> You can specify only the following fields for redaction: `UriPath` , `QueryString` , `SingleHeader` , and `Method` .", + "markdownDescription": "The parts of the request that you want to keep out of the logs.\n\nFor example, if you redact the `SingleHeader` field, the `HEADER` field in the logs will be `REDACTED` for all rules that use the `SingleHeader` `FieldToMatch` setting.\n\nRedaction applies only to the component that's specified in the rule's `FieldToMatch` setting, so the `SingleHeader` redaction doesn't apply to rules that use the `Headers` `FieldToMatch` .\n\n> You can specify only the following fields for redaction: `UriPath` , `QueryString` , `SingleHeader` , and `Method` . > This setting has no impact on request sampling. With request sampling, the only way to exclude fields is by disabling sampling in the web ACL visibility configuration.", "title": "RedactedFields", "type": "array" }, @@ -255503,9 +259993,6 @@ "AWS::WAFv2::LoggingConfiguration.FieldToMatch": { "additionalProperties": false, "properties": { - "JsonBody": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody" - }, "Method": { "markdownDescription": "Redact the indicated HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "title": "Method", @@ -255558,25 +260045,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.JsonBody": { - "additionalProperties": false, - "properties": { - "InvalidFallbackBehavior": { - "type": "string" - }, - "MatchPattern": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern" - }, - "MatchScope": { - "type": "string" - } - }, - "required": [ - "MatchPattern", - "MatchScope" - ], - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.LabelNameCondition": { "additionalProperties": false, "properties": { @@ -255614,21 +260082,6 @@ ], "type": "object" }, - "AWS::WAFv2::LoggingConfiguration.MatchPattern": { - "additionalProperties": false, - "properties": { - "All": { - "type": "object" - }, - "IncludedPaths": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, "AWS::WAFv2::LoggingConfiguration.SingleHeader": { "additionalProperties": false, "properties": { @@ -255912,7 +260365,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256160,7 +260613,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -256173,9 +260626,14 @@ "markdownDescription": "Inspect the request headers. You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . \n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.", + "title": "JA3Fingerprint" + }, "JsonBody": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.JsonBody", - "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", + "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.", "title": "JsonBody" }, "Method": { @@ -256357,6 +260815,20 @@ ], "type": "object" }, + "AWS::WAFv2::RuleGroup.JA3Fingerprint": { + "additionalProperties": false, + "properties": { + "FallbackBehavior": { + "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.", + "title": "FallbackBehavior", + "type": "string" + } + }, + "required": [ + "FallbackBehavior" + ], + "type": "object" + }, "AWS::WAFv2::RuleGroup.JsonBody": { "additionalProperties": false, "properties": { @@ -256376,7 +260848,7 @@ "type": "string" }, "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. 
When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -256498,6 +260970,11 @@ "title": "CustomKeys", "type": "array" }, + "EvaluationWindowSec": { + "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)", + "title": "EvaluationWindowSec", + "type": "number" + }, "ForwardedIPConfig": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.ForwardedIPConfiguration", "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.", @@ -256978,7 +261455,7 @@ }, "SizeConstraintStatement": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.SizeConstraintStatement", - "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. 
For example, the URI `/logo.jpg` is nine characters long.", + "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", "title": "SizeConstraintStatement" }, "SqliMatchStatement": { @@ -257028,7 +261505,7 @@ "type": "string" }, "SampledRequestsEnabled": { - "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.", + "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\n> Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.", "title": "SampledRequestsEnabled", "type": "boolean" } @@ -257100,7 +261577,7 @@ "properties": { "AssociationConfig": { "$ref": "#/definitions/AWS::WAFv2::WebACL.AssociationConfig", - "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. The default is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Specifies custom configurations for the associations between the web ACL and protected resources.\n\nUse this to customize the maximum size of the request body that your protected resources forward to AWS WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "title": "AssociationConfig" }, "CaptchaConfig": { @@ -257319,7 +261796,7 @@ "properties": { "RequestBody": { "additionalProperties": false, - "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront distributions forward to AWS WAF for inspection. 
The default size is 16 KB (16,384 bytes).\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", + "markdownDescription": "Customizes the maximum size of the request body that your protected CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access resources forward to AWS WAF for inspection. The default size is 16 KB (16,384 bytes). You can change the setting for any of the available resource types.\n\n> You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) . \n\nExample JSON: `{ \"API_GATEWAY\": \"KB_48\", \"APP_RUNNER_SERVICE\": \"KB_32\" }`\n\nFor Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).", "patternProperties": { "^[a-zA-Z0-9]+$": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RequestBodyAssociatedResourceTypeConfig" @@ -257346,7 +261823,7 @@ "additionalProperties": false, "properties": { "OversizeHandling": { - "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", + "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. 
AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`", "title": "OversizeHandling", "type": "string" } @@ -257638,7 +262115,7 @@ }, "Body": { "$ref": "#/definitions/AWS::WAFv2::WebACL.Body", - "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", + "markdownDescription": "Inspect the request body as plain text. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `Body` object configuration.", "title": "Body" }, "Cookies": { @@ -257651,9 +262128,14 @@ "markdownDescription": "Inspect the request headers. You must configure scope and pattern matching filters in the `Headers` object, to define the set of headers to and the parts of the headers that AWS WAF inspects.\n\nOnly the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to AWS WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the `Headers` object. AWS WAF applies the pattern matching filters to the headers that it receives from the underlying host service.", "title": "Headers" }, + "JA3Fingerprint": { + "$ref": "#/definitions/AWS::WAFv2::WebACL.JA3Fingerprint", + "markdownDescription": "Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. AWS WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.\n\n> You can use this choice only with a string match `ByteMatchStatement` with the `PositionalConstraint` set to `EXACTLY` . 
\n\nYou can obtain the JA3 fingerprint for client requests from the web ACL logs. If AWS WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see [Log fields](https://docs.aws.amazon.com/waf/latest/developerguide/logging-fields.html) in the *AWS WAF Developer Guide* .\n\nProvide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.",
+          "title": "JA3Fingerprint"
+        },
         "JsonBody": {
           "$ref": "#/definitions/AWS::WAFv2::WebACL.JsonBody",
-          "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nA limited amount of the request body is forwarded to AWS WAF for inspection by the underlying host service. For regional resources, the limit is 8 KB (8,192 bytes) and for CloudFront distributions, the limit is 16 KB (16,384 bytes). For CloudFront distributions, you can increase the limit in the web ACL's `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.",
+          "markdownDescription": "Inspect the request body as JSON. The request body immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nFor information about how to handle oversized request bodies, see the `JsonBody` object configuration.",
           "title": "JsonBody"
         },
         "Method": {
@@ -257835,6 +262317,20 @@
       ],
       "type": "object"
     },
+    "AWS::WAFv2::WebACL.JA3Fingerprint": {
+      "additionalProperties": false,
+      "properties": {
+        "FallbackBehavior": {
+          "markdownDescription": "The match status to assign to the web request if the request doesn't have a JA3 fingerprint.\n\nYou can specify the following fallback behaviors:\n\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.",
+          "title": "FallbackBehavior",
+          "type": "string"
+        }
+      },
+      "required": [
+        "FallbackBehavior"
+      ],
+      "type": "object"
+    },
     "AWS::WAFv2::WebACL.JsonBody": {
       "additionalProperties": false,
       "properties": {
@@ -257854,7 +262350,7 @@
           "type": "string"
         },
         "OversizeHandling": {
-          "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect. AWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. If the body is larger than the limit, the underlying host service only forwards the contents that are below the limit to AWS WAF for inspection.\n\nThe default limit is 8 KB (8,192 bytes) for regional resources and 16 KB (16,384 bytes) for CloudFront distributions. For CloudFront distributions, you can increase the limit in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`",
+          "markdownDescription": "What AWS WAF should do if the body is larger than AWS WAF can inspect.\n\nAWS WAF does not support inspecting the entire contents of the web request body if the body exceeds the limit for the resource type. When a web request body is larger than the limit, the underlying host service only forwards the contents that are within the limit to AWS WAF for inspection.\n\n- For Application Load Balancer and AWS AppSync , the limit is fixed at 8 KB (8,192 bytes).\n- For CloudFront, API Gateway, Amazon Cognito, App Runner, and Verified Access, the default limit is 16 KB (16,384 bytes), and you can increase the limit for each resource type in the web ACL `AssociationConfig` , for additional processing fees.\n\nThe options for oversize handling are the following:\n\n- `CONTINUE` - Inspect the available body contents normally, according to the rule inspection criteria.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nYou can combine the `MATCH` or `NO_MATCH` settings for oversize handling with your rule and web ACL action settings, so that you block any request whose body is over the limit.\n\nDefault: `CONTINUE`",
           "title": "OversizeHandling",
           "type": "string"
         }
@@ -258076,6 +262572,11 @@
           "title": "CustomKeys",
           "type": "array"
         },
+        "EvaluationWindowSec": {
+          "markdownDescription": "The amount of time, in seconds, that AWS WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when AWS WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.\n\nThis setting doesn't determine how often AWS WAF checks the rate, but how far back it looks each time it checks. AWS WAF checks the rate about every 10 seconds.\n\nDefault: `300` (5 minutes)",
+          "title": "EvaluationWindowSec",
+          "type": "number"
+        },
         "ForwardedIPConfig": {
           "$ref": "#/definitions/AWS::WAFv2::WebACL.ForwardedIPConfiguration",
           "markdownDescription": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.\n\n> If the specified header isn't present in the request, AWS WAF doesn't apply the rule to the web request at all. \n\nThis is required if you specify a forwarded IP in the rule's aggregate key settings.",
@@ -258328,7 +262829,7 @@
       "additionalProperties": false,
       "properties": {
         "DefaultSizeInspectionLimit": {
-          "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`",
+          "markdownDescription": "Specifies the maximum size of the web request body component that an associated CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resource should send to AWS WAF for inspection. This applies to statements in the web ACL that inspect the body or JSON body.\n\nDefault: `16 KB (16,384 bytes)`",
           "title": "DefaultSizeInspectionLimit",
           "type": "string"
         }
@@ -258848,7 +263349,7 @@
         },
         "SizeConstraintStatement": {
           "$ref": "#/definitions/AWS::WAFv2::WebACL.SizeConstraintStatement",
-          "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.",
+          "markdownDescription": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes in the body up to the limit for the web ACL and protected resource type. If you know that the request body for your web requests should never exceed the inspection limit, you can use a size constraint statement to block requests that have a larger request body size. For more information about the inspection limits, see `Body` and `JsonBody` settings for the `FieldToMatch` data type.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.",
           "title": "SizeConstraintStatement"
         },
         "SqliMatchStatement": {
@@ -258898,7 +263399,7 @@
           "type": "string"
         },
         "SampledRequestsEnabled": {
-          "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.",
+          "markdownDescription": "Indicates whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\n> Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.",
           "title": "SampledRequestsEnabled",
           "type": "boolean"
         }
@@ -261475,6 +265976,9 @@
        {
          "$ref": "#/definitions/AWS::AppFlow::Flow"
        },
+       {
+         "$ref": "#/definitions/AWS::AppIntegrations::Application"
+       },
        {
          "$ref": "#/definitions/AWS::AppIntegrations::DataIntegration"
        },
@@ -261871,9 +266375,15 @@
        {
          "$ref": "#/definitions/AWS::CodeArtifact::Domain"
        },
+       {
+         "$ref": "#/definitions/AWS::CodeArtifact::PackageGroup"
+       },
        {
          "$ref": "#/definitions/AWS::CodeArtifact::Repository"
        },
+       {
+         "$ref": "#/definitions/AWS::CodeBuild::Fleet"
+       },
        {
          "$ref": "#/definitions/AWS::CodeBuild::Project"
        },
@@ -262075,6 +266585,9 @@
        {
          "$ref": "#/definitions/AWS::ConnectCampaigns::Campaign"
        },
+       {
+         "$ref": "#/definitions/AWS::ControlTower::EnabledBaseline"
+       },
        {
          "$ref": "#/definitions/AWS::ControlTower::EnabledControl"
        },
@@ -262201,6 +266714,27 @@
        {
          "$ref": "#/definitions/AWS::DataSync::Task"
        },
+       {
+         "$ref": "#/definitions/AWS::DataZone::DataSource"
+       },
+       {
+         "$ref": "#/definitions/AWS::DataZone::Domain"
+       },
+       {
+         "$ref": "#/definitions/AWS::DataZone::Environment"
+       },
+       {
+         "$ref": "#/definitions/AWS::DataZone::EnvironmentBlueprintConfiguration"
+       },
+       {
+         "$ref": "#/definitions/AWS::DataZone::EnvironmentProfile"
+       },
+       {
+         "$ref": "#/definitions/AWS::DataZone::Project"
+       },
+       {
+         "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget"
+       },
        {
          "$ref": "#/definitions/AWS::Detective::Graph"
        },
@@ -262897,6 +267431,9 @@
        {
          "$ref": "#/definitions/AWS::Glue::Table"
        },
+       {
+         "$ref": "#/definitions/AWS::Glue::TableOptimizer"
+       },
        {
          "$ref": "#/definitions/AWS::Glue::Trigger"
        },
@@ -263050,6 +267587,9 @@
        {
          "$ref": "#/definitions/AWS::IVS::RecordingConfiguration"
        },
+       {
+         "$ref": "#/definitions/AWS::IVS::Stage"
+       },
        {
          "$ref": "#/definitions/AWS::IVS::StreamKey"
        },
@@ -263101,6 +267641,9 @@
        {
          "$ref": "#/definitions/AWS::Inspector::ResourceGroup"
        },
+       {
+         "$ref": "#/definitions/AWS::InspectorV2::CisScanConfiguration"
+       },
        {
          "$ref": "#/definitions/AWS::InspectorV2::Filter"
        },
@@ -263329,6 +267872,12 @@
        {
          "$ref": "#/definitions/AWS::KafkaConnect::Connector"
        },
+       {
+         "$ref": "#/definitions/AWS::KafkaConnect::CustomPlugin"
+       },
+       {
+         "$ref": "#/definitions/AWS::KafkaConnect::WorkerConfiguration"
+       },
        {
          "$ref": "#/definitions/AWS::Kendra::DataSource"
        },
@@ -264088,6 +268637,9 @@
        {
          "$ref": "#/definitions/AWS::RDS::GlobalCluster"
        },
+       {
+         "$ref": "#/definitions/AWS::RDS::Integration"
+       },
        {
          "$ref": "#/definitions/AWS::RDS::OptionGroup"
        },
diff --git a/tests/openapi/test_openapi.py b/tests/openapi/test_openapi.py
index 02a02fba9..0ee3f362d 100644
--- a/tests/openapi/test_openapi.py
+++ b/tests/openapi/test_openapi.py
@@ -342,7 +342,7 @@ class TestOpenApiEditor_is_valid(TestCase):
         [
             param(OpenApiEditor.gen_skeleton()),
             # Dict can contain any other unrecognized properties
-            param({"openapi": "3.1.1", "paths": {}, "foo": "bar", "baz": "bar"})
+            param({"openapi": "3.1.1", "paths": {}, "foo": "bar", "baz": "bar"}),
             # TODO check and update the regex accordingly
             # Fails for this: param({"openapi": "3.1.10", "paths": {}, "foo": "bar", "baz": "bar"})
         ]
diff --git a/tests/translator/input/error_http_api_with_invalid_jwt_configuration.yaml b/tests/translator/input/error_http_api_with_invalid_jwt_configuration.yaml
new file mode 100644
index 000000000..8e28a8a91
--- /dev/null
+++ b/tests/translator/input/error_http_api_with_invalid_jwt_configuration.yaml
@@ -0,0 +1,37 @@
+Resources:
+  MyApi:
+    Type: AWS::Serverless::HttpApi
+    Properties:
+      Tags:
+        Tag1: value1
+        Tag2: value2
+      Auth:
+        Authorizers:
+          MyLambdaAuth:
+            FunctionArn:
+              Fn::GetAtt:
+              - MyAuthFn
+              - Arn
+            FunctionInvokeRole:
+              Fn::GetAtt:
+              - MyAuthFnRole
+              - Arn
+            Identity:
+              Context:
+              - contextVar
+              Headers:
+              - Authorization
+              QueryStrings:
+              - petId
+              StageVariables:
+              - stageVar
+              ReauthorizeEvery: 23
+            EnableSimpleResponses: true
+            AuthorizerPayloadFormatVersion: 2.0
+          MyOAuth2Auth:
+            AuthorizationScopes:
+            - scope4
+            JwtConfiguration:
+            - issuer: https://openid-connect.onelogin.com/oidc
+            IdentitySource: $request.querystring.param
+        DefaultAuthorizer: MyOAuth2Auth
diff --git a/tests/translator/output/error_http_api_with_invalid_jwt_configuration.json b/tests/translator/output/error_http_api_with_invalid_jwt_configuration.json
new file mode 100644
index 000000000..b8b629d18
--- /dev/null
+++ b/tests/translator/output/error_http_api_with_invalid_jwt_configuration.json
@@ -0,0 +1,9 @@
+{
+  "_autoGeneratedBreakdownErrorMessage": [
+    "Invalid Serverless Application Specification document. ",
+    "Number of errors found: 1. ",
+    "Resource with id [MyApi] is invalid. ",
+    "Property 'JwtConfiguration' should be a map."
+  ],
+  "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. Resource with id [MyApi] is invalid. Property 'JwtConfiguration' should be a map."
+}
diff --git a/tests/translator/test_arn_generator.py b/tests/translator/test_arn_generator.py
index ae2c1bf7f..482d55401 100644
--- a/tests/translator/test_arn_generator.py
+++ b/tests/translator/test_arn_generator.py
@@ -10,7 +10,15 @@ def setUp(self):
         ArnGenerator.BOTO_SESSION_REGION_NAME = None
 
     @parameterized.expand(
-        [("us-east-1", "aws"), ("cn-east-1", "aws-cn"), ("us-gov-west-1", "aws-us-gov"), ("US-EAST-1", "aws")]
+        [
+            ("us-east-1", "aws"),
+            ("cn-east-1", "aws-cn"),
+            ("us-gov-west-1", "aws-us-gov"),
+            ("us-isob-east-1", "aws-iso-b"),
+            ("eu-isoe-west-1", "aws-iso-e"),
+            ("US-EAST-1", "aws"),
+            ("us-isof-east-1", "aws-iso-f"),
+        ]
     )
     def test_get_partition_name(self, region, expected):
         actual = ArnGenerator.get_partition_name(region)
diff --git a/tests/translator/test_translator.py b/tests/translator/test_translator.py
index 326086aa8..8e7e95b01 100644
--- a/tests/translator/test_translator.py
+++ b/tests/translator/test_translator.py
@@ -165,13 +165,13 @@ def _compare_transform(self, manifest, expected, partition, region):
             "AWSLambdaRole": f"arn:{partition}:iam::aws:policy/service-role/AWSLambdaRole",
         }
         if partition == "aws":
-            mock_policy_loader.load.return_value[
-                "AWSXrayWriteOnlyAccess"
-            ] = "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess"
+            mock_policy_loader.load.return_value["AWSXrayWriteOnlyAccess"] = (
+                "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess"
+            )
         else:
-            mock_policy_loader.load.return_value[
-                "AWSXRayDaemonWriteAccess"
-            ] = f"arn:{partition}:iam::aws:policy/AWSXRayDaemonWriteAccess"
+            mock_policy_loader.load.return_value["AWSXRayDaemonWriteAccess"] = (
+                f"arn:{partition}:iam::aws:policy/AWSXRayDaemonWriteAccess"
+            )
 
         output_fragment = transform(manifest, parameter_values, mock_policy_loader)