From 5ddaef491d7962616f75f170cf7547cd9229338f Mon Sep 17 00:00:00 2001 From: zehsor Date: Fri, 7 Jan 2022 23:10:51 +0100 Subject: [PATCH 01/40] fix(aws-lambda-event-sources): unsupported properties for SelfManagedKafkaEventSource and ManagedKafkaEventSource (#17965) This PR fixes a bug in the CDK where some `kafkaEventSource` properties are actually unsupported; these properties exist only for Kinesis and DynamoDB streams. The existing `KafkaEventSourceProps` interface erroneously extends an interface that includes Kinesis- and DynamoDB-specific properties. This PR separates these properties into a `Base` interface with the stream properties shared by all three, as well as an interface for the `kinesis`- and `dynamodb`-specific properties. No new unit tests are added because the scope of this PR is limited to removing properties; it is enough to ensure that the current tests still succeed. We are allowing the breaking changes specified in `allowed-breaking-changes.txt` because the removed properties never worked in the first place. Fixes #17934. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- allowed-breaking-changes.txt | 23 +++++++ .../aws-lambda-event-sources/lib/kafka.ts | 4 +- .../aws-lambda-event-sources/lib/stream.ts | 62 ++++++++++--------- 3 files changed, 59 insertions(+), 30 deletions(-) diff --git a/allowed-breaking-changes.txt b/allowed-breaking-changes.txt index fa3498335f679..56b1145d61b39 100644 --- a/allowed-breaking-changes.txt +++ b/allowed-breaking-changes.txt @@ -93,3 +93,26 @@ incompatible-argument:@aws-cdk/aws-autoscaling-hooktargets.FunctionHook.bind incompatible-argument:@aws-cdk/aws-autoscaling-hooktargets.QueueHook.bind incompatible-argument:@aws-cdk/aws-autoscaling-hooktargets.TopicHook.bind incompatible-argument:@aws-cdk/aws-autoscaling.ILifecycleHookTarget.bind + +# removed properties from kafka eventsources as they are not supported +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.bisectBatchOnError +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.maxRecordAge +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.parallelizationFactor +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.reportBatchItemFailures +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.retryAttempts +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.tumblingWindow +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.bisectBatchOnError +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.maxRecordAge +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.parallelizationFactor +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.reportBatchItemFailures +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.retryAttempts +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.tumblingWindow +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.bisectBatchOnError +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.maxRecordAge +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.parallelizationFactor +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.reportBatchItemFailures +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.retryAttempts +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.tumblingWindow 
+base-types:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps +base-types:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps +base-types:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps \ No newline at end of file diff --git a/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts b/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts index e31fac89100cb..1918ec2756c84 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts +++ b/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts @@ -4,7 +4,7 @@ import * as iam from '@aws-cdk/aws-iam'; import * as lambda from '@aws-cdk/aws-lambda'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import { Stack, Names } from '@aws-cdk/core'; -import { StreamEventSource, StreamEventSourceProps } from './stream'; +import { StreamEventSource, BaseStreamEventSourceProps } from './stream'; // keep this import separate from other imports to reduce chance for merge conflicts with v2-main // eslint-disable-next-line no-duplicate-imports, import/order @@ -13,7 +13,7 @@ import { Construct } from '@aws-cdk/core'; /** * Properties for a Kafka event source */ -export interface KafkaEventSourceProps extends StreamEventSourceProps { +export interface KafkaEventSourceProps extends BaseStreamEventSourceProps{ /** * The Kafka topic to subscribe to */ diff --git a/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts b/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts index 01288efb21a6c..462387397b629 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts +++ b/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts @@ -5,7 +5,7 @@ import { Duration } from '@aws-cdk/core'; * The set of properties for event sources that follow the streaming model, * such as, Dynamo, Kinesis and Kafka. */ -export interface StreamEventSourceProps { +export interface BaseStreamEventSourceProps{ /** * The largest number of records that AWS Lambda will retrieve from your event * source at the time of invoking your function. Your function receives an @@ -15,25 +15,51 @@ export interface StreamEventSourceProps { * * Minimum value of 1 * * Maximum value of: * * 1000 for {@link DynamoEventSource} - * * 10000 for {@link KinesisEventSource} + * * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource} * * @default 100 */ readonly batchSize?: number; /** - * If the function returns an error, split the batch in two and retry. + * An Amazon SQS queue or Amazon SNS topic destination for discarded records. * - * @default false + * @default discarded records are ignored */ - readonly bisectBatchOnError?: boolean; + readonly onFailure?: lambda.IEventSourceDlq; /** - * An Amazon SQS queue or Amazon SNS topic destination for discarded records. + * Where to begin consuming the stream. + */ + readonly startingPosition: lambda.StartingPosition; + + /** + * The maximum amount of time to gather records before invoking the function. + * Maximum of Duration.minutes(5) * - * @default discarded records are ignored + * @default Duration.seconds(0) */ - readonly onFailure?: lambda.IEventSourceDlq; + readonly maxBatchingWindow?: Duration; + + /** + * If the stream event source mapping should be enabled. + * + * @default true + */ + readonly enabled?: boolean; +} + +/** + * The set of properties for event sources that follow the streaming model, + * such as, Dynamo, Kinesis. 
+ */ +export interface StreamEventSourceProps extends BaseStreamEventSourceProps { + /** + * If the function returns an error, split the batch in two and retry. + * + * @default false + */ + readonly bisectBatchOnError?: boolean; /** * The maximum age of a record that Lambda sends to a function for processing. @@ -65,11 +91,6 @@ export interface StreamEventSourceProps { */ readonly parallelizationFactor?: number; - /** - * Where to begin consuming the stream. - */ - readonly startingPosition: lambda.StartingPosition; - /** * Allow functions to return partially successful responses for a batch of records. * @@ -79,14 +100,6 @@ export interface StreamEventSourceProps { */ readonly reportBatchItemFailures?: boolean; - /** - * The maximum amount of time to gather records before invoking the function. - * Maximum of Duration.minutes(5) - * - * @default Duration.seconds(0) - */ - readonly maxBatchingWindow?: Duration; - /** * The size of the tumbling windows to group records sent to DynamoDB or Kinesis * Valid Range: 0 - 15 minutes * * @default - None */ readonly tumblingWindow?: Duration; - - /** - * If the stream event source mapping should be enabled. - * - * @default true - */ - readonly enabled?: boolean; } /** From 74eee1e5b8fa404dde129f001b986d615f435c73 Mon Sep 17 00:00:00 2001 From: Kaizen Conroy <36202692+kaizen3031593@users.noreply.github.com> Date: Fri, 7 Jan 2022 17:55:35 -0500 Subject: [PATCH 02/40] fix(apigatewayv2-authorizers): incorrect `identitySource` default for `WebSocketLambdaAuthorizer` (#18315) We introduced `WebSocketLambdaAuthorizer` in #16886 with an incorrect default `identitySource`, according to these [docs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-authorizer.html#cfn-apigatewayv2-authorizer-identitysource). As a result, using the default `identitySource` causes a deploy-time failure. This PR fixes the error and documents the syntax for all `identitySource` possibilities. I can confirm that this default successfully passes `cdk deploy` on my local app. Fixes #18307. BREAKING CHANGE: `WebSocketLambdaAuthorizerProps.identitySource` default changes from `['$request.header.Authorization']` to `['route.request.header.Authorization']`. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../aws-apigatewayv2-authorizers/lib/websocket/lambda.ts | 8 ++++++-- .../test/websocket/lambda.test.ts | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts b/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts index 2e60cbdd7b547..8b5b5c6d3fc43 100644 --- a/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts +++ b/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts @@ -28,7 +28,11 @@ export interface WebSocketLambdaAuthorizerProps { /** * The identity source for which authorization is requested. * - * @default ['$request.header.Authorization'] + * Request parameter match `'route.request.querystring|header.[a-zA-z0-9._-]+'`. + * Staged variable match `'stageVariables.[a-zA-Z0-9._-]+'`. + * Context parameter match `'context.[a-zA-Z0-9._-]+'`. 
+ * + * @default ['route.request.header.Authorization'] */ readonly identitySource?: string[]; } @@ -56,7 +60,7 @@ export class WebSocketLambdaAuthorizer implements IWebSocketRouteAuthorizer { this.authorizer = new WebSocketAuthorizer(options.scope, this.id, { webSocketApi: options.route.webSocketApi, identitySource: this.props.identitySource ?? [ - '$request.header.Authorization', + 'route.request.header.Authorization', ], type: WebSocketAuthorizerType.LAMBDA, authorizerName: this.props.authorizerName ?? this.id, diff --git a/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts b/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts index c171247801911..8a62d5731ac58 100644 --- a/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts +++ b/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts @@ -35,7 +35,7 @@ describe('WebSocketLambdaAuthorizer', () => { Name: 'default-authorizer', AuthorizerType: 'REQUEST', IdentitySource: [ - '$request.header.Authorization', + 'route.request.header.Authorization', ], }); From 455147a5c3ed2c519279d6a523881126e0856f21 Mon Sep 17 00:00:00 2001 From: Addi Horowitz <11254819+addihorowitz@users.noreply.github.com> Date: Sun, 9 Jan 2022 17:43:11 +0200 Subject: [PATCH 03/40] docs: Add urls for blog posts (#17912) Best practices + all developer blog posts ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 4a1eab8dd9eb8..7349b1909d6b1 100644 --- a/README.md +++ b/README.md @@ -153,6 +153,12 @@ this capability, please see the ## More Resources * [CDK Workshop](https://cdkworkshop.com/) * [Construct Hub](https://constructs.dev) - Find and use open-source Cloud Development Kit (CDK) libraries +* Best Practices + * [Best practices for developing cloud applications with AWS CDK](https://aws.amazon.com/blogs/devops/best-practices-for-developing-cloud-applications-with-aws-cdk/) + * [Align with best practices while creating infrastructure using cdk aspects](https://aws.amazon.com/blogs/devops/align-with-best-practices-while-creating-infrastructure-using-cdk-aspects/) + * [Recommended AWS CDK project structure for Python applications](https://aws.amazon.com/blogs/developer/recommended-aws-cdk-project-structure-for-python-applications/) + * [Best practices for discoverability of a construct library on Construct Hub](https://aws.amazon.com/blogs/opensource/best-practices-for-discoverability-of-a-construct-library-on-construct-hub/) +* [All developer blog posts about AWS CDK](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-cloud-development-kit/) * **[CDK Construction Zone](https://www.twitch.tv/collections/9kCOGphNZBYVdA)** - A Twitch live coding series hosted by the CDK team, season one episodes: * Triggers: Join us as we implement [Triggers](https://github.com/aws/aws-cdk-rfcs/issues/71), a Construct for configuring deploy time actions. 
Episodes 1-3: * [S1E1](https://www.twitch.tv/videos/917691798): Triggers (part 1); **Participants:** @NetaNir, @eladb, @richardhboyd From 43bf9aec0b3c5e06d5382b29f4e8e0c91cd796ca Mon Sep 17 00:00:00 2001 From: Shea Belsky Date: Sun, 9 Jan 2022 11:31:43 -0500 Subject: [PATCH 05/40] fix(cli): breaks due to faulty version of `colors` (#18324) Fixes https://github.com/aws/aws-cdk/issues/18322 and https://github.com/aws/aws-cdk/issues/18323 Clarifying question: I'm not sure if the `yarn.lock` file should have automatically updated itself to only refer to `colors@1.4.0` when I set the dependency within the workspace _and_ added the resolution for child dependencies. If it's expected behavior for it _not_ to update the `yarn.lock` file after adding `resolutions`, great! If I need to do something else for that to happen, let me know!
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- package.json | 3 ++- packages/@aws-cdk/aws-cloudtrail/package.json | 4 ++-- packages/@aws-cdk/cloudformation-diff/package.json | 4 ++-- packages/aws-cdk/package.json | 4 ++-- packages/awslint/package.json | 4 ++-- tools/@aws-cdk/cdk-build-tools/package.json | 4 ++-- tools/@aws-cdk/pkglint/package.json | 9 ++++++--- yarn.lock | 2 +- 8 files changed, 19 insertions(+), 15 deletions(-) diff --git a/package.json b/package.json index 462d2c605eabd..78578c6cc9811 100644 --- a/package.json +++ b/package.json @@ -31,6 +31,7 @@ "typescript": "~3.9.10" }, "resolutions": { + "colors": "1.4.0", "string-width": "^4.2.3" }, "repository": { @@ -179,4 +180,4 @@ "dependencies": { "string-width": "^4.2.3" } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-cloudtrail/package.json b/packages/@aws-cdk/aws-cloudtrail/package.json index d94555d73ae26..3a81482d9fd44 100644 --- a/packages/@aws-cdk/aws-cloudtrail/package.json +++ b/packages/@aws-cdk/aws-cloudtrail/package.json @@ -79,7 +79,7 @@ "@aws-cdk/pkglint": "0.0.0", "@types/jest": "^27.0.3", "aws-sdk": "^2.848.0", - "colors": "^1.4.0", + "colors": "1.4.0", "jest": "^27.4.5" }, "dependencies": { @@ -121,4 +121,4 @@ "publishConfig": { "tag": "latest" } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/cloudformation-diff/package.json b/packages/@aws-cdk/cloudformation-diff/package.json index 53bea429a4b8c..2a7452c11e399 100644 --- a/packages/@aws-cdk/cloudformation-diff/package.json +++ b/packages/@aws-cdk/cloudformation-diff/package.json @@ -25,7 +25,7 @@ "dependencies": { "@aws-cdk/cfnspec": "0.0.0", "@types/node": "^10.17.60", - "colors": "^1.4.0", + "colors": "1.4.0", "diff": "^5.0.0", "fast-deep-equal": "^3.1.3", "string-width": "^4.2.3", @@ -58,4 +58,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/packages/aws-cdk/package.json b/packages/aws-cdk/package.json index 382a3de061a6a..e548614c522fb 100644 --- a/packages/aws-cdk/package.json +++ b/packages/aws-cdk/package.json @@ -77,7 +77,7 @@ "camelcase": "^6.2.1", "cdk-assets": "0.0.0", "chokidar": "^3.5.2", - "colors": "^1.4.0", + "colors": "1.4.0", "decamelize": "^5.0.1", "fs-extra": "^9.1.0", "glob": "^7.2.0", @@ -124,4 +124,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/packages/awslint/package.json b/packages/awslint/package.json index f158b63476d76..b8fb72ad10f29 100644 --- a/packages/awslint/package.json +++ b/packages/awslint/package.json @@ -20,7 +20,7 @@ "dependencies": { "@jsii/spec": "^1.50.0", "camelcase": "^6.2.1", - "colors": "^1.4.0", + "colors": "1.4.0", "fs-extra": "^9.1.0", "jsii-reflect": "^1.50.0", "yargs": "^16.2.0" @@ -71,4 +71,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/tools/@aws-cdk/cdk-build-tools/package.json b/tools/@aws-cdk/cdk-build-tools/package.json index 8db499fc9e8d2..7aad17fcd9e80 100644 --- a/tools/@aws-cdk/cdk-build-tools/package.json +++ b/tools/@aws-cdk/cdk-build-tools/package.json @@ -47,7 +47,7 @@ "@typescript-eslint/eslint-plugin": "^4.33.0", "@typescript-eslint/parser": "^4.33.0", "awslint": "0.0.0", - "colors": "^1.4.0", + "colors": "1.4.0", "eslint": "^7.32.0", "eslint-import-resolver-node": "^0.3.6", "eslint-import-resolver-typescript": "^2.5.0", @@ -82,4 +82,4 @@ "ubergen": { "exclude": true } -} +} \ No newline at end of file diff --git 
a/tools/@aws-cdk/pkglint/package.json b/tools/@aws-cdk/pkglint/package.json index 5ab8ec96fb7d5..3115ccc8023a0 100644 --- a/tools/@aws-cdk/pkglint/package.json +++ b/tools/@aws-cdk/pkglint/package.json @@ -54,15 +54,18 @@ "typescript": "~3.9.10" }, "nozem": { - "ostools": ["chmod", "cp"] + "ostools": [ + "chmod", + "cp" + ] }, "dependencies": { "case": "^1.6.3", - "colors": "^1.4.0", + "colors": "1.4.0", "fs-extra": "^9.1.0", "glob": "^7.2.0", "npm-bundled": "^1.1.2", "semver": "^7.3.5", "yargs": "^16.2.0" } -} +} \ No newline at end of file diff --git a/yarn.lock b/yarn.lock index e478f529f0427..8e58662689437 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2835,7 +2835,7 @@ color-name@~1.1.4: resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -colors@^1.4.0: +colors@1.4.0, colors@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== From b012846e4c7dd2c08ffa39e3adeaf4c6df09c8be Mon Sep 17 00:00:00 2001 From: epolon Date: Sun, 9 Jan 2022 18:57:12 +0200 Subject: [PATCH 06/40] chore(release): 1.138.2 --- CHANGELOG.md | 7 +++++++ version.v1.json | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a7177fff221f..f40604e3c028a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +### [1.138.2](https://github.com/aws/aws-cdk/compare/v1.138.1...v1.138.2) (2022-01-09) + + +### Bug Fixes + +* **cli:** breaks due to faulty version of `colors` ([#18324](https://github.com/aws/aws-cdk/issues/18324)) ([43bf9ae](https://github.com/aws/aws-cdk/commit/43bf9aec0b3c5e06d5382b29f4e8e0c91cd796ca)) + ## [1.138.1](https://github.com/aws/aws-cdk/compare/v1.138.0...v1.138.1) (2022-01-07) diff --git a/version.v1.json b/version.v1.json index 35cef8a7a0a74..52f052970432b 100644 --- a/version.v1.json +++ b/version.v1.json @@ -1,3 +1,3 @@ { - "version": "1.138.1" + "version": "1.138.2" } \ No newline at end of file From ac7b66fc40df4fbd5f57f6fa6ebcea2b7a305806 Mon Sep 17 00:00:00 2001 From: Eli Polonsky Date: Sun, 9 Jan 2022 19:39:47 +0200 Subject: [PATCH 07/40] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f40604e3c028a..4e1b13e5cd6bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
-### [1.138.2](https://github.com/aws/aws-cdk/compare/v1.138.1...v1.138.2) (2022-01-09) +## [1.138.2](https://github.com/aws/aws-cdk/compare/v1.138.1...v1.138.2) (2022-01-09) ### Bug Fixes * **cli:** breaks due to faulty version of `colors` ([#18324](https://github.com/aws/aws-cdk/issues/18324)) ([43bf9ae](https://github.com/aws/aws-cdk/commit/43bf9aec0b3c5e06d5382b29f4e8e0c91cd796ca)) From c2c87d9dd861a25dcbd9aa830e81ecb4d76ba509 Mon Sep 17 00:00:00 2001 From: Kaizen Conroy <36202692+kaizen3031593@users.noreply.github.com> Date: Mon, 10 Jan 2022 04:21:32 -0500 Subject: [PATCH 08/40] fix(pipelines): `DockerCredential.dockerHub()` silently fails auth (#18313) ### Problem: `DockerCredential.dockerHub()` silently failed to authenticate users, resulting in unexpected and intermittent throttling due to Docker's policy for unauthenticated users. ### Reason: `.dockerHub()` added `index.docker.io` to the domain credentials, but the actual docker command [authenticated](https://github.com/moby/moby/blob/1e71c6cffedb79e3def696652753ea43cdc47b99/registry/config.go#L35) with `https://index.docker.io/v1/`, which it was unable to find as a domain credential, thus failing to trigger `docker-credential-cdk-assets` during the `docker --config build` call. Furthermore, the credential `DockerCredential.customRegistry('https://index.docker.io/v1/', secret)` alone does not work. This would successfully trigger `docker-credential-cdk-assets` but fail to authenticate because of how `cdk-assets` handles credential lookup. The command strips the endpoint down to just a hostname, so in this case we try `fetchDockerLoginCredentials(awsClient, config, 'index.docker.io')`, which fails: https://github.com/aws/aws-cdk/blob/4fb0309e3b93be276ab3e2d510ffc2ce35823dcd/packages/cdk-assets/bin/docker-credential-cdk-assets.ts#L32-L38 So the workaround for this bug was to specify both domains as credentials, each to satisfy a separate step of the process: ```ts dockerCredentials: [ pipelines.DockerCredential.dockerHub(secret), pipelines.DockerCredential.customRegistry('https://index.docker.io/v1/', secret), ], ``` ### Solution: This PR introduces two separate changes to address both problems. First, we change the hardcoded domain in `DockerCredential.dockerHub()` to `https://index.docker.io/v1/`. This allows us to successfully trigger `docker-credential-cdk-assets` when the `docker --config build` command is called. Next, to make sure the credential lookup succeeds, we check for both the complete endpoint and the domain name. In this case, we will check for both `https://index.docker.io/v1/` and `index.docker.io`. Since `https://index.docker.io/v1/` exists in the credentials helper, authentication will succeed. Why do we still check for the domain `index.docker.io`? I don't know how custom registries or ECR work in this context and believe that to be beyond the scope of this PR. It's possible that they require the domain only for lookup. ### Testing: The change to credential lookups is unit tested in `docker-credentials.test.ts`. I confirmed that the change to `DockerCredential.dockerHub()` is successful by configuring a mock `cdk-docker-creds.json` file and successfully `cdk deploy`ing a docker image that depends on a private repository. This isn't a common use case, but it ensures that failure to authenticate results in failure every time. Thanks @james-mathiesen for the suggestion. ### Contributors: Thanks to @nohack for the code in `cdk-assets`. Fixes #15737.
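With this fix, the two-credential workaround shown above should no longer be necessary: `DockerCredential.dockerHub()` alone registers the `https://index.docker.io/v1/` endpoint that the docker CLI actually authenticates against. A minimal sketch of the simplified configuration (the secret name and construct IDs here are hypothetical; the secret is assumed to hold `username` and `secret` fields):

```ts
import * as secretsmanager from '@aws-cdk/aws-secretsmanager';
import * as pipelines from '@aws-cdk/pipelines';
import { Stack } from '@aws-cdk/core';

const stack = new Stack();

// Hypothetical secret holding Docker Hub credentials as { username, secret }.
const secret = secretsmanager.Secret.fromSecretNameV2(stack, 'DockerHubSecret', 'dockerhub-credentials');

// A single entry now satisfies both steps: dockerHub() registers the full
// 'https://index.docker.io/v1/' endpoint, and the lookup checks both the
// endpoint and its hostname, so no customRegistry() duplicate is needed.
const dockerCredentials = [pipelines.DockerCredential.dockerHub(secret)];
```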
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/pipelines/lib/docker-credentials.ts | 4 ++-- .../pipelines/test/docker-credentials.test.ts | 2 +- .../cdk-assets/bin/docker-credential-cdk-assets.ts | 10 ++-------- .../cdk-assets/lib/private/docker-credentials.ts | 11 ++++++++--- packages/cdk-assets/lib/private/docker.ts | 11 ++++++++++- .../test/private/docker-credentials.test.ts | 14 +++++++++++++- 6 files changed, 36 insertions(+), 16 deletions(-) diff --git a/packages/@aws-cdk/pipelines/lib/docker-credentials.ts b/packages/@aws-cdk/pipelines/lib/docker-credentials.ts index 77b7d2c1b4381..05144d4957771 100644 --- a/packages/@aws-cdk/pipelines/lib/docker-credentials.ts +++ b/packages/@aws-cdk/pipelines/lib/docker-credentials.ts @@ -10,10 +10,10 @@ import { Fn } from '@aws-cdk/core'; export abstract class DockerCredential { /** * Creates a DockerCredential for DockerHub. - * Convenience method for `fromCustomRegistry('index.docker.io', opts)`. + * Convenience method for `customRegistry('https://index.docker.io/v1/', opts)`. */ public static dockerHub(secret: secretsmanager.ISecret, opts: ExternalDockerCredentialOptions = {}): DockerCredential { - return new ExternalDockerCredential('index.docker.io', secret, opts); + return new ExternalDockerCredential('https://index.docker.io/v1/', secret, opts); } /** diff --git a/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts b/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts index a2b5fc2c577dd..902c13a4129b7 100644 --- a/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts +++ b/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts @@ -29,7 +29,7 @@ describe('ExternalDockerCredential', () => { test('dockerHub defaults registry domain', () => { const creds = cdkp.DockerCredential.dockerHub(secret); - expect(Object.keys(creds._renderCdkAssetsConfig())).toEqual(['index.docker.io']); + expect(Object.keys(creds._renderCdkAssetsConfig())).toEqual(['https://index.docker.io/v1/']); }); test('minimal example only renders secret', () => { diff --git a/packages/cdk-assets/bin/docker-credential-cdk-assets.ts b/packages/cdk-assets/bin/docker-credential-cdk-assets.ts index b04f2ba8510bc..6dccb5521cf55 100644 --- a/packages/cdk-assets/bin/docker-credential-cdk-assets.ts +++ b/packages/cdk-assets/bin/docker-credential-cdk-assets.ts @@ -29,14 +29,8 @@ async function main() { } // Read the domain to fetch from stdin - let rawDomain = fs.readFileSync(0, { encoding: 'utf-8' }).trim(); - // Paranoid handling to ensure new URL() doesn't throw if the schema is missing. - // Not convinced docker will ever pass in a url like 'index.docker.io/v1', but just in case... - rawDomain = rawDomain.includes('://') ? 
rawDomain : `https://${rawDomain}`; - const domain = new URL(rawDomain).hostname; - - const credentials = await fetchDockerLoginCredentials(new DefaultAwsClient(), config, domain); - + let endpoint = fs.readFileSync(0, { encoding: 'utf-8' }).trim(); + const credentials = await fetchDockerLoginCredentials(new DefaultAwsClient(), config, endpoint); // Write the credentials back to stdout fs.writeFileSync(1, JSON.stringify(credentials)); } diff --git a/packages/cdk-assets/lib/private/docker-credentials.ts b/packages/cdk-assets/lib/private/docker-credentials.ts index b5c3f42139581..923d18d70a3ee 100644 --- a/packages/cdk-assets/lib/private/docker-credentials.ts +++ b/packages/cdk-assets/lib/private/docker-credentials.ts @@ -39,12 +39,17 @@ export function cdkCredentialsConfig(): DockerCredentialsConfig | undefined { } /** Fetches login credentials from the configured source (e.g., SecretsManager, ECR) */ -export async function fetchDockerLoginCredentials(aws: IAws, config: DockerCredentialsConfig, domain: string) { - if (!Object.keys(config.domainCredentials).includes(domain)) { +export async function fetchDockerLoginCredentials(aws: IAws, config: DockerCredentialsConfig, endpoint: string) { + // Paranoid handling to ensure new URL() doesn't throw if the schema is missing + // For official docker registry, docker will pass https://index.docker.io/v1/ + endpoint = endpoint.includes('://') ? endpoint : `https://${endpoint}`; + const domain = new URL(endpoint).hostname; + + if (!Object.keys(config.domainCredentials).includes(domain) && !Object.keys(config.domainCredentials).includes(endpoint)) { throw new Error(`unknown domain ${domain}`); } - const domainConfig = config.domainCredentials[domain]; + let domainConfig = config.domainCredentials[domain] ?? config.domainCredentials[endpoint]; if (domainConfig.secretsManagerSecretId) { const sm = await aws.secretsManagerClient({ assumeRoleArn: domainConfig.assumeRoleArn }); diff --git a/packages/cdk-assets/lib/private/docker.ts b/packages/cdk-assets/lib/private/docker.ts index e1fc54429f18f..aed2631ab2852 100644 --- a/packages/cdk-assets/lib/private/docker.ts +++ b/packages/cdk-assets/lib/private/docker.ts @@ -124,8 +124,17 @@ export class Docker { private async execute(args: string[], options: ShellOptions = {}) { const configArgs = this.configDir ? ['--config', this.configDir] : []; + const pathToCdkAssets = path.resolve(__dirname, '..', '..', 'bin'); try { - await shell(['docker', ...configArgs, ...args], { logger: this.logger, ...options }); + await shell(['docker', ...configArgs, ...args], { + logger: this.logger, + ...options, + env: { + ...process.env, + ...options.env, + PATH: `${pathToCdkAssets}${path.delimiter}${options.env?.PATH ?? process.env.PATH}`, + }, + }); } catch (e) { if (e.code === 'ENOENT') { throw new Error('Unable to execute \'docker\' in order to build a container asset. 
Please install \'docker\' and try again.'); diff --git a/packages/cdk-assets/test/private/docker-credentials.test.ts b/packages/cdk-assets/test/private/docker-credentials.test.ts index 6b521c67457b6..19160ccd0c880 100644 --- a/packages/cdk-assets/test/private/docker-credentials.test.ts +++ b/packages/cdk-assets/test/private/docker-credentials.test.ts @@ -97,8 +97,12 @@ describe('fetchDockerLoginCredentials', () => { await expect(fetchDockerLoginCredentials(aws, config, 'misconfigured.example.com')).rejects.toThrow(/unknown credential type/); }); + test('does not throw on correctly configured raw domain', async () => { + expect(fetchDockerLoginCredentials(aws, config, 'https://secret.example.com/v1/')).resolves; + }); + describe('SecretsManager', () => { - test('returns the credentials sucessfully if configured correctly', async () => { + test('returns the credentials sucessfully if configured correctly - domain', async () => { mockSecretWithSecretString({ username: 'secretUser', secret: 'secretPass' }); const creds = await fetchDockerLoginCredentials(aws, config, 'secret.example.com'); @@ -106,6 +110,14 @@ describe('fetchDockerLoginCredentials', () => { expect(creds).toEqual({ Username: 'secretUser', Secret: 'secretPass' }); }); + test('returns the credentials successfully if configured correctly - raw domain', async () => { + mockSecretWithSecretString({ username: 'secretUser', secret: 'secretPass' }); + + const creds = await fetchDockerLoginCredentials(aws, config, 'https://secret.example.com'); + + expect(creds).toEqual({ Username: 'secretUser', Secret: 'secretPass' }); + }); + test('throws when SecretsManager returns an error', async () => { const errMessage = "Secrets Manager can't find the specified secret."; aws.mockSecretsManager.getSecretValue = mockedApiFailure('ResourceNotFoundException', errMessage); From 883c1a32eb73cd7e4bd9d8e2cafe06a70e5264f7 Mon Sep 17 00:00:00 2001 From: AWS CDK Automation <43080478+aws-cdk-automation@users.noreply.github.com> Date: Mon, 10 Jan 2022 02:33:15 -0800 Subject: [PATCH 09/40] docs(cfnspec): update CloudFormation documentation (#18338) Co-authored-by: AWS CDK Team Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../spec-source/cfn-docs/cfn-docs.json | 270 ++++++++++++++++-- 1 file changed, 249 insertions(+), 21 deletions(-) diff --git a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json index 8340dcdfab2e6..337ce790bae1d 100644 --- a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json +++ b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json @@ -902,6 +902,7 @@ }, "AWS::ApiGateway::Deployment": { "attributes": { + "DeploymentId": "", "Ref": "`Ref` returns the deployment ID, such as `123abc` ." }, "description": "The `AWS::ApiGateway::Deployment` resource deploys an API Gateway `RestApi` resource to a stage so that clients can call the API over the internet. The stage acts as an environment.", @@ -3933,6 +3934,17 @@ "S3Key": "The S3 key of the S3 object." 
} }, + "AWS::AppStream::ApplicationEntitlementAssociation": { + "attributes": { + "Ref": "When you pass the logical ID of this resource to the intrinsic `Ref` function, `Ref` returns the combination of the `StackName` , `EntitlementName` , and `ApplicationIdentifier` , such as `abcdefStack|abcdefEntitlement|abcdefApplication` .\n\nFor more information about using the `Ref` function, see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) ." + }, + "description": "Associates an application to an entitlement.", + "properties": { + "ApplicationIdentifier": "The identifier of the application.", + "EntitlementName": "The name of the entitlement.", + "StackName": "The name of the stack." + } + }, "AWS::AppStream::ApplicationFleetAssociation": { "attributes": { "Ref": "When you pass the logical ID of this resource to the intrinsic `Ref` function, `Ref` returns a combination of the `FleetName` and `ApplicationArn` , such as `aabcdefgFleet|arn:aws:appstream:us-west-2:123456789123:application/abcdefg` .\n\nFor more information about using the `Ref` function, see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) ." @@ -3960,6 +3972,29 @@ "AccountPassword": "The password for the account." } }, + "AWS::AppStream::Entitlement": { + "attributes": { + "CreatedTime": "The time when the entitlement was created.", + "LastModifiedTime": "The time when the entitlement was last modified.", + "Ref": "When you pass the logical ID of this resource to the intrinsic `Ref` function, `Ref` returns the combination of the `StackName` and `Name` , such as `abcdefStack|abcdefEntitlement` .\n\nFor more information about using the `Ref` function, see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) ." + }, + "description": "Creates an entitlement to control access, based on user attributes, to specific applications within a stack. Entitlements apply to SAML 2.0 federated user identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all applications in a stack. Entitlements don't apply to the desktop stream view application or to applications managed by a dynamic app provider using the Dynamic Application Framework.", + "properties": { + "AppVisibility": "Specifies whether to entitle all apps or only selected apps.", + "Attributes": "The attributes of the entitlement.", + "Description": "The description of the entitlement.", + "Name": "The name of the entitlement.", + "StackName": "The name of the stack." + } + }, + "AWS::AppStream::Entitlement.Attribute": { + "attributes": {}, + "description": "An attribute that belongs to an entitlement. Application entitlements work by matching a supported SAML 2.0 attribute name to a value when a user identity federates to an AppStream 2.0 SAML application.", + "properties": { + "Name": "A supported AWS IAM SAML PrincipalTag attribute that is matched to a value when a user identity federates to an AppStream 2.0 SAML application.\n\nThe following are supported values:\n\n- roles\n- department\n- organization\n- groups\n- title\n- costCenter\n- userType", + "Value": "A value that is matched to a supported SAML attribute name when a user identity federates to an AppStream 2.0 SAML application." + } + }, "AWS::AppStream::Fleet": { "attributes": {}, "description": "The `AWS::AppStream::Fleet` resource creates a fleet for Amazon AppStream 2.0. 
A fleet consists of streaming instances that run a specified image when using Always-On or On-Demand.", @@ -8831,10 +8866,28 @@ "properties": { "ExcludedAccounts": "A comma-separated list of accounts excluded from organization config rule.", "OrganizationConfigRuleName": "The name that you assign to organization config rule.", + "OrganizationCustomCodeRuleMetadata": "", "OrganizationCustomRuleMetadata": "An `OrganizationCustomRuleMetadata` object.", "OrganizationManagedRuleMetadata": "An `OrganizationManagedRuleMetadata` object." } }, + "AWS::Config::OrganizationConfigRule.OrganizationCustomCodeRuleMetadata": { + "attributes": {}, + "description": "", + "properties": { + "CodeText": "", + "DebugLogDeliveryAccounts": "", + "Description": "", + "InputParameters": "", + "MaximumExecutionFrequency": "", + "OrganizationConfigRuleTriggerTypes": "", + "ResourceIdScope": "", + "ResourceTypesScope": "", + "Runtime": "", + "TagKeyScope": "", + "TagValueScope": "" + } + }, "AWS::Config::OrganizationConfigRule.OrganizationCustomRuleMetadata": { "attributes": {}, "description": "An object that specifies organization custom rule metadata such as resource type, resource ID of AWS resource, Lambda function ARN, and organization trigger types that trigger AWS Config to evaluate your AWS resources against a rule. It also provides the frequency with which you want AWS Config to run evaluations for the rule if the trigger type is periodic.", @@ -11266,6 +11319,7 @@ "description": "Specifies a VPC flow log that captures IP traffic for a specified network interface, subnet, or VPC. To view the log data, use Amazon CloudWatch Logs (CloudWatch Logs) to help troubleshoot connection issues. For example, you can use a flow log to investigate why certain traffic isn't reaching an instance, which can help you diagnose overly restrictive security group rules. For more information, see [VPC Flow Logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html) in the *Amazon VPC User Guide* .", "properties": { "DeliverLogsPermissionArn": "The ARN for the IAM role that permits Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nIf you specify `LogDestinationType` as `s3` , do not specify `DeliverLogsPermissionArn` or `LogGroupName` .", + "DestinationOptions": "The destination options.", "LogDestination": "The destination to which the flow log data is to be published. Flow log data can be published to a CloudWatch Logs log group or an Amazon S3 bucket. The value specified for this parameter depends on the value specified for `LogDestinationType` .\n\nIf `LogDestinationType` is not specified or `cloud-watch-logs` , specify the Amazon Resource Name (ARN) of the CloudWatch Logs log group. For example, to publish to a log group called `my-logs` , specify `arn:aws:logs:us-east-1:123456789012:log-group:my-logs` . Alternatively, use `LogGroupName` instead.\n\nIf LogDestinationType is `s3` , specify the ARN of the Amazon S3 bucket. You can also specify a subfolder in the bucket. To specify a subfolder in the bucket, use the following ARN format: `bucket_ARN/subfolder_name/` . For example, to specify a subfolder named `my-logs` in a bucket named `my-bucket` , use the following ARN: `arn:aws:s3:::my-bucket/my-logs/` . You cannot use `AWSLogs` as a subfolder name. This is a reserved term.", "LogDestinationType": "The type of destination to which the flow log data is to be published. Flow log data can be published to CloudWatch Logs or Amazon S3. 
To publish flow log data to CloudWatch Logs, specify `cloud-watch-logs` . To publish flow log data to Amazon S3, specify `s3` .\n\nIf you specify `LogDestinationType` as `s3` , do not specify `DeliverLogsPermissionArn` or `LogGroupName` .\n\nDefault: `cloud-watch-logs`", "LogFormat": "The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see [Flow Log Records](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) . If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.\n\nSpecify the fields using the `${field-id}` format, separated by spaces.", @@ -11392,6 +11446,7 @@ "AWS::EC2::Instance": { "attributes": { "AvailabilityZone": "The Availability Zone where the specified instance is launched. For example: `us-east-1b` .\n\nYou can retrieve a list of all Availability Zones for a Region by using the [Fn::GetAZs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getavailabilityzones.html) intrinsic function.", + "PrivateDnsName": "The private DNS name of the specified instance. For example: `ip-10-24-34-0.ec2.internal` .", "PrivateIp": "The private IP address of the specified instance. For example: `10.24.34.0` .", "PublicDnsName": "The public DNS name of the specified instance. For example: `ec2-107-20-50-45.compute-1.amazonaws.com` .", "PublicIp": "The public IP address of the specified instance. For example: `192.0.2.0` .", @@ -11725,6 +11780,33 @@ "SpotOptions": "The options for Spot Instances." } }, + "AWS::EC2::LaunchTemplate.InstanceRequirements": { + "attributes": {}, + "description": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nWhen you specify multiple parameters, you get instance types that satisfy all of the specified parameters. If you specify multiple values for a parameter, you get instance types that satisfy any of the specified values.\n\n> You must specify `VCpuCount` and `MemoryMiB` . All other parameters are optional. Any unspecified optional parameter is set to its default. 
\n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", + "properties": { + "AcceleratorCount": "The minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips) on an instance.\n\nTo exclude accelerator-enabled instance types, set `Max` to `0` .\n\nDefault: No minimum or maximum limits", + "AcceleratorManufacturers": "Indicates whether instance types must have accelerators by specific manufacturers.\n\n- For instance types with NVIDIA devices, specify `nvidia` .\n- For instance types with AMD devices, specify `amd` .\n- For instance types with AWS devices, specify `amazon-web-services` .\n- For instance types with Xilinx devices, specify `xilinx` .\n\nDefault: Any manufacturer", + "AcceleratorNames": "The accelerators that must be on the instance type.\n\n- For instance types with NVIDIA A100 GPUs, specify `a100` .\n- For instance types with NVIDIA V100 GPUs, specify `v100` .\n- For instance types with NVIDIA K80 GPUs, specify `k80` .\n- For instance types with NVIDIA T4 GPUs, specify `t4` .\n- For instance types with NVIDIA M60 GPUs, specify `m60` .\n- For instance types with AMD Radeon Pro V520 GPUs, specify `radeon-pro-v520` .\n- For instance types with Xilinx VU9P FPGAs, specify `vu9p` .\n\nDefault: Any accelerator", + "AcceleratorTotalMemoryMiB": "The minimum and maximum amount of total accelerator memory, in MiB.\n\nDefault: No minimum or maximum limits", + "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n- For instance types with inference accelerators, specify `inference` .\n\nDefault: Any accelerator type", + "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`", + "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", + "BurstablePerformance": "Indicates whether burstable performance T instance types are included, excluded, or required. 
For more information, see [Burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) .\n\n- To include burstable performance instance types, specify `included` .\n- To require only burstable performance instance types, specify `required` .\n- To exclude burstable performance instance types, specify `excluded` .\n\nDefault: `excluded`", + "CpuManufacturers": "The CPU manufacturers to include.\n\n- For instance types with Intel CPUs, specify `intel` .\n- For instance types with AMD CPUs, specify `amd` .\n- For instance types with AWS CPUs, specify `amazon-web-services` .\n\n> Don't confuse the CPU manufacturer with the CPU architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template. \n\nDefault: Any manufacturer", + "ExcludedInstanceTypes": "The instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk ( `*` ), to exclude an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` ,Amazon EC2 will exclude the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will exclude all the M5a instance types, but not the M5n instance types.\n\nDefault: No excluded instance types", + "InstanceGenerations": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Current generation instance types are typically the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nFor current generation instance types, specify `current` .\n\nFor previous generation instance types, specify `previous` .\n\nDefault: Current and previous generation instance types", + "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide* .\n\n- To include instance types with instance store volumes, specify `included` .\n- To require only instance types with instance store volumes, specify `required` .\n- To exclude instance types with instance store volumes, specify `excluded` .\n\nDefault: `included`", + "LocalStorageTypes": "The type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SDD) storage, specify `sdd` .\n\nDefault: `hdd` and `sdd`", + "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", + "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", + "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", + "OnDemandMaxPricePercentageOverLowestPrice": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the cheapest M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\nDefault: `20`", + "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", + "SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instances. This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage above the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\nDefault: `100`", + "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", + "VCpuCount": "The minimum and maximum number of vCPUs." + } + }, "AWS::EC2::LaunchTemplate.Ipv6Add": { "attributes": {}, "description": "Specifies an IPv6 address in an Amazon EC2 launch template.\n\n`Ipv6Add` is a property of [AWS::EC2::LaunchTemplate NetworkInterface](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-networkinterface.html) .", @@ -11750,6 +11832,7 @@ "ImageId": "The ID of the AMI.", "InstanceInitiatedShutdownBehavior": "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n\nDefault: `stop`", "InstanceMarketOptions": "The market (purchasing) option for the instances.", + "InstanceRequirements": "", "InstanceType": "The instance type. For more information, see [Instance Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nIf you specify `InstanceTypes` , you can't specify `InstanceRequirements` .", "KernelId": "The ID of the kernel.\n\nWe recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [User Provided Kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon EC2 User Guide* .", "KeyName": "The name of the key pair. 
You can create a key pair using [CreateKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) or [ImportKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html) .\n\n> If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.", @@ -13175,7 +13258,6 @@ "attributes": { "CreationTimestamp": "The date and time the VPC endpoint was created. For example: `Fri Sep 28 23:34:36 UTC 2018.`", "DnsEntries": "(Interface endpoint) The DNS entries for the endpoint. Each entry is a combination of the hosted zone ID and the DNS name. The entries are ordered as follows: regional public DNS, zonal public DNS, private DNS, and wildcard DNS. This order is not enforced for AWS Marketplace services.\n\nThe following is an example. In the first entry, the hosted zone ID is Z1HUB23UULQXV and the DNS name is vpce-01abc23456de78f9g-12abccd3.ec2.us-east-1.vpce.amazonaws.com.\n\n[\"Z1HUB23UULQXV:vpce-01abc23456de78f9g-12abccd3.ec2.us-east-1.vpce.amazonaws.com\", \"Z1HUB23UULQXV:vpce-01abc23456de78f9g-12abccd3-us-east-1a.ec2.us-east-1.vpce.amazonaws.com\", \"Z1C12344VYDITB0:ec2.us-east-1.amazonaws.com\"]\n\nIf you update the `PrivateDnsEnabled` or `SubnetIds` properties, the DNS entries in the list will change.", - "Id": "", "NetworkInterfaceIds": "(Interface endpoint) One or more network interface IDs. If you update the `PrivateDnsEnabled` or `SubnetIds` properties, the items in this list might change.", "Ref": "`Ref` returns the ID of the VPC endpoint." }, @@ -13211,7 +13293,8 @@ "properties": { "AcceptanceRequired": "Indicates whether requests from service consumers to create an endpoint to your service must be accepted.", "GatewayLoadBalancerArns": "The Amazon Resource Names (ARNs) of one or more Gateway Load Balancers.", - "NetworkLoadBalancerArns": "The Amazon Resource Names (ARNs) of one or more Network Load Balancers for your service." + "NetworkLoadBalancerArns": "The Amazon Resource Names (ARNs) of one or more Network Load Balancers for your service.", + "PayerResponsibility": "" } }, "AWS::EC2::VPCEndpointServicePermissions": { @@ -14185,7 +14268,7 @@ "ClusterSecurityGroupId": "The cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control plane to data plane communication.\n\nThis parameter is only returned by Amazon EKS clusters that support managed node groups. For more information, see [Managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) in the *Amazon EKS User Guide* .", "EncryptionConfigKeyArn": "Amazon Resource Name (ARN) or alias of the customer master key (CMK).", "Endpoint": "The endpoint for your Kubernetes API server, such as `https://5E1D0CEXAMPLEA591B746AFC5AB30262.yl4.us-west-2.eks.amazonaws.com` .", - "KubernetesNetworkConfig.ServiceIpv6Cidr": "", + "KubernetesNetworkConfig.ServiceIpv6Cidr": "The CIDR block that Kubernetes Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified `ipv6` for *ipFamily* when you created the cluster. 
Kubernetes assigns Service addresses from the unique local address range ( `fc00::/7` ) because you can't specify a custom IPv6 CIDR block when you create the cluster.", "OpenIdConnectIssuerUrl": "The issuer URL for the OIDC identity provider of the cluster, such as `https://oidc.eks.us-west-2.amazonaws.com/id/EXAMPLED539D4633E53DE1B716D3041E` . If you need to remove `https://` from this output value, you can include the following code in your template.\n\n`!Select [1, !Split [\"//\", !GetAtt EKSCluster.OpenIdConnectIssuerUrl]]`", "Ref": "`Ref` returns the resource name. For example:\n\n`{ \"Ref\": \"myCluster\" }`\n\nFor the Amazon EKS cluster `myCluster` , `Ref` returns the name of the cluster." }, @@ -14220,9 +14303,9 @@ "attributes": {}, "description": "The Kubernetes network configuration for the cluster.", "properties": { - "IpFamily": "", + "IpFamily": "Specify which IP family is used to assign Kubernetes Pod and Service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them.\n\nYou can only specify `ipv6` for 1.21 and later clusters that use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements listed in the considerations listed in [Assigning IPv6 addresses to Pods and Services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the Amazon EKS User Guide. Kubernetes assigns Services IPv6 addresses from the unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR block. Pod addresses are assigned from the subnet's IPv6 CIDR.", "ServiceIpv4Cidr": "Don't specify a value if you select `ipv6` for *ipFamily* . The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. The block must meet the following requirements:\n\n- Within one of the following private IP address blocks: 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16.\n- Doesn't overlap with any CIDR block assigned to the VPC that you selected for VPC.\n- Between /24 and /12.\n\n> You can only specify a custom CIDR block when you create a cluster and can't change this value once the cluster is created.", - "ServiceIpv6Cidr": "" + "ServiceIpv6Cidr": "The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified `ipv6` for *ipFamily* when you created the cluster. Kubernetes assigns Service addresses from the unique local address range ( `fc00::/7` ) because you can't specify a custom IPv6 CIDR block when you create the cluster." } }, "AWS::EKS::Cluster.Logging": { @@ -15082,7 +15165,7 @@ "properties": { "ReplicationGroupId": "The replication group id of the Global datastore member.", "ReplicationGroupRegion": "The Amazon region of the Global datastore member.", - "Role": "Indicates the role of the replication group, primary or secondary." + "Role": "Indicates the role of the replication group, `PRIMARY` or `SECONDARY` ." 
} }, "AWS::ElastiCache::GlobalReplicationGroup.RegionalConfiguration": { @@ -16185,7 +16268,16 @@ "description": "Creates a new event bus within your account. This can be a custom event bus which you can use to receive events from your custom applications and services, or it can be a partner event bus which can be matched to a partner event source.", "properties": { "EventSourceName": "If you are creating a partner event bus, this specifies the partner event source that the new event bus will be matched with.", - "Name": "The name of the new event bus.\n\nEvent bus names cannot contain the / character. You can't use the name `default` for a custom event bus, as this name is already used for your account's default event bus.\n\nIf this is a partner event bus, the name must exactly match the name of the partner event source that this event bus is matched to." + "Name": "The name of the new event bus.\n\nEvent bus names cannot contain the / character. You can't use the name `default` for a custom event bus, as this name is already used for your account's default event bus.\n\nIf this is a partner event bus, the name must exactly match the name of the partner event source that this event bus is matched to.", + "Tags": "" + } + }, + "AWS::Events::EventBus.TagEntry": { + "attributes": {}, + "description": "", + "properties": { + "Key": "", + "Value": "" } }, "AWS::Events::EventBusPolicy": { @@ -17470,6 +17562,7 @@ "NotificationTarget": "An SNS topic ARN that is set up to receive game session placement notifications. See [Setting up notifications for game session placement](https://docs.aws.amazon.com/gamelift/latest/developerguide/queue-notification.html) .", "PlayerLatencyPolicies": "A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.", "PriorityConfiguration": "Custom settings to use when prioritizing destinations and locations for game session placements. This configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly named will be automatically applied at the end of the prioritization process.", + "Tags": "A list of labels to assign to the new game session queue resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use `TagResource` , `UntagResource` , and `ListTagsForResource` to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.", "TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status." } }, @@ -17524,7 +17617,8 @@ "Name": "A unique identifier for a matchmaking configuration. Matchmaking requests use this name to identify which matchmaking configuration to use.", "NotificationTarget": "An SNS topic ARN that is set up to receive matchmaking notifications. 
See [Setting up notifications for matchmaking](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) for more information.", "RequestTimeoutSeconds": "The maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.", - "RuleSetName": "A unique identifier for the matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region." + "RuleSetName": "A unique identifier for the matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.", + "Tags": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use `TagResource` , `UntagResource` , and `ListTagsForResource` to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits." } }, "AWS::GameLift::MatchmakingConfiguration.GameProperty": { @@ -17544,7 +17638,8 @@ "description": "The `AWS::GameLift::MatchmakingRuleSet` resource creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a matchmaking configuration.", "properties": { "Name": "A unique identifier for the matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. Note that the rule set name is different from the optional `name` field in the rule set body.", - "RuleSetBody": "A collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field." + "RuleSetBody": "A collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.", + "Tags": "A list of labels to assign to the new matchmaking rule set resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use `TagResource` , `UntagResource` , and `ListTagsForResource` to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits." } }, "AWS::GameLift::Script": { @@ -17557,6 +17652,7 @@ "properties": { "Name": "A descriptive label that is associated with a script. Script names do not need to be unique.", "StorageLocation": "The location in Amazon S3 where build or script files are stored for access by Amazon GameLift.", + "Tags": "", "Version": "The version that is associated with a build or script. Version strings do not need to be unique."
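The three new `Tags` entries above all follow the same pattern. As a rough sketch of what that looks like from the CDK side (assuming the regenerated GameLift L1 constructs pick up a standard `tags` property; the rule set name and tag values are placeholders):

```ts
import * as gamelift from '@aws-cdk/aws-gamelift';
import { Stack } from '@aws-cdk/core';

declare const stack: Stack;

// Developer-defined key-value pairs, as described in the Tags docs above.
new gamelift.CfnMatchmakingRuleSet(stack, 'RuleSet', {
  name: 'example-rule-set',                                    // placeholder name
  ruleSetBody: JSON.stringify({ ruleLanguageVersion: '1.0' }), // minimal placeholder body
  tags: [{ key: 'team', value: 'game-services' }],
});
```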
} }, @@ -20326,7 +20422,7 @@ "properties": { "DisplayName": "Field that represents a friendly name in the console for the custom metric; it doesn't have to be unique. Don't use this name as the metric identifier in the device metric report. Can be updated.", "MetricName": "The name of the custom metric. This will be used in the metric report submitted from the device/thing. It shouldn't begin with `aws:` . Cannot be updated once it's defined.", - "MetricType": "The type of the custom metric. Types include `string-list` , `ip-address-list` , and `number-list` .", + "MetricType": "The type of the custom metric. Types include `string-list` , `ip-address-list` , `number-list` , and `number` .", "Tags": "Metadata that can be used to manage the custom metric." } }, @@ -20421,6 +20517,7 @@ "Document": "The job document.\n\nRequired if you don't specify a value for `documentSource` .", "DocumentSource": "An S3 link to the job document to use in the template. Required if you don't specify a value for `document` .\n\n> If the job document resides in an S3 bucket, you must use a placeholder link when specifying the document.\n> \n> The placeholder link is of the following form:\n> \n> `${aws:iot:s3-presigned-url:https://s3.amazonaws.com/ *bucket* / *key* }`\n> \n> where *bucket* is your bucket name and *key* is the object in the bucket to which you are linking.", "JobArn": "The ARN of the job to use as the basis for the job template.", + "JobExecutionsRetryConfig": "Allows you to create the criteria to retry a job.", "JobExecutionsRolloutConfig": "Allows you to create a staged rollout of a job.", "JobTemplateId": "A unique identifier for the job template. We recommend using a UUID. Alpha-numeric characters, \"-\", and \"_\" are valid for use here.", "PresignedUrlConfig": "Configuration for pre-signed S3 URLs.", @@ -23490,11 +23587,6 @@ "S3ContentLocation": "The location of the custom artifacts." } }, - "AWS::KinesisAnalyticsV2::Application.CustomArtifactsConfiguration": { - "attributes": {}, - "description": "A list of `CustomArtifactConfiguration` objects.", - "properties": {} - }, "AWS::KinesisAnalyticsV2::Application.DeployAsApplicationConfiguration": { "attributes": {}, "description": "The information required to deploy a Kinesis Data Analytics Studio notebook as an application with durable state.", @@ -24709,6 +24801,13 @@ "Enabled": "Indicates whether an intent uses the dialog code hook during a conversation with a user." } }, + "AWS::Lex::Bot.ExternalSourceSetting": { + "attributes": {}, + "description": "Provides information about the external source of the slot type's definition.", + "properties": { + "GrammarSlotTypeSetting": "Settings required for a slot type based on a grammar that you provide." + } + }, "AWS::Lex::Bot.FulfillmentCodeHookSetting": { "attributes": {}, "description": "Determines if a Lambda function should be invoked for a specific intent.", @@ -24746,6 +24845,22 @@ "UpdateResponse": "Provides configuration information for messages sent periodically to the user while the fulfillment Lambda function is running." } }, + "AWS::Lex::Bot.GrammarSlotTypeSetting": { + "attributes": {}, + "description": "Settings required for a slot type based on a grammar that you provide.", + "properties": { + "Source": "The source of the grammar used to create the slot type." 
+ } + }, + "AWS::Lex::Bot.GrammarSlotTypeSource": { + "attributes": {}, + "description": "Describes the Amazon S3 bucket name and location for the grammar that is the source of the slot type.", + "properties": { + "KmsKeyArn": "The AWS Key Management Service key required to decrypt the contents of the grammar, if any.", + "S3BucketName": "The name of the S3 bucket that contains the grammar source.", + "S3ObjectKey": "The path to the grammar in the S3 bucket." + } + }, "AWS::Lex::Bot.ImageResponseCard": { "attributes": {}, "description": "A card that is shown to the user by a messaging platform. You define the contents of the card, the card is displayed by the platform.\n\nWhen you use a response card, the response from the user is constrained to the text associated with a button on the card.", @@ -24951,6 +25066,7 @@ "description": "Describes a slot type.", "properties": { "Description": "A description of the slot type. Use the description to help identify the slot type in lists.", + "ExternalSourceSetting": "Sets the type of external information used to create the slot type.", "Name": "The name of the slot type. A slot type name must be unique withing the account.", "ParentSlotTypeSignature": "The built-in slot type used as a parent of this slot type. When you define a parent slot type, the new slot type has the configuration of the parent lot type.\n\nOnly AMAZON.AlphaNumeric is supported.", "SlotTypeValues": "A list of SlotTypeValue objects that defines the values that the slot type can take. Each value can have a list of synonyms, additional values that help train the machine learning model about the values that it resolves for the slot.", @@ -25140,6 +25256,14 @@ "SourceBotVersion": "The version of a bot used for a bot locale." } }, + "AWS::Lex::BotVersion.BotVersionLocaleSpecification": { + "attributes": {}, + "description": "Specifies the locale that Amazon Lex adds to this version. You can choose the Draft version or any other previously published version for each locale. When you specify a source version, the locale data is copied from the source version to the new version.", + "properties": { + "BotVersionLocaleDetails": "The version of a bot used for a bot locale.", + "LocaleId": "The identifier of the locale to add to the version." + } + }, "AWS::Lex::BotVersion.BotVersionLocaleSpecificationItem": { "attributes": {}, "description": "Specifies the details of a locale in a bot version.", @@ -25158,6 +25282,11 @@ "ResourceArn": "The Amazon Resource Name (ARN) of the bot or bot alias that the resource policy is attached to." } }, + "AWS::Lex::ResourcePolicy.Policy": { + "attributes": {}, + "description": "The resource policy to add to the resource. The policy is a JSON structure that contains one or more statements that define the policy. The policy must follow the IAM policy syntax. For more information about the contents of a JSON policy document, see the [IAM JSON policy reference](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) .", + "properties": {} + }, "AWS::LicenseManager::Grant": { "attributes": { "GrantArn": "The Amazon Resource Name (ARN) of the grant.", @@ -25255,6 +25384,53 @@ "End": "End of the time range." 
} }, + "AWS::Lightsail::Alarm": { + "attributes": { + "AlarmArn": "The Amazon Resource Name (ARN) of the alarm.", + "Ref": "", + "State": "The current state of the alarm.\n\nAn alarm has the following possible states:\n\n- `ALARM` - The metric is outside of the defined threshold.\n- `INSUFFICIENT_DATA` - The alarm has recently started, the metric is not available, or not enough data is available for the metric to determine the alarm state.\n- `OK` - The metric is within the defined threshold." + }, + "description": "The `AWS::Lightsail::Alarm` resource specifies an alarm that can be used to monitor a single metric for one of your Lightsail resources.", + "properties": { + "AlarmName": "The name of the alarm.", + "ComparisonOperator": "The arithmetic operation to use when comparing the specified statistic and threshold.", + "ContactProtocols": "The contact protocols for the alarm, such as `Email` , `SMS` (text messaging), or both.\n\n*Allowed Values* : `Email` | `SMS`", + "DatapointsToAlarm": "The number of data points within the evaluation periods that must be breaching to cause the alarm to go to the `ALARM` state.", + "EvaluationPeriods": "The number of periods over which data is compared to the specified threshold.", + "MetricName": "The name of the metric associated with the alarm.", + "MonitoredResourceName": "The name of the Lightsail resource that the alarm monitors.", + "NotificationEnabled": "A Boolean value indicating whether the alarm is enabled.", + "NotificationTriggers": "The alarm states that trigger a notification.\n\n> To specify the `OK` and `INSUFFICIENT_DATA` values, you must also specify `ContactProtocols` values. Otherwise, the `OK` and `INSUFFICIENT_DATA` values will not take effect and the stack will drift. \n\n*Allowed Values* : `OK` | `ALARM` | `INSUFFICIENT_DATA`", + "Threshold": "The value against which the specified statistic is compared.", + "TreatMissingData": "Specifies how the alarm handles missing data points.\n\nAn alarm can treat missing data in the following ways:\n\n- `breaching` - Assumes the missing data is not within the threshold. Missing data counts towards the number of times that the metric is not within the threshold.\n- `notBreaching` - Assumes the missing data is within the threshold. Missing data does not count towards the number of times that the metric is not within the threshold.\n- `ignore` - Ignores the missing data. Maintains the current alarm state.\n- `missing` - Missing data is treated as missing." + } + }, + "AWS::Lightsail::Bucket": { + "attributes": { + "AbleToUpdateBundle": "A Boolean value indicating whether the bundle that is currently applied to your bucket can be changed to another bundle.", + "BucketArn": "The Amazon Resource Name (ARN) of the bucket.", + "Ref": "", + "Url": "The URL of the bucket." + }, + "description": "The `AWS::Lightsail::Bucket` resource specifies a bucket.", + "properties": { + "AccessRules": "An object that describes the access rules for the bucket.", + "BucketName": "The name of the bucket.", + "BundleId": "The bundle ID for the bucket (for example, `small_1_0` ).\n\nA bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.", + "ObjectVersioning": "Indicates whether object versioning is enabled for the bucket.\n\nThe following options can be configured:\n\n- `Enabled` - Object versioning is enabled.\n- `Suspended` - Object versioning was previously enabled but is currently suspended. 
Existing object versions are retained.\n- `NeverEnabled` - Object versioning has never been enabled.", + "ReadOnlyAccessAccounts": "An array of AWS account IDs that have read-only access to the bucket.", + "ResourcesReceivingAccess": "An array of Lightsail instances that have access to the bucket.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) in the *AWS CloudFormation User Guide* .\n\n> The `Value` of `Tags` is optional for Lightsail resources." + } + }, + "AWS::Lightsail::Bucket.AccessRules": { + "attributes": {}, + "description": "`AccessRules` is a property of the [AWS::Lightsail::Bucket](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lightsail-bucket.html) resource. It describes access rules for a bucket.", + "properties": { + "AllowPublicOverrides": "A Boolean value indicating whether the access control list (ACL) permissions that are applied to individual objects override the `GetObject` option that is currently specified.\n\nWhen this is true, you can use the [PutObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html) Amazon S3 API operation to set individual objects to public (read-only) or private, using either the `public-read` ACL or the `private` ACL.", + "GetObject": "Specifies the anonymous access to all objects in a bucket.\n\nThe following options can be specified:\n\n- `public` - Sets all objects in the bucket to public (read-only), making them readable by everyone on the internet.\n\nIf the `GetObject` value is set to `public` , then all objects in the bucket default to public regardless of the `allowPublicOverrides` value.\n- `private` - Sets all objects in the bucket to private, making them readable only by you and anyone that you grant access to.\n\nIf the `GetObject` value is set to `private` , and the `allowPublicOverrides` value is set to `true` , then all objects in the bucket default to private unless they are configured with a `public-read` ACL. Individual objects with a `public-read` ACL are readable by everyone on the internet." + } + }, "AWS::Lightsail::Database": { "attributes": { "DatabaseArn": "The Amazon Resource Name (ARN) of the database (for example, `arn:aws:lightsail:us-east-2:123456789101:RelationalDatabase/244ad76f-8aad-4741-809f-12345EXAMPLE` ).", @@ -25275,7 +25451,7 @@ "RelationalDatabaseBundleId": "The bundle ID for the database (for example, `medium_1_0` ).", "RelationalDatabaseName": "The name of the instance.", "RelationalDatabaseParameters": "An array of parameters for the database.", - "RotateMasterUserPassword": "A boolean value indicating whether to change the primary user password to a new, strong password generated by Lightsail .\n\n> The `RotateMasterUserPassword` and `MasterUserPassword` parameters cannot be used together in the same template.", + "RotateMasterUserPassword": "A Boolean value indicating whether to change the primary user password to a new, strong password generated by Lightsail .\n\n> The `RotateMasterUserPassword` and `MasterUserPassword` parameters cannot be used together in the same template.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) in the *AWS CloudFormation User Guide* .\n\n> The `Value` of `Tags` is optional for Lightsail resources." 
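To make the `AccessRules` semantics documented above concrete, here is a minimal sketch using the Lightsail L1 construct generated from this spec; the property names are assumed to follow the usual CloudFormation-to-CDK casing, and the bucket name is a placeholder:

```ts
import * as lightsail from '@aws-cdk/aws-lightsail';
import { Stack } from '@aws-cdk/core';

declare const stack: Stack;

// All objects default to private, but individual objects may still be
// made public (read-only) via a 'public-read' ACL because
// allowPublicOverrides is true.
new lightsail.CfnBucket(stack, 'ExampleBucket', {
  bucketName: 'example-bucket', // placeholder
  bundleId: 'small_1_0',
  accessRules: {
    getObject: 'private',
    allowPublicOverrides: true,
  },
});
```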
} }, @@ -25449,6 +25625,38 @@ "Name": "The state of the instance (for example, `running` or `pending` )." } }, + "AWS::Lightsail::LoadBalancer": { + "attributes": { + "LoadBalancerArn": "The Amazon Resource Name (ARN) of the load balancer.", + "Ref": "" + }, + "description": "The `AWS::Lightsail::LoadBalancer` resource specifies a load balancer that can be used with Lightsail instances.\n\n> You cannot attach TLS certificates to a load balancer using the `AWS::Lightsail::LoadBalancer` resource type. Instead, use the `LoadBalancerTlsCertificate` resource type to create a certificate and attach it to a load balancer.", + "properties": { + "AttachedInstances": "The Lightsail instances to attach to the load balancer.", + "HealthCheckPath": "The path on the attached instance where the health check will be performed. If no path is specified, the load balancer tries to make a request to the default (root) page ( `/index.html` ).", + "InstancePort": "The port that the load balancer uses to direct traffic to your Lightsail instances. For HTTP traffic, specify port `80` . For HTTPS traffic, specify port `443` .", + "IpAddressType": "The IP address type of the load balancer.\n\nThe possible values are `ipv4` for IPv4 only, and `dualstack` for both IPv4 and IPv6.", + "LoadBalancerName": "The name of the load balancer.", + "SessionStickinessEnabled": "A Boolean value indicating whether session stickiness is enabled.\n\nEnable session stickiness (also known as *session affinity* ) to bind a user's session to a specific instance. This ensures that all requests from the user during the session are sent to the same instance.", + "SessionStickinessLBCookieDurationSeconds": "The time period, in seconds, after which the load balancer session stickiness cookie should be considered stale. If you do not specify this parameter, the default value is 0, which indicates that the sticky session should last for the duration of the browser session.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) in the *AWS CloudFormation User Guide* .\n\n> The `Value` of `Tags` is optional for Lightsail resources." + } + }, + "AWS::Lightsail::LoadBalancerTlsCertificate": { + "attributes": { + "LoadBalancerTlsCertificateArn": "The Amazon Resource Name (ARN) of the SSL/TLS certificate.", + "Ref": "", + "Status": "The validation status of the SSL/TLS certificate.\n\nValid Values: `PENDING_VALIDATION` | `ISSUED` | `INACTIVE` | `EXPIRED` | `VALIDATION_TIMED_OUT` | `REVOKED` | `FAILED` | `UNKNOWN`" + }, + "description": "The `AWS::Lightsail::LoadBalancerTlsCertificate` resource specifies a TLS certificate that can be used with a Lightsail load balancer.", + "properties": { + "CertificateAlternativeNames": "An array of alternative domain names and subdomain names for your SSL/TLS certificate.\n\nIn addition to the primary domain name, you can have up to nine alternative domain names. Wildcards (such as `*.example.com` ) are not supported.", + "CertificateDomainName": "The domain name for the SSL/TLS certificate. For example, `example.com` or `www.example.com` .", + "CertificateName": "The name of the SSL/TLS certificate.", + "IsAttached": "A Boolean value indicating whether the SSL/TLS certificate is attached to a Lightsail load balancer.", + "LoadBalancerName": "The name of the load balancer that the SSL/TLS certificate is attached to."
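As the load balancer description above notes, TLS certificates are created and attached through the separate `LoadBalancerTlsCertificate` resource type. A minimal sketch of that two-resource pattern, again assuming the generated L1 constructs and using placeholder names:

```ts
import * as lightsail from '@aws-cdk/aws-lightsail';
import { Stack } from '@aws-cdk/core';

declare const stack: Stack;

const lb = new lightsail.CfnLoadBalancer(stack, 'WebLb', {
  loadBalancerName: 'web-lb',     // placeholder
  instancePort: 80,               // forward traffic to port 80 on the instances
  healthCheckPath: '/index.html', // the documented default
});

// The certificate references the load balancer by name, per the note above.
new lightsail.CfnLoadBalancerTlsCertificate(stack, 'WebLbCert', {
  loadBalancerName: lb.loadBalancerName,
  certificateName: 'web-cert',              // placeholder
  certificateDomainName: 'www.example.com', // placeholder domain
});
```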
+ } + }, "AWS::Lightsail::StaticIp": { "attributes": { "IpAddress": "The IP address of the static IP.", @@ -27331,6 +27539,7 @@ "Mode": "If \"vod,\" all segments are indexed and kept permanently in the destination and manifest. If \"live,\" only the number segments specified in keepSegments and indexNSegments are kept. Newer segments replace older segments, which might prevent players from rewinding all the way to the beginning of the channel. VOD mode uses HLS EXT-X-PLAYLIST-TYPE of EVENT while the channel is running, converting it to a \"VOD\" type manifest on completion of the stream.", "OutputSelection": "MANIFESTSANDSEGMENTS: Generates manifests (the master manifest, if applicable, and media manifests) for this output group. SEGMENTSONLY: Doesn't generate any manifests for this output group.", "ProgramDateTime": "Includes or excludes the EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The value is calculated as follows: Either the program date and time are initialized using the input timecode source, or the time is initialized using the input timecode source and the date is initialized using the timestampOffset.", + "ProgramDateTimeClock": "", "ProgramDateTimePeriod": "The period of insertion of the EXT-X-PROGRAM-DATE-TIME entry, in seconds.", "RedundantManifest": "ENABLED: The master manifest (.m3u8 file) for each pipeline includes information about both pipelines: first its own media files, then the media files of the other pipeline. This feature allows a playout device that supports stale manifest detection to switch from one manifest to the other, when the current manifest seems to be stale. There are still two destinations and two master manifests, but both master manifests reference the media files from both pipelines. DISABLED: The master manifest (.m3u8 file) for each pipeline includes information about its own pipeline only. For an HLS output group with MediaPackage as the destination, the DISABLED behavior is always followed. MediaPackage regenerates the manifests it serves to players, so a redundant manifest from MediaLive is irrelevant.", "SegmentLength": "The length of the MPEG-2 Transport Stream segments to create, in seconds. Note that segments will end on the next keyframe after this number of seconds, so the actual segment length might be longer.", @@ -27464,6 +27673,7 @@ "FilterStrength": "Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest).", "InputFilter": "Turns on the filter for this input. MPEG-2 inputs have the deblocking filter enabled by default. 1) auto - filtering is applied depending on input type/quality 2) disabled - no filtering is applied to the input 3) forced - filtering is applied regardless of the input type.", "NetworkInputSettings": "Information about how to connect to the upstream system.", + "Scte35Pid": "", "Smpte2038DataPreference": "Specifies whether to extract applicable ancillary data from a SMPTE-2038 source in this input. Applicable data types are captions, timecode, AFD, and SCTE-104 messages.\n- PREFER: Extract from SMPTE-2038 if present in this input, otherwise extract from another source (if any).\n- IGNORE: Never extract any ancillary data from SMPTE-2038.", "SourceEndBehavior": "The loop input if it is a file.", "VideoSelector": "Information about one video to extract from the input." 
@@ -29243,9 +29453,27 @@ "ClipboardMode": "Enable or disable the use of the system clipboard to copy and paste between the streaming session and streaming client.", "Ec2InstanceTypes": "The EC2 instance types that users can select from when launching a streaming session with this launch profile.", "MaxSessionLengthInMinutes": "The length of time, in minutes, that a streaming session can be active before it is stopped or terminated. After this point, Nimble Studio automatically terminates or stops the session. The default length of time is 690 minutes, and the maximum length of time is 30 days.", + "MaxStoppedSessionLengthInMinutes": "Integer that determines if you can start and stop your sessions and how long a session can stay in the STOPPED state. The default value is 0. The maximum value is 5760.\n\nIf the value is missing or set to 0, your sessions can\u2019t be stopped. If you then call `StopStreamingSession` , the session fails. If the time that a session stays in the READY state exceeds the `maxSessionLengthInMinutes` value, the session will automatically be terminated (instead of stopped).\n\nIf the value is set to a positive number, the session can be stopped. You can call `StopStreamingSession` to stop sessions in the READY state. If the time that a session stays in the READY state exceeds the `maxSessionLengthInMinutes` value, the session will automatically be stopped (instead of terminated).", + "SessionStorage": "(Optional) The upload storage for a streaming session.", "StreamingImageIds": "The streaming images that users can select from when launching a streaming session with this launch profile." } }, + "AWS::NimbleStudio::LaunchProfile.StreamConfigurationSessionStorage": { + "attributes": {}, + "description": "The configuration for a streaming session\u2019s upload storage.", + "properties": { + "Mode": "Allows artists to upload files to their workstations. The only valid option is `UPLOAD` .", + "Root": "The configuration for the upload storage root of the streaming session." + } + }, + "AWS::NimbleStudio::LaunchProfile.StreamingSessionStorageRoot": { + "attributes": {}, + "description": "The upload storage root location (folder) on streaming workstations where files are uploaded.", + "properties": { + "Linux": "The folder path in Linux workstations where files are uploaded.", + "Windows": "The folder path in Windows workstations where files are uploaded." + } + }, "AWS::NimbleStudio::StreamingImage": { "attributes": { "EulaIds": "The list of IDs of EULAs that must be accepted before a streaming session can be started using this streaming image.", @@ -31986,8 +32214,7 @@ "attributes": { "DBProxyArn": "The Amazon Resource Name (ARN) representing the target group.", "Endpoint": "The writer endpoint for the RDS DB instance or Aurora DB cluster.", - "Ref": "`Ref` returns the name of the DB proxy.", - "VpcId": "The VPC ID to associate with the DB proxy." + "Ref": "`Ref` returns the name of the DB proxy." 
}, "description": "The `AWS::RDS::DBProxy` resource creates or updates a DB proxy.\n\nFor information about RDS Proxy for Amazon RDS, see [Managing Connections with Amazon RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-proxy.html) in the *Amazon RDS User Guide* .\n\nFor information about RDS Proxy for Amazon Aurora, see [Managing Connections with Amazon RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-proxy.html) in the *Amazon Aurora User Guide* .\n\n> Limitations apply to RDS Proxy, including DB engine version limitations and AWS Region limitations.\n> \n> For information about limitations that apply to RDS Proxy for Amazon RDS, see [Limitations for RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-proxy.html#rds-proxy.limitations) in the *Amazon RDS User Guide* .\n> \n> For information about limitations that apply to RDS Proxy for Amazon Aurora, see [Limitations for RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-proxy.html#rds-proxy.limitations) in the *Amazon Aurora User Guide* .", "properties": { @@ -32853,7 +33080,7 @@ "HostedZoneConfig": "A complex type that contains an optional comment.\n\nIf you don't want to specify a comment, omit the `HostedZoneConfig` and `Comment` elements.", "HostedZoneTags": "Adds, edits, or deletes tags for a health check or a hosted zone.\n\nFor information about using tags for cost allocation, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the *AWS Billing and Cost Management User Guide* .", "Name": "The name of the domain. Specify a fully qualified domain name, for example, *www.example.com* . The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Route 53 treats *www.example.com* (without a trailing dot) and *www.example.com.* (with a trailing dot) as identical.\n\nIf you're creating a public hosted zone, this is the name you have registered with your DNS registrar. If your domain name is registered with a registrar other than Route 53, change the name servers for your domain to the set of `NameServers` that are returned by the `Fn::GetAtt` intrinsic function.", - "QueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. 
Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. 
For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", + "QueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. 
For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", "VPCs": "*Private hosted zones:* A complex type that contains information about the VPCs that are associated with the specified hosted zone.\n\n> For public hosted zones, omit `VPCs` , `VPCId` , and `VPCRegion` ." } }, @@ -34783,7 +35010,7 @@ "AllowedPattern": "A regular expression used to validate the parameter value. 
For example, for String types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", "DataType": "The data type of the parameter, such as `text` or `aws:ec2:image` . The default is `text` .", "Description": "Information about the parameter.", - "Name": "The name of the parameter.", + "Name": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", "Policies": "Information about the policies assigned to a parameter.\n\n[Assigning parameter policies](https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) in the *AWS Systems Manager User Guide* .", "Tags": "Optional metadata that you assign to a resource in the form of an arbitrary set of tags (key-value pairs). Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter.", "Tier": "The parameter tier.", @@ -37491,7 +37718,8 @@ "attributes": {}, "description": "Protocol settings that are configured for your server.", "properties": { - "PassiveIp": "Indicates passive mode, for FTP and FTPS protocols. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer." + "PassiveIp": "Indicates passive mode, for FTP and FTPS protocols. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.", + "TlsSessionResumptionMode": "A property used with Transfer servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. `TlsSessionResumptionMode` determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during `CreateServer` and `UpdateServer` calls. If a `TlsSessionResumptionMode` value is not specified during CreateServer, it is set to `ENFORCED` by default.\n\n- `DISABLED` : the server does not process TLS session resumption client requests and creates a new TLS session for each request.\n- `ENABLED` : the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing.\n- `ENFORCED` : the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to `ENFORCED` , test your clients.\n\n> Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the `ENFORCED` value, you need to test your clients." 
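A minimal sketch of the new `TlsSessionResumptionMode` setting above, assuming the regenerated `CfnServer` L1 construct picks up the member on `ProtocolDetails`; the certificate ARN and passive IP are placeholders:

```ts
import * as transfer from '@aws-cdk/aws-transfer';
import { Stack } from '@aws-cdk/core';

declare const stack: Stack;

new transfer.CfnServer(stack, 'FtpsServer', {
  protocols: ['FTPS'],
  certificate: 'arn:aws:acm:us-east-1:111122223333:certificate/EXAMPLE', // placeholder ACM ARN
  protocolDetails: {
    passiveIp: '203.0.113.10', // e.g. the external IP of a firewall or NAT device
    // ENFORCED rejects data connections from clients that don't resume the
    // TLS session; per the docs above, test your clients before enforcing.
    tlsSessionResumptionMode: 'ENFORCED',
  },
});
```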
} }, "AWS::Transfer::Server.WorkflowDetail": { From 912aeda295820920ed880b9c85a98c56421647b8 Mon Sep 17 00:00:00 2001 From: Yerzhan Mazhkenov <20302932+yerzhan7@users.noreply.github.com> Date: Mon, 10 Jan 2022 18:14:19 +0600 Subject: [PATCH 10/40] feat(s3): add EventBridge bucket notifications (#18150) ### **Description** Adds EventBridge bucket notification configuration. See https://aws.amazon.com/blogs/aws/new-use-amazon-s3-event-notifications-with-amazon-eventbridge/ ### **Implementation** - Added new Bucket property to enable this feature (`eventBridgeEnabled: true`) - Added EventBridge config to `S3BucketNotifications` Custom Resource - Added unit tests - Added integration test (currently fails, see below for more info) - Fixed dependent integration tests Closes #18076 ### **FAQ** 1. **Why not simply expose EventBridge Cfn property via S3 BucketProps?** Currently the CDK manages `NotificationConfigurations` via a custom resource. If we were to expose it that way, then e.g. an SNS configuration would override the EventBridge config. 2. **Why not create a new `IBucketNotificationDestination` class for EventBridge?** We can, but there is no need. Usually we create a subclass of `IBucketNotificationDestination` in order to adjust resource permissions; however, in this case there is no need to adjust permissions: [default EventBridge does not require any additional permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-permissions.html) unlike SQS/SNS/Lambda destinations. Additionally, enabling this feature via bucket props is a much cleaner/simpler API than creating a new dummy object of type `IBucketNotificationDestination` for customers. However, if you still think that we need to create a new `IBucketNotificationDestination` subclass for EventBridge for consistency, let me know and I will refactor. 
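For reviewers, a minimal usage sketch of the new property (the bucket ID is a placeholder; the only API assumed beyond the existing `Bucket` construct is the `eventBridgeEnabled` flag this PR introduces):

```ts
import * as s3 from '@aws-cdk/aws-s3';
import { Stack } from '@aws-cdk/core';

declare const stack: Stack;

// Opting in adds the (empty) EventBridgeConfiguration to the bucket's
// notification configuration through the existing custom resource; no
// additional destination permissions are needed.
new s3.Bucket(stack, 'MyEventBridgeBucket', {
  eventBridgeEnabled: true,
});
```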
---- **BLOCKED ON LAMBDA RUNTIME SDK UPDATE TO BOTOCORE >= v1.23.16 (Integration test currently fails as current version (v1.21.55) does not contain EventBridge configuration)** Check latest version here: https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../test/integ.s3.expected.json | 4 +- .../test/integ.notifications.expected.json | 2 +- .../integ.bucket-notifications.expected.json | 4 +- .../test/lambda/integ.bucket-notifications.ts | 2 +- ...teg.sns-bucket-notifications.expected.json | 2 +- .../integ.bucket-notifications.expected.json | 2 +- packages/@aws-cdk/aws-s3/README.md | 15 ++ packages/@aws-cdk/aws-s3/lib/bucket.ts | 17 ++ .../notifications-resource/lambda/index.py | 14 ++ .../notifications-resource.ts | 10 + .../integ.bucket.notifications.expected.json | 101 +++++++++ .../aws-s3/test/integ.bucket.notifications.ts | 14 ++ .../@aws-cdk/aws-s3/test/notification.test.ts | 18 ++ .../test_index.py | 203 ++++++++++++++++++ 14 files changed, 400 insertions(+), 8 deletions(-) create mode 100644 packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json create mode 100644 packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts diff --git a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json index ff71167d19f9e..bea3d9952fbc6 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json +++ b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json @@ -177,7 +177,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { @@ -195,4 +195,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json index 9026931306ab4..a467d8e2d5a04 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json @@ -247,7 +247,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json index 731effea95a53..71d8c5be9f84f 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json @@ -114,7 +114,7 @@ ] }, "Handler": "index.handler", - "Runtime": "nodejs10.x" + "Runtime": "nodejs14.x" }, "DependsOn": [ "MyFunctionServiceRole3C357FF2" @@ -237,7 +237,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts index c237b24e896e3..1493e29176362 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts +++ b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts @@ -12,7 +12,7 @@ const bucketA = new s3.Bucket(stack, 'MyBucket', { }); const fn = new lambda.Function(stack, 'MyFunction', { - runtime: lambda.Runtime.NODEJS_10_X, + runtime: lambda.Runtime.NODEJS_14_X, handler: 'index.handler', code: lambda.Code.fromInline(`exports.handler = ${handler.toString()}`), }); diff --git a/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json index 47f2a8ea6e0ce..c55da2fa8dcb2 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json @@ -195,7 +195,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json index 229b916beac4b..006856d6de207 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json @@ -184,7 +184,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3/README.md b/packages/@aws-cdk/aws-s3/README.md index 4f2592b9c633a..02c1b86be2855 100644 --- a/packages/@aws-cdk/aws-s3/README.md +++ b/packages/@aws-cdk/aws-s3/README.md @@ -252,6 +252,21 @@ bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.SnsDestination( [S3 Bucket Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +### EventBridge notifications + +Amazon S3 can send events to Amazon EventBridge whenever certain events happen in your bucket. +Unlike other destinations, you don't need to select which event types you want to deliver. + +The following example will enable EventBridge notifications: + +```ts +const bucket = new s3.Bucket(this, 'MyEventBridgeBucket', { + eventBridgeEnabled: true, +}); +``` + +[S3 EventBridge notifications]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/EventBridge.html + ## Block Public Access Use `blockPublicAccess` to specify [block public access settings] on the bucket. diff --git a/packages/@aws-cdk/aws-s3/lib/bucket.ts b/packages/@aws-cdk/aws-s3/lib/bucket.ts index bb00aac5adde3..15e69d4610a70 100644 --- a/packages/@aws-cdk/aws-s3/lib/bucket.ts +++ b/packages/@aws-cdk/aws-s3/lib/bucket.ts @@ -865,6 +865,10 @@ export abstract class BucketBase extends Resource implements IBucket { return this.addEventNotification(EventType.OBJECT_REMOVED, dest, ...filters); } + protected enableEventBridgeNotification() { + this.notifications.enableEventBridgeNotification(); + } + private get writeActions(): string[] { return [ ...perms.BUCKET_DELETE_ACTIONS, @@ -1341,6 +1345,13 @@ export interface BucketProps { */ readonly versioned?: boolean; + /** + * Whether this bucket should send notifications to Amazon EventBridge or not. + * + * @default false + */ + readonly eventBridgeEnabled?: boolean; + /** * Rules that define how Amazon S3 manages objects during their lifetime. 
* @@ -1621,6 +1632,7 @@ export class Bucket extends BucketBase { private accessControl?: BucketAccessControl; private readonly lifecycleRules: LifecycleRule[] = []; private readonly versioned?: boolean; + private readonly eventBridgeEnabled?: boolean; private readonly metrics: BucketMetrics[] = []; private readonly cors: CorsRule[] = []; private readonly inventories: Inventory[] = []; @@ -1660,6 +1672,7 @@ export class Bucket extends BucketBase { this.versioned = props.versioned; this.encryptionKey = encryptionKey; + this.eventBridgeEnabled = props.eventBridgeEnabled; this.bucketName = this.getResourceNameAttribute(resource.ref); this.bucketArn = this.getResourceArnAttribute(resource.attrArn, { @@ -1710,6 +1723,10 @@ export class Bucket extends BucketBase { this.enableAutoDeleteObjects(); } + + if (this.eventBridgeEnabled) { + this.enableEventBridgeNotification(); + } } /** diff --git a/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py b/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py index 2551398d74958..d46989246e827 100644 --- a/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py +++ b/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py @@ -5,6 +5,8 @@ s3 = boto3.client("s3") +EVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration' + CONFIGURATION_TYPES = ["TopicConfigurations", "QueueConfigurations", "LambdaFunctionConfigurations"] def handler(event: dict, context): @@ -57,6 +59,13 @@ def with_id(notification): external = external_notifications.get(t, []) incoming = [with_id(n) for n in notification_configuration.get(t, [])] notifications[t] = external + incoming + + # EventBridge configuration is a special case because it's just an empty object if it exists + if EVENTBRIDGE_CONFIGURATION in notification_configuration: + notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION] + elif EVENTBRIDGE_CONFIGURATION in external_notifications: + notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION] + return notifications @@ -68,6 +77,11 @@ def find_external_notifications(bucket, stack_id): # so we can filter by it. 
external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f"{stack_id}-")] + # always treat EventBridge configuration as an external config if it already exists + # as there is no way to determine whether it's managed by us or not + if EVENTBRIDGE_CONFIGURATION in existing_notifications: + external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION] + return external_notifications diff --git a/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts b/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts index d5190f1a6a913..7ac760d006da9 100644 --- a/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts +++ b/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts @@ -31,6 +31,7 @@ interface NotificationsProps { * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html */ export class BucketNotifications extends Construct { + private eventBridgeEnabled = false; private readonly lambdaNotifications = new Array(); private readonly queueNotifications = new Array(); private readonly topicNotifications = new Array(); @@ -87,8 +88,14 @@ export class BucketNotifications extends Construct { } } + public enableEventBridgeNotification() { + this.createResourceOnce(); + this.eventBridgeEnabled = true; + } + private renderNotificationConfiguration(): NotificationConfiguration { return { + EventBridgeConfiguration: this.eventBridgeEnabled ? {} : undefined, LambdaFunctionConfigurations: this.lambdaNotifications.length > 0 ? this.lambdaNotifications : undefined, QueueConfigurations: this.queueNotifications.length > 0 ? this.queueNotifications : undefined, TopicConfigurations: this.topicNotifications.length > 0 ? 
this.topicNotifications : undefined, @@ -167,6 +174,7 @@ function renderFilters(filters?: NotificationKeyFilter[]): Filter | undefined { } interface NotificationConfiguration { + EventBridgeConfiguration?: EventBridgeConfiguration; LambdaFunctionConfigurations?: LambdaFunctionConfiguration[]; QueueConfigurations?: QueueConfiguration[]; TopicConfigurations?: TopicConfiguration[]; @@ -178,6 +186,8 @@ interface CommonConfiguration { Filter?: Filter } +interface EventBridgeConfiguration { } + interface LambdaFunctionConfiguration extends CommonConfiguration { LambdaFunctionArn: string; } diff --git a/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json new file mode 100644 index 0000000000000..4c3711e8018ef --- /dev/null +++ b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json @@ -0,0 +1,101 @@ +{ + "Resources": { + "MyEventBridgeBucketNotifications19C0453F": { + "Type": "Custom::S3BucketNotifications", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "BucketNotificationsHandler050a0587b7544547bf325f094a3db8347ECC3691", + "Arn" + ] + }, + "BucketName": { + "Ref": "MyEventBridgeBucket1ABD5C2A" + }, + "NotificationConfiguration": { + "EventBridgeConfiguration": {} + }, + "Managed": true + } + }, + "MyEventBridgeBucket1ABD5C2A": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:PutBucketNotification", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36", + "Roles": [ + { + "Ref": "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC" + } + ] + } + }, + "BucketNotificationsHandler050a0587b7544547bf325f094a3db8347ECC3691": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", + "Code": { + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, 
request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n 
print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC", + "Arn" + ] + }, + "Runtime": "python3.7", + "Timeout": 300 + }, + "DependsOn": [ + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36", + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC" + ] + } + } +} diff --git a/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts new file mode 100644 index 0000000000000..3180b491f6250 --- /dev/null +++ b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts @@ -0,0 +1,14 @@ +#!/usr/bin/env node +import * as cdk from '@aws-cdk/core'; +import * as s3 from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-s3-notifications'); + +new s3.Bucket(stack, 'MyEventBridgeBucket', { + eventBridgeEnabled: true, + removalPolicy: cdk.RemovalPolicy.DESTROY, +}); + +app.synth(); diff --git a/packages/@aws-cdk/aws-s3/test/notification.test.ts b/packages/@aws-cdk/aws-s3/test/notification.test.ts index fbc8e1aa45a49..e3e1d81687100 100644 --- a/packages/@aws-cdk/aws-s3/test/notification.test.ts +++ b/packages/@aws-cdk/aws-s3/test/notification.test.ts @@ -122,4 +122,22 @@ describe('notification', () => { }), }, { suffix: '.png' }, { suffix: '.zip' })).toThrow(/suffix rule/); }); + + test('EventBridge notification custom resource', () => { + // GIVEN + const stack = new cdk.Stack(); + + // WHEN + new s3.Bucket(stack, 'MyBucket', { + eventBridgeEnabled: true, + }); + + // THEN + Template.fromStack(stack).resourceCountIs('AWS::S3::Bucket', 1); + Template.fromStack(stack).hasResourceProperties('Custom::S3BucketNotifications', { + NotificationConfiguration: { + EventBridgeConfiguration: {}, + }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py b/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py index 9127677b02675..ff79da80ef669 100644 --- a/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py +++ b/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py @@ -19,6 +19,8 @@ ) sys.exit(1) +EVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration' + CONFIGURATION_TYPES = ["TopicConfigurations", "QueueConfigurations", "LambdaFunctionConfigurations"] @@ -33,6 +35,16 @@ def make_event(request_type: str, managed: bool): }, } +def make_event_with_eventbridge(request_type: str, managed: bool): + return { + "StackId": "StackId", + "RequestType": request_type, + "ResourceProperties": { + "Managed": str(managed), + "BucketName": "BucketName", + "NotificationConfiguration": make_notification_configuration_with_eventbridge(), + }, + } def make_notification_configuration(id_prefix: str = None): def make_id(): @@ -43,6 +55,11 @@ def make_id(): config[t] = [{"Id": make_id()}] return config +def make_notification_configuration_with_eventbridge(id_prefix: str = None): + return {**make_notification_configuration(id_prefix), **make_eventbridge_configuration()} + +def make_eventbridge_configuration(): + return { EVENTBRIDGE_CONFIGURATION: {} } def make_empty_notification_configuration(): config = {} @@ -50,11 +67,21 @@ def make_empty_notification_configuration(): config[t] = [] return config +def 
make_empty_notification_configuration_with_eventbridge(): + return {**make_empty_notification_configuration(), **make_eventbridge_configuration()} + def merge_notification_configurations(conf1: Dict, conf2: Dict): notifications = {} for t in CONFIGURATION_TYPES: notifications[t] = conf1.get(t, []) + conf2.get(t, []) + + if EVENTBRIDGE_CONFIGURATION in conf1: + notifications[EVENTBRIDGE_CONFIGURATION] = conf1[EVENTBRIDGE_CONFIGURATION] + + if EVENTBRIDGE_CONFIGURATION in conf2: + notifications[EVENTBRIDGE_CONFIGURATION] = conf2[EVENTBRIDGE_CONFIGURATION] + return notifications @@ -113,6 +140,22 @@ def test_create(self, _, get: MagicMock, put: MagicMock): event["ResourceProperties"]["NotificationConfiguration"], ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_create_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + get.return_value = {} + + event = make_event_with_eventbridge("Create", False) + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + event["ResourceProperties"]["NotificationConfiguration"], + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -131,6 +174,46 @@ def test_update(self, _, get: MagicMock, put: MagicMock): event["ResourceProperties"]["NotificationConfiguration"], ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Update", False) + + # simulate a previous create operation + current_notifications = make_notification_configuration(f"{event['StackId']}-") + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + event["ResourceProperties"]["NotificationConfiguration"], + ) + + + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_with_existing_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event("Update", False) + + # simulate a previous create operation + current_notifications = make_notification_configuration_with_eventbridge(f"{event['StackId']}-") + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + make_eventbridge_configuration(), + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -149,6 +232,24 @@ def test_delete(self, _, get: MagicMock, put: MagicMock): make_empty_notification_configuration(), ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_delete_with_eventbridge_should_not_remove_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Delete", False) + + # simulate a previous create operation + current_notifications = make_notification_configuration_with_eventbridge(f"{event['StackId']}-") + get.return_value = 
current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + make_empty_notification_configuration_with_eventbridge(), + ) + class UnmanagedDirtyBucketTest(unittest.TestCase): @patch("index.put_bucket_notification_configuration") @@ -172,6 +273,48 @@ def test_create(self, _, get: MagicMock, put: MagicMock): ), ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_create_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Create", False) + + # simulate external notifications + current_notifications = make_notification_configuration() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_create_with_existing_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event("Create", False) + + # simulate external notifications + current_notifications = make_notification_configuration_with_eventbridge() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -193,6 +336,48 @@ def test_update(self, _, get: MagicMock, put: MagicMock): ), ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Update", False) + + # simulate external notifications + current_notifications = make_notification_configuration() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_without_eventbridge_should_not_remove_existing_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event("Update", False) + + # simulate external notifications + current_notifications = make_notification_configuration_with_eventbridge() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -211,6 +396,24 @@ def test_delete(self, _, get: MagicMock, put: MagicMock): current_notifications, ) + 
@patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_delete_with_eventbridge_should_not_remove_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Delete", False) + + # simulate external notifications + current_notifications = make_notification_configuration_with_eventbridge() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + current_notifications, + ) + class CfnResponsesTest(unittest.TestCase): @patch("index.put_bucket_notification_configuration") From 304f5b6974f1121a8a5ff802076dffe2eff9f407 Mon Sep 17 00:00:00 2001 From: Choryu Park Date: Mon, 10 Jan 2022 22:00:31 +0900 Subject: [PATCH 11/40] feat(eks): cluster tagging (#4995) (#18109) Fixes #4995 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-eks/lib/cluster-resource.ts | 2 ++ packages/@aws-cdk/aws-eks/lib/cluster.ts | 9 ++++++++- .../aws-eks/test/integ.eks-cluster.expected.json | 3 +++ packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts | 3 +++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts b/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts index ed0852338a527..6a947380e3dd1 100644 --- a/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts +++ b/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts @@ -28,6 +28,7 @@ export interface ClusterResourceProps { readonly secretsEncryptionKey?: kms.IKey; readonly onEventLayer?: lambda.ILayerVersion; readonly clusterHandlerSecurityGroup?: ec2.ISecurityGroup; + readonly tags?: { [key: string]: string }; } /** @@ -89,6 +90,7 @@ export class ClusterResource extends CoreConstruct { endpointPrivateAccess: props.endpointPrivateAccess, publicAccessCidrs: props.publicAccessCidrs, }, + tags: props.tags, }, AssumeRoleArn: this.adminRole.roleArn, diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts index 2b917f8f93f7e..10a3f2123aac1 100644 --- a/packages/@aws-cdk/aws-eks/lib/cluster.ts +++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts @@ -744,13 +744,19 @@ export interface ClusterProps extends ClusterOptions { */ readonly defaultCapacityType?: DefaultCapacityType; - /** * The IAM role to pass to the Kubectl Lambda Handler. * * @default - Default Lambda IAM Execution Role */ readonly kubectlLambdaRole?: iam.IRole; + + /** + * The tags assigned to the EKS cluster + * + * @default - none + */ + readonly tags?: { [key: string]: string }; } /** @@ -1372,6 +1378,7 @@ export class Cluster extends ClusterBase { subnets: placeClusterHandlerInVpc ? 
privateSubnets : undefined, clusterHandlerSecurityGroup: this.clusterHandlerSecurityGroup, onEventLayer: this.onEventLayer, + tags: props.tags, }); if (this.endpointAccess._config.privateAccess && privateSubnets.length !== 0) { diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json index 00ab6f9f6fe3c..7b5af8f848f2e 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json @@ -951,6 +951,9 @@ ], "endpointPublicAccess": true, "endpointPrivateAccess": true + }, + "tags": { + "foo": "bar" } }, "AssumeRoleArn": { diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts index 5cbc0e384eb17..8dd012b3e2d66 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts @@ -38,6 +38,9 @@ class EksClusterStack extends TestStack { defaultCapacity: 2, version: eks.KubernetesVersion.V1_21, secretsEncryptionKey, + tags: { + foo: 'bar', + }, }); this.assertFargateProfile(); From d94d9cec69a14006398c70a8c152bc21324d3d44 Mon Sep 17 00:00:00 2001 From: Rico Huijbers Date: Mon, 10 Jan 2022 15:28:22 +0100 Subject: [PATCH 12/40] docs(core): errors will fail deployment, not synthesis (#18342) Fix inaccurate information in the documentation. Closes #18317 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/core/lib/annotations.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/@aws-cdk/core/lib/annotations.ts b/packages/@aws-cdk/core/lib/annotations.ts index f46c830c25757..03fb7a99bd80a 100644 --- a/packages/@aws-cdk/core/lib/annotations.ts +++ b/packages/@aws-cdk/core/lib/annotations.ts @@ -44,7 +44,7 @@ export class Annotations { /** * Adds an { "error": <message> } metadata entry to this construct. - * The toolkit will fail synthesis when errors are reported. + * The toolkit will fail deployment of any stack that has errors reported against it. * @param message The error message. */ public addError(message: string) { From 225668050caef9bfdaa25b8ae984d3886108397f Mon Sep 17 00:00:00 2001 From: Cory Hall <43035978+corymhall@users.noreply.github.com> Date: Mon, 10 Jan 2022 10:16:19 -0500 Subject: [PATCH 13/40] feat(cli): diff now uses the lookup Role for new-style synthesis (#18277) This PR exposes information on the bootstrap lookup role on the CloudFormation stack artifact. This enables the CLI to assume the lookup role during CLI operations in order to look up information in the stack account. Along with the ARN of the lookup role, this also exposes a `requiresBootstrapStackVersion` property, which is set to `8` (the version in which the lookup role was granted ReadOnlyAccess), and the `bootstrapStackVersionSsmParameter`, which is needed to look up the bootstrap version if a user has renamed the bootstrap stack. This allows us to first check whether the lookupRole exists and has the correct permissions before using it. This also updates the `diff` capability in the CLI (run as part of `cdk diff` or `cdk deploy`) to use this new functionality. It will now try to assume the lookupRole; if the role doesn't exist, or if the bootstrap stack version is not valid, it will fall back to using the deployRole (what it uses currently). 
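Conceptually, the diff-time credential selection works like the sketch below. This is an illustrative sketch only, not the exact implementation: `sdkForDiff` and `bootstrapVersionIsAtLeast` are hypothetical names, while `SdkProvider.forEnvironment`, `Mode`, and the artifact's `lookupRole` and `assumeRoleArn` properties are the real APIs touched by this PR.

import * as cxapi from '@aws-cdk/cx-api';
import { ISDK, Mode, SdkProvider } from './aws-auth';

// Hypothetical stand-in for the real version check, which reads the
// bootstrap stack version (via its SSM parameter or stack outputs)
// and compares it against the required minimum.
declare function bootstrapVersionIsAtLeast(sdk: ISDK, min: number): Promise<boolean>;

async function sdkForDiff(sdkProvider: SdkProvider, stack: cxapi.CloudFormationStackArtifact): Promise<ISDK> {
  const env = await sdkProvider.resolveEnvironment(stack.environment);
  if (stack.lookupRole) {
    // Prefer the read-only lookup role when the artifact advertises one.
    const lookup = await sdkProvider.forEnvironment(env, Mode.ForReading, {
      assumeRoleArn: stack.lookupRole.arn,
      assumeRoleExternalId: stack.lookupRole.assumeRoleExternalId,
    });
    // Only use it if the role was actually assumed and the bootstrap stack
    // is at least version 8 (when the lookup role gained ReadOnlyAccess).
    const minVersion = stack.lookupRole.requiresBootstrapStackVersion ?? 8;
    if (lookup.didAssumeRole && await bootstrapVersionIsAtLeast(lookup.sdk, minVersion)) {
      return lookup.sdk;
    }
  }
  // Otherwise fall back to the deploy role, as before this change.
  return (await sdkProvider.forEnvironment(env, Mode.ForWriting, {
    assumeRoleArn: stack.assumeRoleArn,
  })).sdk;
}

Falling back (rather than failing) keeps `cdk diff` working for users who have not yet re-bootstrapped their environments.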
This PR also updates the `forEnvironment` function to report whether or not it fell back to the default credentials. This allows the calling function to decide whether it actually wants to use the default credentials. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../lib/cloud-assembly/artifact-schema.ts | 40 +++ .../schema/cloud-assembly.schema.json | 31 ++- .../schema/cloud-assembly.version.json | 2 +- .../stack-synthesizers/default-synthesizer.ts | 33 +++ .../stack-synthesizers/stack-synthesizer.ts | 8 + .../lib/artifacts/cloudformation-artifact.ts | 8 + .../aws-cdk/lib/api/aws-auth/sdk-provider.ts | 39 ++- .../lib/api/bootstrap/deploy-bootstrap.ts | 2 +- .../lib/api/cloudformation-deployments.ts | 127 +++++++++- .../aws-cdk/lib/api/hotswap-deployments.ts | 2 +- packages/aws-cdk/lib/api/toolkit-info.ts | 39 ++- packages/aws-cdk/lib/context-providers/ami.ts | 2 +- .../context-providers/availability-zones.ts | 2 +- .../endpoint-service-availability-zones.ts | 2 +- .../lib/context-providers/hosted-zones.ts | 2 +- .../aws-cdk/lib/context-providers/keys.ts | 4 +- .../lib/context-providers/load-balancers.ts | 4 +- .../lib/context-providers/security-groups.ts | 2 +- .../lib/context-providers/ssm-parameters.ts | 2 +- .../aws-cdk/lib/context-providers/vpcs.ts | 2 +- packages/aws-cdk/lib/util/asset-publishing.ts | 4 +- .../api/cloudformation-deployments.test.ts | 2 +- .../aws-cdk/test/api/sdk-provider.test.ts | 22 +- packages/aws-cdk/test/cdk-toolkit.test.ts | 237 ++++++++++++++++++ .../context-providers/load-balancers.test.ts | 6 +- .../aws-cdk/test/util/cloudformation.test.ts | 2 +- packages/aws-cdk/test/util/mock-sdk.ts | 6 +- .../aws-cdk/test/util/mock-toolkitinfo.ts | 4 +- 28 files changed, 569 insertions(+), 67 deletions(-) diff --git a/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts b/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts index 9bf124c31c71d..4d98b3a29bb32 100644 --- a/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts +++ b/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts @@ -1,4 +1,37 @@ +/** + * Information needed to access an IAM role created + * as part of the bootstrap process + */ +export interface BootstrapRole { + /** + * The ARN of the IAM role created as part of bootstrapping + * e.g. lookupRoleArn + */ + readonly arn: string; + + /** + * External ID to use when assuming the bootstrap role + * + * @default - No external ID + */ + readonly assumeRoleExternalId?: string; + + /** + * Version of bootstrap stack required to use this role + * + * @default - No bootstrap stack required + */ + readonly requiresBootstrapStackVersion?: number; + + /** + * Name of SSM parameter with bootstrap stack version + * + * @default - Discover SSM parameter by reading stack + */ + readonly bootstrapStackVersionSsmParameter?: string; +} + /** * Artifact properties for CloudFormation stacks. 
*/ @@ -56,6 +89,13 @@ export interface AwsCloudFormationStackProperties { */ readonly cloudFormationExecutionRoleArn?: string; + /** + * The role to use to look up values from the target AWS account + * + * @default - No role is assumed (current credentials are used) + */ + readonly lookupRole?: BootstrapRole; + /** * If the stack template has already been included in the asset manifest, its asset URL * diff --git a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json index 9241ae62ef0ff..5fe1f4fb4321a 100644 --- a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json +++ b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json @@ -307,6 +307,10 @@ "description": "The role that is passed to CloudFormation to execute the change set (Default - No role is passed (currently assumed role/credentials are used))", "type": "string" }, + "lookupRole": { + "description": "The role to use to look up values from the target AWS account (Default - No role is assumed (current credentials are used))", + "$ref": "#/definitions/BootstrapRole" + }, "stackTemplateAssetObjectUrl": { "description": "If the stack template has already been included in the asset manifest, its asset URL (Default - Not uploaded yet, upload just before deploying)", "type": "string" @@ -328,6 +332,31 @@ "templateFile" ] }, + "BootstrapRole": { + "description": "Information needed to access an IAM role created\nas part of the bootstrap process", + "type": "object", + "properties": { + "arn": { + "description": "The ARN of the IAM role created as part of bootstrapping\ne.g. lookupRoleArn", + "type": "string" + }, + "assumeRoleExternalId": { + "description": "External ID to use when assuming the bootstrap role (Default - No external ID)", + "type": "string" + }, + "requiresBootstrapStackVersion": { + "description": "Version of bootstrap stack required to use this role (Default - No bootstrap stack required)", + "type": "number" + }, + "bootstrapStackVersionSsmParameter": { + "description": "Name of SSM parameter with bootstrap stack version (Default - Discover SSM parameter by reading stack)", + "type": "string" + } + }, + "required": [ + "arn" + ] + }, "AssetManifestProperties": { "description": "Artifact properties for the Asset Manifest", "type": "object", @@ -598,7 +627,7 @@ } }, "returnAsymmetricSubnets": { - "description": "Whether to populate the subnetGroups field of the {@link VpcContextResponse},\nwhich contains potentially asymmetric subnet groups.", + "description": "Whether to populate the subnetGroups field of the{@linkVpcContextResponse},\nwhich contains potentially asymmetric subnet groups.", "default": false, "type": "boolean" }, diff --git a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json index 01d4f111912e9..5bdbc9d33c3b3 100644 --- a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json +++ b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json @@ -1 +1 @@ -{"version":"15.0.0"} \ No newline at end of file +{"version":"16.0.0"} \ No newline at end of file diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts index d8e1f8818abc4..ed537e496128a 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts +++ 
b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts @@ -21,6 +21,12 @@ export const BOOTSTRAP_QUALIFIER_CONTEXT = '@aws-cdk/core:bootstrapQualifier'; */ const MIN_BOOTSTRAP_STACK_VERSION = 6; +/** + * The minimum bootstrap stack version required + * to use the lookup role. + */ +const MIN_LOOKUP_ROLE_BOOTSTRAP_STACK_VERSION = 8; + /** * Configuration properties for DefaultStackSynthesizer */ @@ -91,6 +97,25 @@ export interface DefaultStackSynthesizerProps { */ readonly lookupRoleArn?: string; + /** + * External ID to use when assuming lookup role + * + * @default - No external ID + */ + readonly lookupRoleExternalId?: string; + + /** + * Use the bootstrapped lookup role for (read-only) stack operations + * + * Use the lookup role when performing a `cdk diff`. If set to `false`, the + * `deploy role` credentials will be used to perform a `cdk diff`. + * + * Requires bootstrap stack version 8. + * + * @default true + */ + readonly useLookupRoleForStackOperations?: boolean; + /** * External ID to use when assuming role for image asset publishing * @@ -269,6 +294,7 @@ export class DefaultStackSynthesizer extends StackSynthesizer { private fileAssetPublishingRoleArn?: string; private imageAssetPublishingRoleArn?: string; private lookupRoleArn?: string; + private useLookupRoleForStackOperations: boolean; private qualifier?: string; private bucketPrefix?: string; private dockerTagPrefix?: string; @@ -279,6 +305,7 @@ export class DefaultStackSynthesizer extends StackSynthesizer { constructor(private readonly props: DefaultStackSynthesizerProps = {}) { super(); + this.useLookupRoleForStackOperations = props.useLookupRoleForStackOperations ?? true; for (const key in props) { if (props.hasOwnProperty(key)) { @@ -453,6 +480,12 @@ export class DefaultStackSynthesizer extends StackSynthesizer { requiresBootstrapStackVersion: MIN_BOOTSTRAP_STACK_VERSION, bootstrapStackVersionSsmParameter: this.bootstrapStackVersionSsmParameter, additionalDependencies: [artifactId], + lookupRole: this.useLookupRoleForStackOperations && this.lookupRoleArn ? 
{ + arn: this.lookupRoleArn, + assumeRoleExternalId: this.props.lookupRoleExternalId, + requiresBootstrapStackVersion: MIN_LOOKUP_ROLE_BOOTSTRAP_STACK_VERSION, + bootstrapStackVersionSsmParameter: this.bootstrapStackVersionSsmParameter, + } : undefined, }); } diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts index 3b283eaae24ce..ea7c7745f2419 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts @@ -1,3 +1,4 @@ +import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import { DockerImageAssetLocation, DockerImageAssetSource, FileAssetLocation, FileAssetSource } from '../assets'; import { ISynthesisSession } from '../construct-compat'; import { Stack } from '../stack'; @@ -100,6 +101,13 @@ export interface SynthesizeStackArtifactOptions { */ readonly cloudFormationExecutionRoleArn?: string; + /** + * The role to use to look up values from the target AWS account + * + * @default - None + */ + readonly lookupRole?: cxschema.BootstrapRole; + /** * If the stack template has already been included in the asset manifest, its asset URL * diff --git a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts index 225f256e85f5f..66fc309a2593c 100644 --- a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts +++ b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts @@ -75,6 +75,13 @@ export class CloudFormationStackArtifact extends CloudArtifact { */ public readonly cloudFormationExecutionRoleArn?: string; + /** + * The role to use to look up values from the target AWS account + * + * @default - No role is assumed (current credentials are used) + */ + public readonly lookupRole?: cxschema.BootstrapRole; + /** * If the stack template has already been included in the asset manifest, its asset URL * @@ -135,6 +142,7 @@ export class CloudFormationStackArtifact extends CloudArtifact { this.bootstrapStackVersionSsmParameter = properties.bootstrapStackVersionSsmParameter; this.terminationProtection = properties.terminationProtection; this.validateOnSynth = properties.validateOnSynth; + this.lookupRole = properties.lookupRole; this.stackName = properties.stackName || artifactId; this.assets = this.findMetadataByType(cxschema.ArtifactMetadataEntryType.ASSET).map(e => e.data as cxschema.AssetMetadataEntry); diff --git a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts index 0da0b027bbc65..ad2af9f62ef61 100644 --- a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts +++ b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts @@ -77,6 +77,33 @@ export interface SdkHttpOptions { const CACHED_ACCOUNT = Symbol('cached_account'); const CACHED_DEFAULT_CREDENTIALS = Symbol('cached_default_credentials'); +/** + * SDK configuration for a given environment + * 'forEnvironment' will attempt to assume a role and, if it + * is not successful, then it will either: + * 1. Check to see if the default credentials (local credentials the CLI was executed with) + * are for the given environment. If they are, then return those. + * 2. If the default credentials are not for the given environment, then + * throw an error + * + * 'didAssumeRole' allows callers to know whether they are receiving the assume role + * credentials or the default credentials. 
+ */ +export interface SdkForEnvironment { + /** + * The SDK for the given environment + */ + readonly sdk: ISDK; + + /** + * Whether or not the assume role was successful. + * If the assume role was not successful (false) + * then that means that the 'sdk' returned contains + * the default credentials (not the assume role credentials) + */ + readonly didAssumeRole: boolean; +} + /** * Creates instances of the AWS SDK appropriate for a given account/region. * @@ -140,7 +167,11 @@ export class SdkProvider { * * The `environment` parameter is resolved first (see `resolveEnvironment()`). */ - public async forEnvironment(environment: cxapi.Environment, mode: Mode, options?: CredentialsOptions): Promise<ISDK> { + public async forEnvironment( + environment: cxapi.Environment, + mode: Mode, + options?: CredentialsOptions, + ): Promise<SdkForEnvironment> { const env = await this.resolveEnvironment(environment); const baseCreds = await this.obtainBaseCredentials(env.account, mode); @@ -151,7 +182,7 @@ export class SdkProvider { // account. if (options?.assumeRoleArn === undefined) { if (baseCreds.source === 'incorrectDefault') { throw new Error(fmtObtainCredentialsError(env.account, baseCreds)); } - return new SDK(baseCreds.credentials, env.region, this.sdkOptions); + return { sdk: new SDK(baseCreds.credentials, env.region, this.sdkOptions), didAssumeRole: false }; } // We will proceed to AssumeRole using whatever we've been given. @@ -161,7 +192,7 @@ export class SdkProvider { // we can determine whether the AssumeRole call succeeds or not. try { await sdk.forceCredentialRetrieval(); - return sdk; + return { sdk, didAssumeRole: true }; } catch (e) { // AssumeRole failed. Proceed and warn *if and only if* the baseCredentials were already for the right account // or returned from a plugin. This is to cover some current setups for people using plugins or preferring to @@ -170,7 +201,7 @@ export class SdkProvider { if (baseCreds.source === 'correctDefault' || baseCreds.source === 'plugin') { debug(e.message); warning(`${fmtObtainedCredentials(baseCreds)} could not be used to assume '${options.assumeRoleArn}', but are for the right account. Proceeding anyway.`); - return new SDK(baseCreds.credentials, env.region, this.sdkOptions); + return { sdk: new SDK(baseCreds.credentials, env.region, this.sdkOptions), didAssumeRole: false }; } throw e; diff --git a/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts b/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts index 49f97e71332c3..8cece9d8eed30 100644 --- a/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts +++ b/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts @@ -27,7 +27,7 @@ export class BootstrapStack { toolkitStackName = toolkitStackName ?? 
DEFAULT_TOOLKIT_STACK_NAME; const resolvedEnvironment = await sdkProvider.resolveEnvironment(environment); - const sdk = await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForWriting); + const sdk = (await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForWriting)).sdk; const currentToolkitInfo = await ToolkitInfo.lookup(resolvedEnvironment, sdk, toolkitStackName); diff --git a/packages/aws-cdk/lib/api/cloudformation-deployments.ts b/packages/aws-cdk/lib/api/cloudformation-deployments.ts index fb7c5410faf3d..c461c5ac24dc5 100644 --- a/packages/aws-cdk/lib/api/cloudformation-deployments.ts +++ b/packages/aws-cdk/lib/api/cloudformation-deployments.ts @@ -1,9 +1,9 @@ import * as cxapi from '@aws-cdk/cx-api'; import { AssetManifest } from 'cdk-assets'; import { Tag } from '../cdk-toolkit'; -import { debug } from '../logging'; +import { debug, warning } from '../logging'; import { publishAssets } from '../util/asset-publishing'; -import { Mode, SdkProvider } from './aws-auth'; +import { Mode, SdkProvider, ISDK } from './aws-auth'; import { deployStack, DeployStackResult, destroyStack } from './deploy-stack'; import { ToolkitInfo } from './toolkit-info'; import { CloudFormationStack, Template } from './util/cloudformation'; @@ -171,6 +171,54 @@ export interface ProvisionerProps { sdkProvider: SdkProvider; } +/** + * SDK obtained by assuming the lookup role + * for a given environment + */ +export interface PreparedSdkWithLookupRoleForEnvironment { + /** + * The SDK for the given environment + */ + readonly sdk: ISDK; + + /** + * The resolved environment for the stack + * (no more 'unknown-account/unknown-region') + */ + readonly resolvedEnvironment: cxapi.Environment; + + /** + * Whether or not the assume role was successful. + * If the assume role was not successful (false) + * then that means that the 'sdk' returned contains + * the default credentials (not the assume role credentials) + */ + readonly didAssumeRole: boolean; +} + +/** + * SDK obtained by assuming the deploy role + * for a given environment + */ +export interface PreparedSdkForEnvironment { + /** + * The SDK for the given environment + */ + readonly stackSdk: ISDK; + + /** + * The resolved environment for the stack + * (no more 'unknown-account/unknown-region') + */ + readonly resolvedEnvironment: cxapi.Environment; + /** + * The Execution Role that should be passed to CloudFormation. + * + * @default - no execution role is used + */ + readonly cloudFormationRoleArn?: string; +} + /** * Helper class for CloudFormation deployments * @@ -186,7 +234,19 @@ export class CloudFormationDeployments { public async readCurrentTemplate(stackArtifact: cxapi.CloudFormationStackArtifact): Promise