From 253d4230c9e0b705b207eb883f58a69e94ef8165 Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 17:21:16 +0100 Subject: [PATCH 1/9] Add TypedBatchProcessingAspectHandler and related tests for typed record handling --- .../TypedBatchProcessingAspectHandler.cs | 130 +++++++++ ...atchProcessorAttributeTypedHandlerTests.cs | 247 ++++++++++++++++++ .../TypedHandlerAttributeSimpleTest.cs | 76 ++++++ 3 files changed, 453 insertions(+) create mode 100644 libraries/src/AWS.Lambda.Powertools.BatchProcessing/Internal/TypedBatchProcessingAspectHandler.cs create mode 100644 libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs create mode 100644 libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Internal/TypedBatchProcessingAspectHandler.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Internal/TypedBatchProcessingAspectHandler.cs new file mode 100644 index 000000000..e319cb9b1 --- /dev/null +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Internal/TypedBatchProcessingAspectHandler.cs @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +using System; +using System.Linq; +using System.Reflection; +using System.Threading.Tasks; +using Amazon.Lambda.Core; + +namespace AWS.Lambda.Powertools.BatchProcessing.Internal; + +internal class TypedBatchProcessingAspectHandler : IBatchProcessingAspectHandler +{ + private readonly ITypedBatchProcessor _typedBatchProcessor; + private readonly object _typedHandler; + private readonly bool _hasContext; + private readonly DeserializationOptions _deserializationOptions; + private readonly ProcessingOptions _processingOptions; + + public TypedBatchProcessingAspectHandler( + ITypedBatchProcessor typedBatchProcessor, + object typedHandler, + bool hasContext, + DeserializationOptions deserializationOptions, + ProcessingOptions processingOptions) + { + _typedBatchProcessor = typedBatchProcessor; + _typedHandler = typedHandler; + _hasContext = hasContext; + _deserializationOptions = deserializationOptions; + _processingOptions = processingOptions; + } + + public async Task HandleAsync(object[] args) + { + // Try get event from args + if (args?.FirstOrDefault() is not TEvent @event) + { + throw new InvalidOperationException($"The first function handler parameter must be of type: '{typeof(TEvent).Namespace}'."); + } + + // Get Lambda context if available and needed + ILambdaContext context = null; + if (_hasContext && args.Length > 1 && args[1] is ILambdaContext lambdaContext) + { + context = lambdaContext; + } + + // Use reflection to call the appropriate ProcessAsync method on the typed batch processor + await CallTypedProcessAsync(@event, context); + } + + private async Task CallTypedProcessAsync(TEvent @event, ILambdaContext context) + { + // Get the generic type argument from the handler + var handlerType = _typedHandler.GetType(); + var handlerInterface = handlerType.GetInterfaces() + .FirstOrDefault(i => i.IsGenericType && + (i.GetGenericTypeDefinition() == typeof(ITypedRecordHandler<>) || + i.GetGenericTypeDefinition() == 
typeof(ITypedRecordHandlerWithContext<>))); + + if (handlerInterface == null) + { + throw new InvalidOperationException($"Handler type '{handlerType.Name}' does not implement ITypedRecordHandler or ITypedRecordHandlerWithContext."); + } + + var dataType = handlerInterface.GetGenericArguments()[0]; + + // Find the appropriate ProcessAsync method on the typed batch processor + MethodInfo processMethod; + if (_hasContext && context != null) + { + // Look for ProcessAsync(TEvent, ITypedRecordHandlerWithContext, ILambdaContext, DeserializationOptions, ProcessingOptions) + processMethod = _typedBatchProcessor.GetType().GetMethods() + .FirstOrDefault(m => m.Name == "ProcessAsync" && + m.IsGenericMethodDefinition && + m.GetParameters().Length == 5 && + m.GetParameters()[1].ParameterType.IsGenericType && + m.GetParameters()[1].ParameterType.GetGenericTypeDefinition() == typeof(ITypedRecordHandlerWithContext<>) && + m.GetParameters()[2].ParameterType == typeof(ILambdaContext) && + m.GetParameters()[3].ParameterType == typeof(DeserializationOptions) && + m.GetParameters()[4].ParameterType == typeof(ProcessingOptions)); + } + else + { + // Look for ProcessAsync(TEvent, ITypedRecordHandler, DeserializationOptions, ProcessingOptions) + processMethod = _typedBatchProcessor.GetType().GetMethods() + .FirstOrDefault(m => m.Name == "ProcessAsync" && + m.IsGenericMethodDefinition && + m.GetParameters().Length == 4 && + m.GetParameters()[1].ParameterType.IsGenericType && + m.GetParameters()[1].ParameterType.GetGenericTypeDefinition() == typeof(ITypedRecordHandler<>) && + m.GetParameters()[2].ParameterType == typeof(DeserializationOptions) && + m.GetParameters()[3].ParameterType == typeof(ProcessingOptions)); + } + + if (processMethod == null) + { + throw new InvalidOperationException($"Could not find appropriate ProcessAsync method on typed batch processor for handler type '{handlerType.Name}'."); + } + + // Make the method generic with the data type + var genericProcessMethod = 
processMethod.MakeGenericMethod(dataType); + + // Call the method + Task processTask; + if (_hasContext && context != null) + { + processTask = (Task)genericProcessMethod.Invoke(_typedBatchProcessor, new object[] { @event, _typedHandler, context, _deserializationOptions, _processingOptions }); + } + else + { + processTask = (Task)genericProcessMethod.Invoke(_typedBatchProcessor, new object[] { @event, _typedHandler, _deserializationOptions, _processingOptions }); + } + + await processTask; + } +} \ No newline at end of file diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs new file mode 100644 index 000000000..3945803f7 --- /dev/null +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs @@ -0,0 +1,247 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Lambda.Core; +using Amazon.Lambda.SQSEvents; +using AWS.Lambda.Powertools.BatchProcessing.Sqs; +using NSubstitute; +using Xunit; + +namespace AWS.Lambda.Powertools.BatchProcessing.Tests; + +/// +/// Tests for BatchProcessorAttribute with typed handlers. 
+/// +public class BatchProcessorAttributeTypedHandlerTests +{ + /// + /// Test data class for testing typed record handlers. + /// + public class Order + { + public string Id { get; set; } + public string Name { get; set; } + public decimal Amount { get; set; } + public List Products { get; set; } = new(); + } + + /// + /// Test typed record handler for Order. + /// + public class OrderRecordHandler : ITypedRecordHandler + { + public async Task HandleAsync(Order order, CancellationToken cancellationToken) + { + // Simulate processing + if (order.Id == "fail") + { + throw new ArgumentException("Simulated failure"); + } + + return await Task.FromResult(RecordHandlerResult.None); + } + } + + /// + /// Test typed record handler with context for Order. + /// + public class OrderRecordHandlerWithContext : ITypedRecordHandlerWithContext + { + public async Task HandleAsync(Order order, ILambdaContext context, CancellationToken cancellationToken) + { + // Simulate processing with context + if (order.Id == "fail") + { + throw new ArgumentException("Simulated failure"); + } + + return await Task.FromResult(RecordHandlerResult.None); + } + } + + /// + /// Test function using typed record handler attribute. 
+ /// + public class TestFunction + { + [BatchProcessor(TypedRecordHandler = typeof(OrderRecordHandler))] + public BatchItemFailuresResponse ProcessOrdersWithTypedHandler(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(TypedRecordHandlerWithContext = typeof(OrderRecordHandlerWithContext))] + public BatchItemFailuresResponse ProcessOrdersWithTypedHandlerAndContext(SQSEvent sqsEvent, ILambdaContext context) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + } + + [Fact] + public void ProcessOrdersWithTypedHandler_ValidOrders_ProcessesSuccessfully() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"order-1\",\"Name\":\"Test Order\",\"Amount\":99.99,\"Products\":[\"Product A\",\"Product B\"]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "2", + Body = "{\"Id\":\"order-2\",\"Name\":\"Another Order\",\"Amount\":149.99,\"Products\":[\"Product C\"]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var function = new TestFunction(); + + // Act + var result = function.ProcessOrdersWithTypedHandler(sqsEvent); + + // Assert + Assert.NotNull(result); + Assert.Empty(result.BatchItemFailures); + } + + [Fact] + public void ProcessOrdersWithTypedHandler_OneFailure_ReportsPartialFailure() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"order-1\",\"Name\":\"Test Order\",\"Amount\":99.99,\"Products\":[\"Product A\"]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "2", + Body = "{\"Id\":\"fail\",\"Name\":\"Failing Order\",\"Amount\":0,\"Products\":[]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; 
+ + var function = new TestFunction(); + + // Act + var result = function.ProcessOrdersWithTypedHandler(sqsEvent); + + // Assert + Assert.NotNull(result); + Assert.Single(result.BatchItemFailures); + Assert.Equal("2", result.BatchItemFailures[0].ItemIdentifier); + } + + [Fact] + public void ProcessOrdersWithTypedHandlerAndContext_ValidOrders_ProcessesSuccessfully() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"order-1\",\"Name\":\"Test Order\",\"Amount\":99.99,\"Products\":[\"Product A\"]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var mockContext = Substitute.For(); + var function = new TestFunction(); + + // Act + var result = function.ProcessOrdersWithTypedHandlerAndContext(sqsEvent, mockContext); + + // Assert + Assert.NotNull(result); + Assert.Empty(result.BatchItemFailures); + } + + [Fact] + public void ProcessOrdersWithTypedHandlerAndContext_OneFailure_ReportsPartialFailure() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"fail\",\"Name\":\"Failing Order\",\"Amount\":0,\"Products\":[]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var mockContext = Substitute.For(); + var function = new TestFunction(); + + // Act + var result = function.ProcessOrdersWithTypedHandlerAndContext(sqsEvent, mockContext); + + // Assert + Assert.NotNull(result); + Assert.Single(result.BatchItemFailures); + Assert.Equal("1", result.BatchItemFailures[0].ItemIdentifier); + } + + [Fact] + public void ProcessOrdersWithTypedHandler_InvalidJson_HandlesDeserializationError() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "invalid json", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var 
function = new TestFunction(); + + // Act + var result = function.ProcessOrdersWithTypedHandler(sqsEvent); + + // Assert + Assert.NotNull(result); + Assert.Single(result.BatchItemFailures); + Assert.Equal("1", result.BatchItemFailures[0].ItemIdentifier); + } +} \ No newline at end of file diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs new file mode 100644 index 000000000..3be547795 --- /dev/null +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Lambda.SQSEvents; +using AWS.Lambda.Powertools.BatchProcessing.Sqs; +using Xunit; + +namespace AWS.Lambda.Powertools.BatchProcessing.Tests; + +/// +/// Simple test to verify typed handler attribute works. 
+/// +public class TypedHandlerAttributeSimpleTest +{ + public class SimpleOrder + { + public string Id { get; set; } + public string Name { get; set; } + } + + public class SimpleOrderHandler : ITypedRecordHandler + { + public async Task HandleAsync(SimpleOrder order, CancellationToken cancellationToken) + { + return await Task.FromResult(RecordHandlerResult.None); + } + } + + public class TestFunction + { + [BatchProcessor(TypedRecordHandler = typeof(SimpleOrderHandler))] + public BatchItemFailuresResponse ProcessOrders(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + } + + [Fact] + public void TypedHandlerAttribute_BasicTest_DoesNotThrowException() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"order-1\",\"Name\":\"Test Order\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var function = new TestFunction(); + + // Act & Assert - Should not throw the NotSupportedException anymore + var result = function.ProcessOrders(sqsEvent); + Assert.NotNull(result); + } +} \ No newline at end of file From 80cfa8df60cec3ed3616ece8e0e48fdde8d5fbc3 Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 17:21:30 +0100 Subject: [PATCH 2/9] feat: Enhance BatchProcessorAttribute to support typed handlers - Updated BatchProcessorAttribute to allow configuration of typed handlers for DynamoDB, Kinesis, and SQS events. - Introduced CreateTypedBatchProcessingAspectHandler method to handle typed batch processing. - Added validation for typed handler configurations. - Implemented singleton instances for TypedDynamoDbStreamBatchProcessor, TypedKinesisEventBatchProcessor, and TypedSqsBatchProcessor. - Refactored constructors to accept Powertools configurations and default services for typed processors. 
--- docs/utilities/batch-processing.md | 3079 ++++++++--------- .../BatchProcessorAttribute.cs | 153 +- .../TypedDynamoDbStreamBatchProcessor.cs | 42 +- .../TypedKinesisEventBatchProcessor.cs | 42 +- .../Sqs/TypedSqsBatchProcessor.cs | 42 +- 5 files changed, 1809 insertions(+), 1549 deletions(-) diff --git a/docs/utilities/batch-processing.md b/docs/utilities/batch-processing.md index fde2df6f0..3b30e057e 100644 --- a/docs/utilities/batch-processing.md +++ b/docs/utilities/batch-processing.md @@ -1,1544 +1,1537 @@ ---- -title: Batch Processing -description: Utility ---- - -The batch processing utility handles partial failures when processing batches from Amazon SQS, Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. - -```mermaid -stateDiagram-v2 - direction LR - BatchSource: Amazon SQS

Amazon Kinesis Data Streams

Amazon DynamoDB Streams

- LambdaInit: Lambda invocation - BatchProcessor: Batch Processor - RecordHandler: Record Handler function - YourLogic: Your logic to process each batch item - LambdaResponse: Lambda response - - BatchSource --> LambdaInit - - LambdaInit --> BatchProcessor - BatchProcessor --> RecordHandler - - state BatchProcessor { - [*] --> RecordHandler: Your function - RecordHandler --> YourLogic - } - - RecordHandler --> BatchProcessor: Collect results - BatchProcessor --> LambdaResponse: Report items that failed processing -``` - -## Key features - -- Reports batch item failures to reduce number of retries for a record upon errors -- Simple interface to process each batch record -- Typed batch processing with automatic deserialization -- Lambda context injection for typed handlers -- AOT (Ahead-of-Time) compilation support - -- Bring your own batch processor -- Parallel processing - -## Background - -When using SQS, Kinesis Data Streams, or DynamoDB Streams as a Lambda event source, your Lambda functions are triggered with a batch of messages. - -If your function fails to process any message from the batch, the entire batch returns to your queue or stream. This same batch is then retried until either condition happens first: **a)** your Lambda function returns a successful response, **b)** record reaches maximum retry attempts, or **c)** when records expire. - -```mermaid -journey - section Conditions - Successful response: 5: Success - Maximum retries: 3: Failure - Records expired: 1: Failure -``` - -This behavior changes when you enable Report Batch Item Failures feature in your Lambda function event source configuration: - -- [**SQS queues**](#sqs-standard). Only messages reported as failure will return to the queue for a retry, while successful ones will be deleted. -- [**Kinesis data streams**](#kinesis-and-dynamodb-streams) and [**DynamoDB streams**](#kinesis-and-dynamodb-streams). Single reported failure will use its sequence number as the stream checkpoint. 
Multiple reported failures will use the lowest sequence number as checkpoint. - - - -???+ warning "Warning: This utility lowers the chance of processing records more than once; it does not guarantee it" - We recommend implementing processing logic in an [idempotent manner](idempotency.md){target="_blank"} wherever possible. - - You can find more details on how Lambda works with either [SQS](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html){target="_blank"}, [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html){target="_blank"}, or [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html){target="_blank"} in the AWS Documentation. - -!!! warning "Migrating to v3" - - If you're upgrading to v3, please review the [Migration Guide v3](../migration-guide-v3.md) for important breaking changes including .NET 8 requirement and AWS SDK v4 migration. - -## Installation - -You should install with NuGet: - -```powershell -Install-Package AWS.Lambda.Powertools.BatchProcessing -``` - -Or via the .NET Core command line interface: - -```bash -dotnet add package AWS.Lambda.Powertools.BatchProcessing -``` - -## Getting started - -For this feature to work, you need to **(1)** configure your Lambda function event source to use `ReportBatchItemFailures`, and **(2)** return [a specific response](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting){target="_blank" rel="nofollow"} to report which records failed to be processed. - -You use your preferred deployment framework to set the correct configuration while this utility handles the correct response to be returned. 
- -Batch processing can be configured with the settings bellow: - -| Setting | Description | Environment variable | Default | -| ------------------------------- | ----------------------------------------------------------------------------- | ---------------------------------------------- | ----------------- | -| **Error Handling Policy** | The error handling policy to apply during batch processing. | `POWERTOOLS_BATCH_ERROR_HANDLING_POLICY` | `DeriveFromEvent` | -| **Parallel Enabled** | Controls if parallel processing of batch items is enabled. | `POWERTOOLS_BATCH_PARALLEL_ENABLED` | `false` | -| **Max Degree of Parallelism** | The maximum degree of parallelism to apply if parallel processing is enabled. | `POWERTOOLS_BATCH_MAX_DEGREE_OF_PARALLELISM` | `1` | -| **Throw on Full Batch Failure** | Controls if a `BatchProcessingException` is thrown on full batch failure. | `POWERTOOLS_BATCH_THROW_ON_FULL_BATCH_FAILURE` | `true` | - -### Required resources - -The remaining sections of the documentation will rely on these samples. For completeness, this demonstrates IAM permissions and Dead Letter Queue where batch records will be sent after 2 retries were attempted. - -!!! note "You do not need any additional IAM permissions to use this utility, except for what each event source requires." - -=== "SQS" - - ```yaml title="template.yaml" hl_lines="93-94" - --8<-- "docs/snippets/batch/templates/sqs.yaml" - ``` - -=== "Kinesis Data Streams" - - ```yaml title="template.yaml" hl_lines="109-110" - --8<-- "docs/snippets/batch/templates/kinesis.yaml" - ``` - -=== "DynamoDB Streams" - - ```yaml title="template.yaml" hl_lines="102-103" - --8<-- "docs/snippets/batch/templates/dynamodb.yaml" - ``` - -### Processing messages from SQS - -#### Using Typed Handler decorator (Recommended) - -Processing batches from SQS using typed Lambda handler decorator with automatic deserialization works in four stages: - -1. Define your data model class -2. 
Create a class that implements **`ITypedRecordHandler`** interface and the HandleAsync method -3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property -4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedSqsBatchProcessor.Result.BatchItemFailuresResponse`** - - -=== "Function.cs" - - ```csharp hl_lines="1 8 19 29 32" - public class Product - { - public int Id { get; set; } - public string? Name { get; set; } - public decimal Price { get; set; } - } - - public class TypedSqsRecordHandler : ITypedRecordHandler // (1)! - { - public async Task HandleAsync(Product product, CancellationToken cancellationToken) - { - /* - * Your business logic with automatic deserialization. - * If an exception is thrown, the item will be marked as a partial batch item failure. - */ - - Logger.LogInformation($"Processing product {product.Id} - {product.Name} (${product.Price})"); - - if (product.Id == 4) // (2)! - { - throw new ArgumentException("Error on id 4"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! - } - - } - - [BatchProcessor(TypedRecordHandler = typeof(TypedSqsRecordHandler))] - public BatchItemFailuresResponse HandlerUsingTypedAttribute(SQSEvent _) - { - return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; // (4)! - } - - ``` - - 1. **Step 1**. Creates a class that implements ITypedRecordHandler interface - Product is automatically deserialized from SQS message body. - 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail - 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. - 4. **Step 4**. 
Lambda function returns the Partial batch response using TypedSqsBatchProcessor - -=== "Sample event" - - ```json - { - "Records": [ - { - "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"id\": 1, \"name\": \"Laptop Computer\", \"price\": 999.99}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "7b270e59b47ff90a553787216d55d91d", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"id\": 4, \"name\": \"Invalid Product\", \"price\": -10.00}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "7b270e59b47ff90a553787216d55d92e", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - } - ] - } - - ``` - -=== "Sample response" - - The second record failed to be processed, therefore the processor added its message ID in the response. - - ```json - { - "batchItemFailures": [ - { - "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" - } - ] - } - ``` - -#### Using Handler decorator (Traditional) - -Processing batches from SQS using Lambda handler decorator works in three stages: - -1. Decorate your handler with **`BatchProcessor`** attribute -2. Create a class that implements **`ISqsRecordHandler`** interface and the HandleAsync method. -3. Pass the type of that class to **`RecordHandler`** property of the **`BatchProcessor`** attribute -4. 
Return **`BatchItemFailuresResponse`** from Lambda handler using **`SqsBatchProcessor.Result.BatchItemFailuresResponse`** - -=== "Function.cs" - - ```csharp hl_lines="1 12 22 17 25" - public class CustomSqsRecordHandler : ISqsRecordHandler // (1)! - { - public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) - { - /* - * Your business logic. - * If an exception is thrown, the item will be marked as a partial batch item failure. - */ - - var product = JsonSerializer.Deserialize(record.Body); - - if (product.Id == 4) // (2)! - { - throw new ArgumentException("Error on id 4"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! - } - - } - - [BatchProcessor(RecordHandler = typeof(CustomSqsRecordHandler))] - public BatchItemFailuresResponse HandlerUsingAttribute(SQSEvent _) - { - return SqsBatchProcessor.Result.BatchItemFailuresResponse; // (4)! - } - - ``` - - 1. **Step 1**. Creates a class that implements ISqsRecordHandler interface and the HandleAsync method. - 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail - 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. - 3. **Step 4**. 
Lambda function returns the Partial batch response - -=== "Sample event" - - ```json - { - "Records": [ - { - "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "fail", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "213f4fd3-84a4-4667-a1b9-c277964197d9", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - ] - } - - ``` - -=== "Sample response" - - The second record failed to be processed, therefore the processor added its message ID in the response. 
- - ```json - { - "batchItemFailures": [ - { - "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" - }, - { - "itemIdentifier": "213f4fd3-84a4-4667-a1b9-c277964197d9" - } - ] - } - ``` - -#### FIFO queues - -When using [SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html){target="_blank"}, we will stop processing messages after the first failure, and return all failed and unprocessed messages in `batchItemFailures`. -This helps preserve the ordering of messages in your queue. Powertools automatically detects a FIFO queue. - - - - - -### Processing messages from Kinesis - -#### Using Typed Handler decorator (Recommended) - -Processing batches from Kinesis using typed Lambda handler decorator with automatic deserialization works in four stages: - -1. Define your data model class -2. Create a class that implements **`ITypedRecordHandler`** interface and the HandleAsync method -3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property -4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedKinesisEventBatchProcessor.Result.BatchItemFailuresResponse`** - -=== "Function.cs" - - ```csharp hl_lines="1 9 15 20 24 27" - public class Order - { - public string? OrderId { get; set; } - public DateTime OrderDate { get; set; } - public List Items { get; set; } = new(); - public decimal TotalAmount { get; set; } - } - - internal class TypedKinesisRecordHandler : ITypedRecordHandler // (1)! - { - public async Task HandleAsync(Order order, CancellationToken cancellationToken) - { - Logger.LogInformation($"Processing order {order.OrderId} with {order.Items.Count} items"); - - if (order.TotalAmount <= 0) // (2)! - { - throw new ArgumentException("Invalid order total"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! 
- } - } - - [BatchProcessor(TypedRecordHandler = typeof(TypedKinesisRecordHandler))] - public BatchItemFailuresResponse HandlerUsingTypedAttribute(KinesisEvent _) - { - return TypedKinesisEventBatchProcessor.Result.BatchItemFailuresResponse; // (4)! - } - - ``` - - 1. **Step 1**. Creates a class that implements ITypedRecordHandler interface - Order is automatically deserialized from Kinesis record data. - 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail - 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. - 4. **Step 4**. Lambda function returns the Partial batch response using TypedKinesisEventBatchProcessor - -#### Using Handler decorator (Traditional) - -Processing batches from Kinesis using Lambda handler decorator works in three stages: - -1. Decorate your handler with **`BatchProcessor`** attribute -2. Create a class that implements **`IKinesisEventRecordHandler`** interface and the HandleAsync method. -3. Pass the type of that class to **`RecordHandler`** property of the **`BatchProcessor`** attribute -4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`KinesisEventBatchProcessor.Result.BatchItemFailuresResponse`** - -=== "Function.cs" - - ```csharp hl_lines="1 7 12 17 20" - internal class CustomKinesisEventRecordHandler : IKinesisEventRecordHandler // (1)! - { - public async Task HandleAsync(KinesisEvent.KinesisEventRecord record, CancellationToken cancellationToken) - { - var product = JsonSerializer.Deserialize(record.Kinesis.Data); - - if (product.Id == 4) // (2)! - { - throw new ArgumentException("Error on id 4"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! - } - } - - - [BatchProcessor(RecordHandler = typeof(CustomKinesisEventRecordHandler))] - public BatchItemFailuresResponse HandlerUsingAttribute(KinesisEvent _) - { - return KinesisEventBatchProcessor.Result.BatchItemFailuresResponse; // (4)! 
- } - - ``` - - 1. **Step 1**. Creates a class that implements the IKinesisEventRecordHandler interface and the HandleAsync method. - 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail - 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. - 3. **Step 4**. Lambda function returns the Partial batch response - -=== "Sample event" - - ```json - { - "Records": [ - { - "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "fail", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "213f4fd3-84a4-4667-a1b9-c277964197d9", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - 
"eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - ] - } - - ``` - -=== "Sample response" - - The second record failed to be processed, therefore the processor added its message ID in the response. - - ```json - { - "batchItemFailures": [ - { - "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" - }, - { - "itemIdentifier": "213f4fd3-84a4-4667-a1b9-c277964197d9" - } - ] - } - ``` - -### Processing messages from DynamoDB - -#### Using Typed Handler decorator (Recommended) - -Processing batches from DynamoDB Streams using typed Lambda handler decorator with automatic deserialization works in four stages: - -1. Define your data model class -2. Create a class that implements **`ITypedRecordHandler`** interface and the HandleAsync method -3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property -4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedDynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse`** - -=== "Function.cs" - - ```csharp hl_lines="1 9 15 20 24 27" - public class Customer - { - public string? CustomerId { get; set; } - public string? Name { get; set; } - public string? Email { get; set; } - public DateTime CreatedAt { get; set; } - } - - internal class TypedDynamoDbRecordHandler : ITypedRecordHandler // (1)! - { - public async Task HandleAsync(Customer customer, CancellationToken cancellationToken) - { - Logger.LogInformation($"Processing customer {customer.CustomerId} - {customer.Name}"); - - if (string.IsNullOrEmpty(customer.Email)) // (2)! - { - throw new ArgumentException("Customer email is required"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! - } - } - - [BatchProcessor(TypedRecordHandler = typeof(TypedDynamoDbRecordHandler))] - public BatchItemFailuresResponse HandlerUsingTypedAttribute(DynamoDBEvent _) - { - return TypedDynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; // (4)! 
- } - - ``` - - 1. **Step 1**. Creates a class that implements ITypedRecordHandler interface - Customer is automatically deserialized from DynamoDB stream record. - 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail - 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. - 4. **Step 4**. Lambda function returns the Partial batch response using TypedDynamoDbStreamBatchProcessor - -#### Using Handler decorator (Traditional) - -Processing batches from DynamoDB Streams using Lambda handler decorator works in three stages: - -1. Decorate your handler with **`BatchProcessor`** attribute -2. Create a class that implements **`IDynamoDbStreamRecordHandler`** and the HandleAsync method. -3. Pass the type of that class to **`RecordHandler`** property of the **`BatchProcessor`** attribute -4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse`** - -=== "Function.cs" - - ```csharp hl_lines="1 7 12 17 20" - internal class CustomDynamoDbStreamRecordHandler : IDynamoDbStreamRecordHandler // (1)! - { - public async Task HandleAsync(DynamoDBEvent.DynamodbStreamRecord record, CancellationToken cancellationToken) - { - var product = JsonSerializer.Deserialize(record.Dynamodb.NewImage["Product"].S); - - if (product.Id == 4) // (2)! - { - throw new ArgumentException("Error on id 4"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! - } - } - - - [BatchProcessor(RecordHandler = typeof(CustomDynamoDbStreamRecordHandler))] - public BatchItemFailuresResponse HandlerUsingAttribute(DynamoDBEvent _) - { - return DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; // (4)! - } - - ``` - - 1. **Step 1**. Creates a class that implements the IDynamoDbStreamRecordHandler and the HandleAsync method. - 2. **Step 2**. 
You can have custom logic inside the record handler and throw exceptions that will cause this message to fail - 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. - 3. **Step 4**. Lambda function returns the Partial batch response - -=== "Sample event" - - ```json - { - "Records": [ - { - "eventID": "1", - "eventVersion": "1.0", - "dynamodb": { - "Keys": { - "Id": { - "N": "101" - } - }, - "NewImage": { - "Product": { - "S": "{\"Id\":1,\"Name\":\"product-name\",\"Price\":14}" - } - }, - "StreamViewType": "NEW_AND_OLD_IMAGES", - "SequenceNumber": "3275880929", - "SizeBytes": 26 - }, - "awsRegion": "us-west-2", - "eventName": "INSERT", - "eventSourceARN": "eventsource_arn", - "eventSource": "aws:dynamodb" - }, - { - "eventID": "1", - "eventVersion": "1.0", - "dynamodb": { - "Keys": { - "Id": { - "N": "101" - } - }, - "NewImage": { - "Product": { - "S": "fail" - } - }, - "StreamViewType": "NEW_AND_OLD_IMAGES", - "SequenceNumber": "8640712661", - "SizeBytes": 26 - }, - "awsRegion": "us-west-2", - "eventName": "INSERT", - "eventSourceARN": "eventsource_arn", - "eventSource": "aws:dynamodb" - } - ] - } - - ``` - -=== "Sample response" - - The second record failed to be processed, therefore the processor added its message ID in the response. - - ```json - { - "batchItemFailures": [ - { - "itemIdentifier": "8640712661" - } - ] - } - ``` - -### Error handling - -By default, we catch any exception raised by your custom record handler HandleAsync method (ISqsRecordHandler, IKinesisEventRecordHandler, IDynamoDbStreamRecordHandler). -This allows us to **(1)** continue processing the batch, **(2)** collect each batch item that failed processing, and **(3)** return the appropriate response correctly without failing your Lambda function execution. - -=== "Function.cs" - - ```csharp hl_lines="14" - public class CustomSqsRecordHandler : ISqsRecordHandler // (1)! 
- { - public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) - { - /* - * Your business logic. - * If an exception is thrown, the item will be marked as a partial batch item failure. - */ - - var product = JsonSerializer.Deserialize(record.Body); - - if (product.Id == 4) // (2)! - { - throw new ArgumentException("Error on id 4"); - } - - return await Task.FromResult(RecordHandlerResult.None); // (3)! - } - - } - - ``` - -=== "Sample event" - - ```json - { - "Records": [ - { - "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "fail", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - { - "messageId": "213f4fd3-84a4-4667-a1b9-c277964197d9", - "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", - "body": "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", - "attributes": { - "ApproximateReceiveCount": "1", - "SentTimestamp": "1545082649183", - "SenderId": "SENDER_ID", - "ApproximateFirstReceiveTimestamp": "1545082649185" - }, - "messageAttributes": {}, - "md5OfBody": 
"e4e68fb7bd0e697a0ae8f1bb342846b3", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", - "awsRegion": "us-east-1" - }, - ] - } - - ``` - -=== "Sample response" - - The second record failed to be processed, therefore the processor added its message ID in the response. - - ```json - { - "batchItemFailures": [ - { - "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" - }, - { - "itemIdentifier": "213f4fd3-84a4-4667-a1b9-c277964197d9" - } - ] - } - ``` - -#### Error Handling Policy - -You can specify the error handling policy applied during batch processing. - -`ErrorHandlingPolicy` is used to control the error handling policy of the batch item processing. -With a value of `DeriveFromEvent` (default), the specific BatchProcessor, determines the policy based on the incoming event. - -For example, the `SqsBatchProcessor` looks at the EventSourceArn to determine if the ErrorHandlingPolicy should be `StopOnFirstBatchItemFailure` (for FIFO queues) or `ContinueOnBatchItemFailure` (for standard queues). -For `StopOnFirstBatchItemFailure` the batch processor stops processing and marks any remaining records as batch item failures. -For `ContinueOnBatchItemFailure` the batch processor continues processing batch items regardless of item failures. - -| Policy | Description | -| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | -| **DeriveFromEvent** | Auto-derive the policy based on the event. | -| **ContinueOnBatchItemFailure** | Continue processing regardless of whether other batch items fails during processing. | -| **StopOnFirstBatchItemFailure** | Stop processing other batch items after the first batch item has failed processing. This is useful to preserve ordered processing of events. | - -!!! 
note - - When using **StopOnFirstBatchItemFailure** and parallel processing is enabled, all batch items already scheduled to be processed, will be allowed to complete before the batch processing stops. - - Therefore, if order is important, it is recommended to use sequential (non-parallel) processing together with this value." - -To change the default error handling policy, you can set the **`POWERTOOLS_BATCH_ERROR_HANDLING_POLICY`** Environment Variable. - -Another approach is to decorate the handler and use one of the policies in the **`ErrorHandlingPolicy`** Enum property of the **`BatchProcessor`** attribute - -=== "Function.cs" - - ```csharp hl_lines="2" - [BatchProcessor(RecordHandler = typeof(CustomDynamoDbStreamRecordHandler), - ErrorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure)] - public BatchItemFailuresResponse HandlerUsingAttribute(DynamoDBEvent _) - { - return DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; - } - - ``` - -### Partial failure mechanics - -All records in the batch will be passed to this handler for processing, even if exceptions are thrown - Here's the behaviour after completing the batch: - -- **All records successfully processed**. We will return an empty list of item failures `{'batchItemFailures': []}`. -- **Partial success with some exceptions**. We will return a list of all item IDs/sequence numbers that failed processing. -- **All records failed to be processed**. By defaullt, we will throw a `BatchProcessingException` with a list of all exceptions raised during processing to reflect the failure in your operational metrics. However, in some scenarios, this might not be desired. See [Working with full batch failures](#working-with-full-batch-failures) for more information. - -The following sequence diagrams explain how each Batch processor behaves under different scenarios. 
- -#### SQS Standard - -> Read more about [Batch Failure Reporting feature in AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting){target="_blank"}. - -Sequence diagram to explain how [`BatchProcessor` works](#processing-messages-from-sqs) with SQS Standard queues. - -
-```mermaid -sequenceDiagram - autonumber - participant SQS queue - participant Lambda service - participant Lambda function - Lambda service->>SQS queue: Poll - Lambda service->>Lambda function: Invoke (batch event) - Lambda function->>Lambda service: Report some failed messages - activate SQS queue - Lambda service->>SQS queue: Delete successful messages - SQS queue-->>SQS queue: Failed messages return - Note over SQS queue,Lambda service: Process repeat - deactivate SQS queue -``` -SQS mechanism with Batch Item Failures -
- -#### SQS FIFO - -> Read more about [Batch Failure Reporting feature in AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting){target="_blank"}. - -Sequence diagram to explain how [`SqsFifoPartialProcessor` works](#fifo-queues) with SQS FIFO queues. - -
-```mermaid -sequenceDiagram - autonumber - participant SQS queue - participant Lambda service - participant Lambda function - Lambda service->>SQS queue: Poll - Lambda service->>Lambda function: Invoke (batch event) - activate Lambda function - Lambda function-->Lambda function: Process 2 out of 10 batch items - Lambda function--xLambda function: Fail on 3rd batch item - Lambda function->>Lambda service: Report 3rd batch item and unprocessed messages as failure - deactivate Lambda function - activate SQS queue - Lambda service->>SQS queue: Delete successful messages (1-2) - SQS queue-->>SQS queue: Failed messages return (3-10) - deactivate SQS queue -``` -SQS FIFO mechanism with Batch Item Failures -
- -#### Kinesis and DynamoDB Streams - -> Read more about [Batch Failure Reporting feature](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-batchfailurereporting){target="_blank"}. - -Sequence diagram to explain how `BatchProcessor` works with both [Kinesis Data Streams](#processing-messages-from-kinesis) and [DynamoDB Streams](#processing-messages-from-dynamodb). - -For brevity, we will use `Streams` to refer to either services. For theory on stream checkpoints, see this [blog post](https://aws.amazon.com/blogs/compute/optimizing-batch-processing-with-custom-checkpoints-in-aws-lambda/){target="_blank"} - -
-```mermaid -sequenceDiagram - autonumber - participant Streams - participant Lambda service - participant Lambda function - Lambda service->>Streams: Poll latest records - Lambda service->>Lambda function: Invoke (batch event) - activate Lambda function - Lambda function-->Lambda function: Process 2 out of 10 batch items - Lambda function--xLambda function: Fail on 3rd batch item - Lambda function-->Lambda function: Continue processing batch items (4-10) - Lambda function->>Lambda service: Report batch item as failure (3) - deactivate Lambda function - activate Streams - Lambda service->>Streams: Checkpoints to sequence number from 3rd batch item - Lambda service->>Streams: Poll records starting from updated checkpoint - deactivate Streams -``` -Kinesis and DynamoDB streams mechanism with single batch item failure -
- -The behavior changes slightly when there are multiple item failures. Stream checkpoint is updated to the lowest sequence number reported. - -!!! important "Note that the batch item sequence number could be different from batch item number in the illustration." - -
-```mermaid -sequenceDiagram - autonumber - participant Streams - participant Lambda service - participant Lambda function - Lambda service->>Streams: Poll latest records - Lambda service->>Lambda function: Invoke (batch event) - activate Lambda function - Lambda function-->Lambda function: Process 2 out of 10 batch items - Lambda function--xLambda function: Fail on 3-5 batch items - Lambda function-->Lambda function: Continue processing batch items (6-10) - Lambda function->>Lambda service: Report batch items as failure (3-5) - deactivate Lambda function - activate Streams - Lambda service->>Streams: Checkpoints to lowest sequence number - Lambda service->>Streams: Poll records starting from updated checkpoint - deactivate Streams -``` -Kinesis and DynamoDB streams mechanism with multiple batch item failures -
- -## Typed Batch Processing Advanced Features - -### AOT (Ahead-of-Time) Compilation Support - -For Native AOT scenarios, you can configure JsonSerializerContext: - -=== "JsonSerializerContext Configuration" - - ```csharp - [JsonSerializable(typeof(Product))] - [JsonSerializable(typeof(Order))] - [JsonSerializable(typeof(Customer))] - [JsonSerializable(typeof(List))] - [JsonSourceGenerationOptions( - PropertyNamingPolicy = JsonKnownNamingPolicy.CamelCase, - WriteIndented = false, - DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull)] - public partial class MyJsonSerializerContext : JsonSerializerContext - { - } - ``` - -=== "Using with Attribute" - - ```csharp hl_lines="2 3" - [BatchProcessor( - TypedRecordHandler = typeof(TypedSqsRecordHandler), - JsonSerializerContext = typeof(MyJsonSerializerContext))] - public BatchItemFailuresResponse ProcessWithAot(SQSEvent sqsEvent) - { - return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - - - -### Lambda Context Injection - -For typed handlers that need access to Lambda context, use `ITypedRecordHandlerWithContext`: - -=== "Handler with Context" - - ```csharp hl_lines="1 3" - public class ProductHandlerWithContext : ITypedRecordHandlerWithContext - { - public async Task HandleAsync(Product product, ILambdaContext context, CancellationToken cancellationToken) - { - Logger.LogInformation($"Processing product {product.Id} in request {context.AwsRequestId}"); - Logger.LogInformation($"Remaining time: {context.RemainingTime.TotalSeconds}s"); - - // Use context for timeout handling - if (context.RemainingTime.TotalSeconds < 5) - { - Logger.LogWarning("Low remaining time, processing quickly"); - } - - return RecordHandlerResult.None; - } - } - ``` - -=== "Function Usage" - - ```csharp hl_lines="1 2" - [BatchProcessor(TypedRecordHandler = typeof(ProductHandlerWithContext))] - public BatchItemFailuresResponse ProcessWithContext(SQSEvent sqsEvent, ILambdaContext context) - { - return 
TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - -### Migration from Traditional to Typed Handlers - -You can gradually migrate from traditional to typed handlers: - -=== "Before (Traditional)" - - ```csharp hl_lines="1 6" - public class TraditionalSqsHandler : ISqsRecordHandler - { - public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) - { - // Manual deserialization - var product = JsonSerializer.Deserialize(record.Body); - - Logger.LogInformation($"Processing product {product.Id}"); - - if (product.Price < 0) - throw new ArgumentException("Invalid price"); - - return RecordHandlerResult.None; - } - } - - [BatchProcessor(RecordHandler = typeof(TraditionalSqsHandler))] - public BatchItemFailuresResponse ProcessSqs(SQSEvent sqsEvent) - { - return SqsBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - -=== "After (Typed)" - - ```csharp hl_lines="1 5" - public class TypedSqsHandler : ITypedRecordHandler - { - public async Task HandleAsync(Product product, CancellationToken cancellationToken) - { - // Automatic deserialization - product is already deserialized! 
- Logger.LogInformation($"Processing product {product.Id}"); - - // Same business logic - if (product.Price < 0) - throw new ArgumentException("Invalid price"); - - return RecordHandlerResult.None; - } - } - - [BatchProcessor(TypedRecordHandler = typeof(TypedSqsHandler))] - public BatchItemFailuresResponse ProcessSqs(SQSEvent sqsEvent) - { - return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - -### Error Handling with Typed Processors - -Typed processors support the same error handling policies as traditional processors: - -=== "Custom Error Handling" - - ```csharp hl_lines="2" - [BatchProcessor( - TypedRecordHandler = typeof(TypedSqsHandler), - ErrorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure)] - public BatchItemFailuresResponse ProcessWithErrorPolicy(SQSEvent sqsEvent) - { - return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - -### Advanced - -#### Using utility outside handler and IoC - -You can use Batch processing without using the decorator. - -Calling the **`ProcessAsync`** method on the Instance of the static BatchProcessor (`SqsBatchProcessor`, `DynamoDbStreamBatchProcessor`, `KinesisEventBatchProcessor`) - -=== "Function.cs" - - ```csharp hl_lines="3" - public async Task HandlerUsingUtility(DynamoDBEvent dynamoDbEvent) - { - var result = await DynamoDbStreamBatchProcessor.Instance.ProcessAsync(dynamoDbEvent, RecordHandler.From(record => - { - var product = JsonSerializer.Deserialize(record.Dynamodb.NewImage["Product"].S); - - if (product.GetProperty("Id").GetInt16() == 4) - { - throw new ArgumentException("Error on 4"); - } - })); - return result.BatchItemFailuresResponse; - } - - ``` - -To make the handler testable you can use Dependency Injection to resolve the BatchProcessor (`SqsBatchProcessor`, `DynamoDbStreamBatchProcessor`, `KinesisEventBatchProcessor`) instance and then call the **`ProcessAsync`** method. 
- -=== "GetRequiredService inside the method" - - ```csharp hl_lines="3 4 5" - public async Task HandlerUsingUtilityFromIoc(DynamoDBEvent dynamoDbEvent) - { - var batchProcessor = Services.Provider.GetRequiredService(); - var recordHandler = Services.Provider.GetRequiredService(); - var result = await batchProcessor.ProcessAsync(dynamoDbEvent, recordHandler); - return result.BatchItemFailuresResponse; - } - - ``` - -=== "Injecting method parameters" - - ```csharp hl_lines="2 4" - public async Task HandlerUsingUtilityFromIoc(DynamoDBEvent dynamoDbEvent, - IDynamoDbStreamBatchProcessor batchProcessor, IDynamoDbStreamRecordHandler recordHandler) - { - var result = await batchProcessor.ProcessAsync(dynamoDbEvent, recordHandler); - return result.BatchItemFailuresResponse; - } - - ``` - -=== "Example implementation of IServiceProvider" - - ```csharp hl_lines="16 17" - internal class Services - { - private static readonly Lazy LazyInstance = new(Build); - - private static ServiceCollection _services; - public static IServiceProvider Provider => LazyInstance.Value; - - public static IServiceProvider Init() - { - return LazyInstance.Value; - } - - private static IServiceProvider Build() - { - _services = new ServiceCollection(); - _services.AddScoped(); - _services.AddScoped(); - return _services.BuildServiceProvider(); - } - } - - ``` - -#### Processing messages in parallel - -You can set the `POWERTOOLS_BATCH_PARALLEL_ENABLED` Environment Variable to `true` or set the property `BatchParallelProcessingEnabled` on the Lambda decorator to process messages concurrently. - -You can also set `POWERTOOLS_BATCH_MAX_DEGREE_OF_PARALLELISM` Environment Variable to the number of parallelism you which. - -!!! note - - MaxDegreeOfParallelism is used to control the parallelism of the batch item processing. - - With a value of 1, the processing is done sequentially (default). Sequential processing is recommended when preserving order is important - i.e. with SQS FIFIO queues. 
- - With a value > 1, the processing is done in parallel. Doing parallel processing can enable processing to complete faster, i.e., when processing does downstream service calls. - - With a value of -1, the parallelism is automatically configured to be the vCPU count of the Lambda function. Internally, the Batch Processing Utility utilizes Parallel.ForEachAsync Method and the ParallelOptions.MaxDegreeOfParallelism Property to enable this functionality. - -???+ question "When is this useful?" - Your use case might be able to process multiple records at the same time without conflicting with one another. - - For example, imagine you need to process multiple loyalty points and incrementally save in a database. While you await the database to confirm your records are saved, you could start processing another request concurrently. - - The reason this is not the default behaviour is that not all use cases can handle concurrency safely (e.g., loyalty points must be updated in order). - -=== "Function.cs" - - ```csharp hl_lines="1" - [BatchProcessor(RecordHandler = typeof(CustomDynamoDbStreamRecordHandler), BatchParallelProcessingEnabled = true )] - public BatchItemFailuresResponse HandlerUsingAttribute(DynamoDBEvent _) - { - return DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - -#### Working with full batch failures - -By default, the `BatchProcessor` will throw a `BatchProcessingException` if all records in the batch fail to process. We do this to reflect the failure in your operational metrics. - -When working with functions that handle batches with a small number of records, or when you use errors as a flow control mechanism, this behavior might not be desirable as your function might generate an unnaturally high number of errors. 
When this happens, the [Lambda service will scale down the concurrency of your function](https://docs.aws.amazon.com/lambda/latest/dg/services-sqs-errorhandling.html#services-sqs-backoff-strategy){target="_blank"}, potentially impacting performance. - -For these scenarios, you can set `POWERTOOLS_BATCH_THROW_ON_FULL_BATCH_FAILURE = false`, or the equivalent on either the `BatchProcessor` decorator or on the `ProcessingOptions` object. See examples below. - -=== "Setting ThrowOnFullBatchFailure on Decorator" - - ```csharp hl_lines="3" - [BatchProcessor( - RecordHandler = typeof(CustomSqsRecordHandler), - ThrowOnFullBatchFailure = false)] - public BatchItemFailuresResponse HandlerUsingAttribute(SQSEvent _) - { - return SqsBatchProcessor.Result.BatchItemFailuresResponse; - } - - ``` - -=== "Setting ThrowOnFullBatchFailure outside Decorator" - - ```csharp hl_lines="8" - public async Task HandlerUsingUtility(SQSEvent sqsEvent) - { - var result = await SqsBatchProcessor.Instance.ProcessAsync(sqsEvent, RecordHandler.From(x => - { - // Inline handling of SQS message... - }), new ProcessingOptions - { - ThrowOnFullBatchFailure = false - }); - return result.BatchItemFailuresResponse; - } - - ``` - -#### Extending BatchProcessor - -You might want to bring custom logic to the existing `BatchProcessor` to slightly override how we handle successes and failures. - -For these scenarios, you can create a class that inherits from `BatchProcessor` (`SqsBatchProcessor`, `DynamoDbStreamBatchProcessor`, `KinesisEventBatchProcessor`) and quickly override `ProcessAsync` and `HandleRecordFailureAsync` methods: - -- **`ProcessAsync()`** – Keeps track of successful batch records -- **`HandleRecordFailureAsync()`** – Keeps track of failed batch records - -???+ example - Let's suppose you'd like to add a metric named `BatchRecordFailures` for each batch record that failed processing. And also override the default error handling policy to stop on first item failure. 
- -=== "Function.cs" - - ```csharp hl_lines="1 21 54 97" - - public class CustomDynamoDbStreamBatchProcessor : DynamoDbStreamBatchProcessor - { - public override async Task> ProcessAsync(DynamoDBEvent @event, - IRecordHandler recordHandler, ProcessingOptions processingOptions) - { - ProcessingResult = new ProcessingResult(); - - // Prepare batch records (order is preserved) - var batchRecords = GetRecordsFromEvent(@event).Select(x => new KeyValuePair(GetRecordId(x), x)) - .ToArray(); - - // We assume all records fail by default to avoid loss of data - var failureBatchRecords = batchRecords.Select(x => new KeyValuePair>(x.Key, - new RecordFailure - { - Exception = new UnprocessedRecordException($"Record: '{x.Key}' has not been processed."), - Record = x.Value - })); - - // Override to fail on first failure - var errorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure; - - var successRecords = new Dictionary>(); - var failureRecords = new Dictionary>(failureBatchRecords); - - try - { - foreach (var pair in batchRecords) - { - var (recordId, record) = pair; - - try - { - var result = await HandleRecordAsync(record, recordHandler, CancellationToken.None); - failureRecords.Remove(recordId, out _); - successRecords.TryAdd(recordId, new RecordSuccess - { - Record = record, - RecordId = recordId, - HandlerResult = result - }); - } - catch (Exception ex) - { - // Capture exception - failureRecords[recordId] = new RecordFailure - { - Exception = new RecordProcessingException( - $"Failed processing record: '{recordId}'. 
See inner exception for details.", ex), - Record = record, - RecordId = recordId - }; - - Metrics.AddMetric("BatchRecordFailures", 1, MetricUnit.Count); - - try - { - // Invoke hook - await HandleRecordFailureAsync(record, ex); - } - catch - { - // NOOP - } - - // Check if we should stop record processing on first error - // ReSharper disable once ConditionIsAlwaysTrueOrFalse - if (errorHandlingPolicy == BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure) - { - // This causes the loop's (inner) cancellation token to be cancelled for all operations already scheduled internally - throw new CircuitBreakerException( - "Error handling policy is configured to stop processing on first batch item failure. See inner exception for details.", - ex); - } - } - } - } - catch (Exception ex) when (ex is CircuitBreakerException or OperationCanceledException) - { - // NOOP - } - - ProcessingResult.BatchRecords.AddRange(batchRecords.Select(x => x.Value)); - ProcessingResult.BatchItemFailuresResponse.BatchItemFailures.AddRange(failureRecords.Select(x => - new BatchItemFailuresResponse.BatchItemFailure - { - ItemIdentifier = x.Key - })); - ProcessingResult.FailureRecords.AddRange(failureRecords.Values); - - ProcessingResult.SuccessRecords.AddRange(successRecords.Values); - - return ProcessingResult; - } - - // ReSharper disable once RedundantOverriddenMember - protected override async Task HandleRecordFailureAsync(DynamoDBEvent.DynamodbStreamRecord record, Exception exception) - { - await base.HandleRecordFailureAsync(record, exception); - } - } - ``` - -## Testing your code - -### Testing Typed Handlers - -Testing typed batch processors is straightforward since you work directly with your data models: - -=== "Typed Handler Test" - - ```csharp - [Fact] - public async Task TypedHandler_ValidProduct_ProcessesSuccessfully() - { - // Arrange - var product = new Product { Id = 1, Name = "Test Product", Price = 10.99m }; - var handler = new TypedSqsRecordHandler(); - var 
cancellationToken = CancellationToken.None; - - // Act - var result = await handler.HandleAsync(product, cancellationToken); - - // Assert - Assert.Equal(RecordHandlerResult.None, result); - } - - [Fact] - public async Task TypedHandler_InvalidProduct_ThrowsException() - { - // Arrange - var product = new Product { Id = 4, Name = "Invalid", Price = -10 }; - var handler = new TypedSqsRecordHandler(); - - // Act & Assert - await Assert.ThrowsAsync(() => - handler.HandleAsync(product, CancellationToken.None)); - } - ``` - -=== "Integration Test" - - ```csharp - [Fact] - public async Task ProcessSqsEvent_WithTypedHandler_ProcessesAllRecords() - { - // Arrange - var sqsEvent = new SQSEvent - { - Records = new List - { - new() { - MessageId = "1", - Body = JsonSerializer.Serialize(new Product { Id = 1, Name = "Product 1", Price = 10 }), - EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:my-queue" - }, - new() { - MessageId = "2", - Body = JsonSerializer.Serialize(new Product { Id = 2, Name = "Product 2", Price = 20 }), - EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:my-queue" - } - } - }; - - var function = new TypedFunction(); - - // Act - var result = function.HandlerUsingTypedAttribute(sqsEvent); - - // Assert - Assert.Empty(result.BatchItemFailures); - } - ``` - -### Testing Traditional Handlers - -As there is no external calls, you can unit test your code with `BatchProcessor` quite easily. 
- -=== "Test.cs" - - ```csharp - [Fact] - public Task Sqs_Handler_Using_Attribute() - { - var request = new SQSEvent - { - Records = TestHelper.SqsMessages - }; - - var function = new HandlerFunction(); - - var response = function.HandlerUsingAttribute(request); - - Assert.Equal(2, response.BatchItemFailures.Count); - Assert.Equal("2", response.BatchItemFailures[0].ItemIdentifier); - Assert.Equal("4", response.BatchItemFailures[1].ItemIdentifier); - - return Task.CompletedTask; - } - ``` - -=== "Function.cs" - - ```csharp - [BatchProcessor(RecordHandler = typeof(CustomSqsRecordHandler))] - public BatchItemFailuresResponse HandlerUsingAttribute(SQSEvent _) - { - return SqsBatchProcessor.Result.BatchItemFailuresResponse; - } - ``` - -=== "CustomSqsRecordHandler.cs" - - ```csharp - public class CustomSqsRecordHandler : ISqsRecordHandler - { - public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) - { - var product = JsonSerializer.Deserialize(record.Body); - - if (product.GetProperty("Id").GetInt16() == 4) - { - throw new ArgumentException("Error on 4"); - } - - return await Task.FromResult(RecordHandlerResult.None); - } - } - ``` - -=== "SQS Event.cs" - - ```csharp - internal static List SqsMessages => new() - { - new SQSEvent.SQSMessage - { - MessageId = "1", - Body = "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", - EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" - }, - new SQSEvent.SQSMessage - { - MessageId = "2", - Body = "fail", - EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" - }, - new SQSEvent.SQSMessage - { - MessageId = "3", - Body = "{\"Id\":3,\"Name\":\"product-4\",\"Price\":14}", - EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" - }, - new SQSEvent.SQSMessage - { - MessageId = "4", - Body = "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", - EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" - }, - new SQSEvent.SQSMessage - { - MessageId = "5", - Body = 
"{\"Id\":5,\"Name\":\"product-4\",\"Price\":14}", - EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" - }, - }; - ``` - -## Complete Examples and Documentation - -The [BatchProcessing example](https://github.com/aws-powertools/powertools-lambda-dotnet/tree/develop/examples/BatchProcessing){target="\_blank"} contains complete working examples: - -- **TypedFunction.cs** - Complete examples using all typed batch processing patterns -- **TypedHandlers/** - Example implementations for SQS, Kinesis, and DynamoDB +--- +title: Batch Processing +description: Utility +--- + +The batch processing utility handles partial failures when processing batches from Amazon SQS, Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. + +```mermaid +stateDiagram-v2 + direction LR + BatchSource: Amazon SQS

<br/><br/> Amazon Kinesis Data Streams <br/><br/> Amazon DynamoDB Streams

+ LambdaInit: Lambda invocation + BatchProcessor: Batch Processor + RecordHandler: Record Handler function + YourLogic: Your logic to process each batch item + LambdaResponse: Lambda response + + BatchSource --> LambdaInit + + LambdaInit --> BatchProcessor + BatchProcessor --> RecordHandler + + state BatchProcessor { + [*] --> RecordHandler: Your function + RecordHandler --> YourLogic + } + + RecordHandler --> BatchProcessor: Collect results + BatchProcessor --> LambdaResponse: Report items that failed processing +``` + +## Key features + +- Reports batch item failures to reduce number of retries for a record upon errors +- Simple interface to process each batch record +- Typed batch processing with automatic deserialization +- Lambda context injection for typed handlers +- AOT (Ahead-of-Time) compilation support + +- Bring your own batch processor +- Parallel processing + +## Background + +When using SQS, Kinesis Data Streams, or DynamoDB Streams as a Lambda event source, your Lambda functions are triggered with a batch of messages. + +If your function fails to process any message from the batch, the entire batch returns to your queue or stream. This same batch is then retried until either condition happens first: **a)** your Lambda function returns a successful response, **b)** record reaches maximum retry attempts, or **c)** when records expire. + +```mermaid +journey + section Conditions + Successful response: 5: Success + Maximum retries: 3: Failure + Records expired: 1: Failure +``` + +This behavior changes when you enable Report Batch Item Failures feature in your Lambda function event source configuration: + +- [**SQS queues**](#sqs-standard). Only messages reported as failure will return to the queue for a retry, while successful ones will be deleted. +- [**Kinesis data streams**](#kinesis-and-dynamodb-streams) and [**DynamoDB streams**](#kinesis-and-dynamodb-streams). Single reported failure will use its sequence number as the stream checkpoint. 
Multiple reported failures will use the lowest sequence number as checkpoint. + + + +???+ warning "Warning: This utility lowers the chance of processing records more than once; it does not guarantee it" + We recommend implementing processing logic in an [idempotent manner](idempotency.md){target="_blank"} wherever possible. + + You can find more details on how Lambda works with either [SQS](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html){target="_blank"}, [Kinesis](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html){target="_blank"}, or [DynamoDB](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html){target="_blank"} in the AWS Documentation. + +!!! warning "Migrating to v3" + + If you're upgrading to v3, please review the [Migration Guide v3](../migration-guide-v3.md) for important breaking changes including .NET 8 requirement and AWS SDK v4 migration. + +## Installation + +You should install with NuGet: + +```powershell +Install-Package AWS.Lambda.Powertools.BatchProcessing +``` + +Or via the .NET Core command line interface: + +```bash +dotnet add package AWS.Lambda.Powertools.BatchProcessing +``` + +## Getting started + +For this feature to work, you need to **(1)** configure your Lambda function event source to use `ReportBatchItemFailures`, and **(2)** return [a specific response](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting){target="_blank" rel="nofollow"} to report which records failed to be processed. + +You use your preferred deployment framework to set the correct configuration while this utility handles the correct response to be returned. 
+
+Batch processing can be configured with the settings below:
+
+| Setting | Description | Environment variable | Default |
+| ------------------------------- | ----------------------------------------------------------------------------- | ---------------------------------------------- | ----------------- |
+| **Error Handling Policy** | The error handling policy to apply during batch processing. | `POWERTOOLS_BATCH_ERROR_HANDLING_POLICY` | `DeriveFromEvent` |
+| **Parallel Enabled** | Controls if parallel processing of batch items is enabled. | `POWERTOOLS_BATCH_PARALLEL_ENABLED` | `false` |
+| **Max Degree of Parallelism** | The maximum degree of parallelism to apply if parallel processing is enabled. | `POWERTOOLS_BATCH_MAX_DEGREE_OF_PARALLELISM` | `1` |
+| **Throw on Full Batch Failure** | Controls if a `BatchProcessingException` is thrown on full batch failure. | `POWERTOOLS_BATCH_THROW_ON_FULL_BATCH_FAILURE` | `true` |
+
+### Required resources
+
+The remaining sections of the documentation will rely on these samples. For completeness, this demonstrates IAM permissions and Dead Letter Queue where batch records will be sent after 2 retries were attempted.
+
+!!! note "You do not need any additional IAM permissions to use this utility, except for what each event source requires."
+
+=== "SQS"
+
+    ```yaml title="template.yaml" hl_lines="93-94"
+    --8<-- "docs/snippets/batch/templates/sqs.yaml"
+    ```
+
+=== "Kinesis Data Streams"
+
+    ```yaml title="template.yaml" hl_lines="109-110"
+    --8<-- "docs/snippets/batch/templates/kinesis.yaml"
+    ```
+
+=== "DynamoDB Streams"
+
+    ```yaml title="template.yaml" hl_lines="102-103"
+    --8<-- "docs/snippets/batch/templates/dynamodb.yaml"
+    ```
+
+### Processing messages from SQS
+
+#### Using Typed Handler decorator (Recommended)
+
+Processing batches from SQS using typed Lambda handler decorator with automatic deserialization works in four stages:
+
+1. Define your data model class
+2. 
Create a class that implements **`ITypedRecordHandler`** interface and the HandleAsync method +3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property +4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedSqsBatchProcessor.Result.BatchItemFailuresResponse`** + +=== "Function.cs" + + ```csharp hl_lines="1 8 19 29 32" + public class Product + { + public int Id { get; set; } + public string? Name { get; set; } + public decimal Price { get; set; } + } + + public class TypedSqsRecordHandler : ITypedRecordHandler // (1)! + { + public async Task HandleAsync(Product product, CancellationToken cancellationToken) + { + /* + * Your business logic with automatic deserialization. + * If an exception is thrown, the item will be marked as a partial batch item failure. + */ + + Logger.LogInformation($"Processing product {product.Id} - {product.Name} (${product.Price})"); + + if (product.Id == 4) // (2)! + { + throw new ArgumentException("Error on id 4"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! + } + + } + + [BatchProcessor(TypedRecordHandler = typeof(TypedSqsRecordHandler))] + public BatchItemFailuresResponse HandlerUsingTypedAttribute(SQSEvent _) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; // (4)! + } + + ``` + + 1. **Step 1**. Creates a class that implements ITypedRecordHandler interface - Product is automatically deserialized from SQS message body. + 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail + 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. + 4. **Step 4**. 
Lambda function returns the Partial batch response using TypedSqsBatchProcessor + +=== "Sample event" + + ```json + { + "Records": [ + { + "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"id\": 1, \"name\": \"Laptop Computer\", \"price\": 999.99}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "7b270e59b47ff90a553787216d55d91d", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"id\": 4, \"name\": \"Invalid Product\", \"price\": -10.00}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "7b270e59b47ff90a553787216d55d92e", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + } + ] + } + + ``` + +=== "Sample response" + + The second record failed to be processed, therefore the processor added its message ID in the response. + + ```json + { + "batchItemFailures": [ + { + "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" + } + ] + } + ``` + +#### Using Handler decorator (Traditional) + +Processing batches from SQS using Lambda handler decorator works in three stages: + +1. Decorate your handler with **`BatchProcessor`** attribute +2. Create a class that implements **`ISqsRecordHandler`** interface and the HandleAsync method. +3. Pass the type of that class to **`RecordHandler`** property of the **`BatchProcessor`** attribute +4. 
Return **`BatchItemFailuresResponse`** from Lambda handler using **`SqsBatchProcessor.Result.BatchItemFailuresResponse`** + +=== "Function.cs" + + ```csharp hl_lines="1 12 22 17 25" + public class CustomSqsRecordHandler : ISqsRecordHandler // (1)! + { + public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) + { + /* + * Your business logic. + * If an exception is thrown, the item will be marked as a partial batch item failure. + */ + + var product = JsonSerializer.Deserialize(record.Body); + + if (product.Id == 4) // (2)! + { + throw new ArgumentException("Error on id 4"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! + } + + } + + [BatchProcessor(RecordHandler = typeof(CustomSqsRecordHandler))] + public BatchItemFailuresResponse HandlerUsingAttribute(SQSEvent _) + { + return SqsBatchProcessor.Result.BatchItemFailuresResponse; // (4)! + } + + ``` + + 1. **Step 1**. Creates a class that implements ISqsRecordHandler interface and the HandleAsync method. + 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail + 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. + 3. **Step 4**. 
Lambda function returns the Partial batch response + +=== "Sample event" + + ```json + { + "Records": [ + { + "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "fail", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "213f4fd3-84a4-4667-a1b9-c277964197d9", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + ] + } + + ``` + +=== "Sample response" + + The second record failed to be processed, therefore the processor added its message ID in the response. 
+ + ```json + { + "batchItemFailures": [ + { + "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" + }, + { + "itemIdentifier": "213f4fd3-84a4-4667-a1b9-c277964197d9" + } + ] + } + ``` + +#### FIFO queues + +When using [SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html){target="_blank"}, we will stop processing messages after the first failure, and return all failed and unprocessed messages in `batchItemFailures`. +This helps preserve the ordering of messages in your queue. Powertools automatically detects a FIFO queue. + +### Processing messages from Kinesis + +#### Using Typed Handler decorator (Recommended) + +Processing batches from Kinesis using typed Lambda handler decorator with automatic deserialization works in four stages: + +1. Define your data model class +2. Create a class that implements **`ITypedRecordHandler`** interface and the HandleAsync method +3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property +4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedKinesisEventBatchProcessor.Result.BatchItemFailuresResponse`** + +=== "Function.cs" + + ```csharp hl_lines="1 9 15 20 24 27" + public class Order + { + public string? OrderId { get; set; } + public DateTime OrderDate { get; set; } + public List Items { get; set; } = new(); + public decimal TotalAmount { get; set; } + } + + internal class TypedKinesisRecordHandler : ITypedRecordHandler // (1)! + { + public async Task HandleAsync(Order order, CancellationToken cancellationToken) + { + Logger.LogInformation($"Processing order {order.OrderId} with {order.Items.Count} items"); + + if (order.TotalAmount <= 0) // (2)! + { + throw new ArgumentException("Invalid order total"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! 
+ } + } + + [BatchProcessor(TypedRecordHandler = typeof(TypedKinesisRecordHandler))] + public BatchItemFailuresResponse HandlerUsingTypedAttribute(KinesisEvent _) + { + return TypedKinesisEventBatchProcessor.Result.BatchItemFailuresResponse; // (4)! + } + + ``` + + 1. **Step 1**. Creates a class that implements ITypedRecordHandler interface - Order is automatically deserialized from Kinesis record data. + 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail + 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. + 4. **Step 4**. Lambda function returns the Partial batch response using TypedKinesisEventBatchProcessor + +#### Using Handler decorator (Traditional) + +Processing batches from Kinesis using Lambda handler decorator works in three stages: + +1. Decorate your handler with **`BatchProcessor`** attribute +2. Create a class that implements **`IKinesisEventRecordHandler`** interface and the HandleAsync method. +3. Pass the type of that class to **`RecordHandler`** property of the **`BatchProcessor`** attribute +4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`KinesisEventBatchProcessor.Result.BatchItemFailuresResponse`** + +=== "Function.cs" + + ```csharp hl_lines="1 7 12 17 20" + internal class CustomKinesisEventRecordHandler : IKinesisEventRecordHandler // (1)! + { + public async Task HandleAsync(KinesisEvent.KinesisEventRecord record, CancellationToken cancellationToken) + { + var product = JsonSerializer.Deserialize(record.Kinesis.Data); + + if (product.Id == 4) // (2)! + { + throw new ArgumentException("Error on id 4"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! + } + } + + + [BatchProcessor(RecordHandler = typeof(CustomKinesisEventRecordHandler))] + public BatchItemFailuresResponse HandlerUsingAttribute(KinesisEvent _) + { + return KinesisEventBatchProcessor.Result.BatchItemFailuresResponse; // (4)! 
+ } + + ``` + + 1. **Step 1**. Creates a class that implements the IKinesisEventRecordHandler interface and the HandleAsync method. + 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail + 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. + 3. **Step 4**. Lambda function returns the Partial batch response + +=== "Sample event" + + ```json + { + "Records": [ + { + "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "fail", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "213f4fd3-84a4-4667-a1b9-c277964197d9", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + 
"eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + ] + } + + ``` + +=== "Sample response" + + The second record failed to be processed, therefore the processor added its message ID in the response. + + ```json + { + "batchItemFailures": [ + { + "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" + }, + { + "itemIdentifier": "213f4fd3-84a4-4667-a1b9-c277964197d9" + } + ] + } + ``` + +### Processing messages from DynamoDB + +#### Using Typed Handler decorator (Recommended) + +Processing batches from DynamoDB Streams using typed Lambda handler decorator with automatic deserialization works in four stages: + +1. Define your data model class +2. Create a class that implements **`ITypedRecordHandler`** interface and the HandleAsync method +3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property +4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedDynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse`** + +=== "Function.cs" + + ```csharp hl_lines="1 9 15 20 24 27" + public class Customer + { + public string? CustomerId { get; set; } + public string? Name { get; set; } + public string? Email { get; set; } + public DateTime CreatedAt { get; set; } + } + + internal class TypedDynamoDbRecordHandler : ITypedRecordHandler // (1)! + { + public async Task HandleAsync(Customer customer, CancellationToken cancellationToken) + { + Logger.LogInformation($"Processing customer {customer.CustomerId} - {customer.Name}"); + + if (string.IsNullOrEmpty(customer.Email)) // (2)! + { + throw new ArgumentException("Customer email is required"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! + } + } + + [BatchProcessor(TypedRecordHandler = typeof(TypedDynamoDbRecordHandler))] + public BatchItemFailuresResponse HandlerUsingTypedAttribute(DynamoDBEvent _) + { + return TypedDynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; // (4)! 
+ } + + ``` + + 1. **Step 1**. Creates a class that implements ITypedRecordHandler interface - Customer is automatically deserialized from DynamoDB stream record. + 2. **Step 2**. You can have custom logic inside the record handler and throw exceptions that will cause this message to fail + 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. + 4. **Step 4**. Lambda function returns the Partial batch response using TypedDynamoDbStreamBatchProcessor + +#### Using Handler decorator (Traditional) + +Processing batches from DynamoDB Streams using Lambda handler decorator works in three stages: + +1. Decorate your handler with **`BatchProcessor`** attribute +2. Create a class that implements **`IDynamoDbStreamRecordHandler`** and the HandleAsync method. +3. Pass the type of that class to **`RecordHandler`** property of the **`BatchProcessor`** attribute +4. Return **`BatchItemFailuresResponse`** from Lambda handler using **`DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse`** + +=== "Function.cs" + + ```csharp hl_lines="1 7 12 17 20" + internal class CustomDynamoDbStreamRecordHandler : IDynamoDbStreamRecordHandler // (1)! + { + public async Task HandleAsync(DynamoDBEvent.DynamodbStreamRecord record, CancellationToken cancellationToken) + { + var product = JsonSerializer.Deserialize(record.Dynamodb.NewImage["Product"].S); + + if (product.Id == 4) // (2)! + { + throw new ArgumentException("Error on id 4"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! + } + } + + + [BatchProcessor(RecordHandler = typeof(CustomDynamoDbStreamRecordHandler))] + public BatchItemFailuresResponse HandlerUsingAttribute(DynamoDBEvent _) + { + return DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; // (4)! + } + + ``` + + 1. **Step 1**. Creates a class that implements the IDynamoDbStreamRecordHandler and the HandleAsync method. + 2. **Step 2**. 
You can have custom logic inside the record handler and throw exceptions that will cause this message to fail + 3. **Step 3**. RecordHandlerResult can return empty (None) or some data. + 3. **Step 4**. Lambda function returns the Partial batch response + +=== "Sample event" + + ```json + { + "Records": [ + { + "eventID": "1", + "eventVersion": "1.0", + "dynamodb": { + "Keys": { + "Id": { + "N": "101" + } + }, + "NewImage": { + "Product": { + "S": "{\"Id\":1,\"Name\":\"product-name\",\"Price\":14}" + } + }, + "StreamViewType": "NEW_AND_OLD_IMAGES", + "SequenceNumber": "3275880929", + "SizeBytes": 26 + }, + "awsRegion": "us-west-2", + "eventName": "INSERT", + "eventSourceARN": "eventsource_arn", + "eventSource": "aws:dynamodb" + }, + { + "eventID": "1", + "eventVersion": "1.0", + "dynamodb": { + "Keys": { + "Id": { + "N": "101" + } + }, + "NewImage": { + "Product": { + "S": "fail" + } + }, + "StreamViewType": "NEW_AND_OLD_IMAGES", + "SequenceNumber": "8640712661", + "SizeBytes": 26 + }, + "awsRegion": "us-west-2", + "eventName": "INSERT", + "eventSourceARN": "eventsource_arn", + "eventSource": "aws:dynamodb" + } + ] + } + + ``` + +=== "Sample response" + + The second record failed to be processed, therefore the processor added its message ID in the response. + + ```json + { + "batchItemFailures": [ + { + "itemIdentifier": "8640712661" + } + ] + } + ``` + +### Error handling + +By default, we catch any exception raised by your custom record handler HandleAsync method (ISqsRecordHandler, IKinesisEventRecordHandler, IDynamoDbStreamRecordHandler). +This allows us to **(1)** continue processing the batch, **(2)** collect each batch item that failed processing, and **(3)** return the appropriate response correctly without failing your Lambda function execution. + +=== "Function.cs" + + ```csharp hl_lines="14" + public class CustomSqsRecordHandler : ISqsRecordHandler // (1)! 
+ { + public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) + { + /* + * Your business logic. + * If an exception is thrown, the item will be marked as a partial batch item failure. + */ + + var product = JsonSerializer.Deserialize(record.Body); + + if (product.Id == 4) // (2)! + { + throw new ArgumentException("Error on id 4"); + } + + return await Task.FromResult(RecordHandlerResult.None); // (3)! + } + + } + + ``` + +=== "Sample event" + + ```json + { + "Records": [ + { + "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "244fc6b4-87a3-44ab-83d2-361172410c3a", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "fail", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + { + "messageId": "213f4fd3-84a4-4667-a1b9-c277964197d9", + "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a", + "body": "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", + "attributes": { + "ApproximateReceiveCount": "1", + "SentTimestamp": "1545082649183", + "SenderId": "SENDER_ID", + "ApproximateFirstReceiveTimestamp": "1545082649185" + }, + "messageAttributes": {}, + "md5OfBody": 
"e4e68fb7bd0e697a0ae8f1bb342846b3", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:us-east-2: 123456789012:my-queue", + "awsRegion": "us-east-1" + }, + ] + } + + ``` + +=== "Sample response" + + The second record failed to be processed, therefore the processor added its message ID in the response. + + ```json + { + "batchItemFailures": [ + { + "itemIdentifier": "244fc6b4-87a3-44ab-83d2-361172410c3a" + }, + { + "itemIdentifier": "213f4fd3-84a4-4667-a1b9-c277964197d9" + } + ] + } + ``` + +#### Error Handling Policy + +You can specify the error handling policy applied during batch processing. + +`ErrorHandlingPolicy` is used to control the error handling policy of the batch item processing. +With a value of `DeriveFromEvent` (default), the specific BatchProcessor, determines the policy based on the incoming event. + +For example, the `SqsBatchProcessor` looks at the EventSourceArn to determine if the ErrorHandlingPolicy should be `StopOnFirstBatchItemFailure` (for FIFO queues) or `ContinueOnBatchItemFailure` (for standard queues). +For `StopOnFirstBatchItemFailure` the batch processor stops processing and marks any remaining records as batch item failures. +For `ContinueOnBatchItemFailure` the batch processor continues processing batch items regardless of item failures. + +| Policy | Description | +| ------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | +| **DeriveFromEvent** | Auto-derive the policy based on the event. | +| **ContinueOnBatchItemFailure** | Continue processing regardless of whether other batch items fails during processing. | +| **StopOnFirstBatchItemFailure** | Stop processing other batch items after the first batch item has failed processing. This is useful to preserve ordered processing of events. | + +!!! 
note + + When using **StopOnFirstBatchItemFailure** and parallel processing is enabled, all batch items already scheduled to be processed, will be allowed to complete before the batch processing stops. + + Therefore, if order is important, it is recommended to use sequential (non-parallel) processing together with this value. + +To change the default error handling policy, you can set the **`POWERTOOLS_BATCH_ERROR_HANDLING_POLICY`** Environment Variable. + +Another approach is to decorate the handler and use one of the policies in the **`ErrorHandlingPolicy`** Enum property of the **`BatchProcessor`** attribute + +=== "Function.cs" + + ```csharp hl_lines="2" + [BatchProcessor(RecordHandler = typeof(CustomDynamoDbStreamRecordHandler), + ErrorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure)] + public BatchItemFailuresResponse HandlerUsingAttribute(DynamoDBEvent _) + { + return DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; + } + + ``` + +### Partial failure mechanics + +All records in the batch will be passed to this handler for processing, even if exceptions are thrown - Here's the behaviour after completing the batch: + +- **All records successfully processed**. We will return an empty list of item failures `{'batchItemFailures': []}`. +- **Partial success with some exceptions**. We will return a list of all item IDs/sequence numbers that failed processing. +- **All records failed to be processed**. By default, we will throw a `BatchProcessingException` with a list of all exceptions raised during processing to reflect the failure in your operational metrics. However, in some scenarios, this might not be desired. See [Working with full batch failures](#working-with-full-batch-failures) for more information. + +The following sequence diagrams explain how each Batch processor behaves under different scenarios. 
+ +#### SQS Standard + +> Read more about [Batch Failure Reporting feature in AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting){target="_blank"}. + +Sequence diagram to explain how [`BatchProcessor` works](#processing-messages-from-sqs) with SQS Standard queues. + +
+```mermaid +sequenceDiagram + autonumber + participant SQS queue + participant Lambda service + participant Lambda function + Lambda service->>SQS queue: Poll + Lambda service->>Lambda function: Invoke (batch event) + Lambda function->>Lambda service: Report some failed messages + activate SQS queue + Lambda service->>SQS queue: Delete successful messages + SQS queue-->>SQS queue: Failed messages return + Note over SQS queue,Lambda service: Process repeat + deactivate SQS queue +``` +SQS mechanism with Batch Item Failures +
+ +#### SQS FIFO + +> Read more about [Batch Failure Reporting feature in AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#services-sqs-batchfailurereporting){target="_blank"}. + +Sequence diagram to explain how [`SqsFifoPartialProcessor` works](#fifo-queues) with SQS FIFO queues. + +
+```mermaid +sequenceDiagram + autonumber + participant SQS queue + participant Lambda service + participant Lambda function + Lambda service->>SQS queue: Poll + Lambda service->>Lambda function: Invoke (batch event) + activate Lambda function + Lambda function-->Lambda function: Process 2 out of 10 batch items + Lambda function--xLambda function: Fail on 3rd batch item + Lambda function->>Lambda service: Report 3rd batch item and unprocessed messages as failure + deactivate Lambda function + activate SQS queue + Lambda service->>SQS queue: Delete successful messages (1-2) + SQS queue-->>SQS queue: Failed messages return (3-10) + deactivate SQS queue +``` +SQS FIFO mechanism with Batch Item Failures +
+ +#### Kinesis and DynamoDB Streams + +> Read more about [Batch Failure Reporting feature](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-batchfailurereporting){target="_blank"}. + +Sequence diagram to explain how `BatchProcessor` works with both [Kinesis Data Streams](#processing-messages-from-kinesis) and [DynamoDB Streams](#processing-messages-from-dynamodb). + +For brevity, we will use `Streams` to refer to either services. For theory on stream checkpoints, see this [blog post](https://aws.amazon.com/blogs/compute/optimizing-batch-processing-with-custom-checkpoints-in-aws-lambda/){target="_blank"} + +
+```mermaid +sequenceDiagram + autonumber + participant Streams + participant Lambda service + participant Lambda function + Lambda service->>Streams: Poll latest records + Lambda service->>Lambda function: Invoke (batch event) + activate Lambda function + Lambda function-->Lambda function: Process 2 out of 10 batch items + Lambda function--xLambda function: Fail on 3rd batch item + Lambda function-->Lambda function: Continue processing batch items (4-10) + Lambda function->>Lambda service: Report batch item as failure (3) + deactivate Lambda function + activate Streams + Lambda service->>Streams: Checkpoints to sequence number from 3rd batch item + Lambda service->>Streams: Poll records starting from updated checkpoint + deactivate Streams +``` +Kinesis and DynamoDB streams mechanism with single batch item failure +
+ +The behavior changes slightly when there are multiple item failures. Stream checkpoint is updated to the lowest sequence number reported. + +!!! important "Note that the batch item sequence number could be different from batch item number in the illustration." + +
+```mermaid +sequenceDiagram + autonumber + participant Streams + participant Lambda service + participant Lambda function + Lambda service->>Streams: Poll latest records + Lambda service->>Lambda function: Invoke (batch event) + activate Lambda function + Lambda function-->Lambda function: Process 2 out of 10 batch items + Lambda function--xLambda function: Fail on 3-5 batch items + Lambda function-->Lambda function: Continue processing batch items (6-10) + Lambda function->>Lambda service: Report batch items as failure (3-5) + deactivate Lambda function + activate Streams + Lambda service->>Streams: Checkpoints to lowest sequence number + Lambda service->>Streams: Poll records starting from updated checkpoint + deactivate Streams +``` +Kinesis and DynamoDB streams mechanism with multiple batch item failures +
+ +## Typed Batch Processing Advanced Features + +### AOT (Ahead-of-Time) Compilation Support + +For Native AOT scenarios, you can configure JsonSerializerContext: + +=== "JsonSerializerContext Configuration" + + ```csharp + [JsonSerializable(typeof(Product))] + [JsonSerializable(typeof(Order))] + [JsonSerializable(typeof(Customer))] + [JsonSerializable(typeof(List))] + [JsonSourceGenerationOptions( + PropertyNamingPolicy = JsonKnownNamingPolicy.CamelCase, + WriteIndented = false, + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull)] + public partial class MyJsonSerializerContext : JsonSerializerContext + { + } + ``` + +=== "Using with Attribute" + + ```csharp hl_lines="2 3" + [BatchProcessor( + TypedRecordHandler = typeof(TypedSqsRecordHandler), + JsonSerializerContext = typeof(MyJsonSerializerContext))] + public BatchItemFailuresResponse ProcessWithAot(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +### Lambda Context Injection + +For typed handlers that need access to Lambda context, use `ITypedRecordHandlerWithContext`: + +=== "Handler with Context" + + ```csharp hl_lines="1 3" + public class ProductHandlerWithContext : ITypedRecordHandlerWithContext + { + public async Task HandleAsync(Product product, ILambdaContext context, CancellationToken cancellationToken) + { + Logger.LogInformation($"Processing product {product.Id} in request {context.AwsRequestId}"); + Logger.LogInformation($"Remaining time: {context.RemainingTime.TotalSeconds}s"); + + // Use context for timeout handling + if (context.RemainingTime.TotalSeconds < 5) + { + Logger.LogWarning("Low remaining time, processing quickly"); + } + + return RecordHandlerResult.None; + } + } + ``` + +=== "Function Usage" + + ```csharp hl_lines="1 2" + [BatchProcessor(TypedRecordHandler = typeof(ProductHandlerWithContext))] + public BatchItemFailuresResponse ProcessWithContext(SQSEvent sqsEvent, ILambdaContext context) + { + return 
TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +### Migration from Traditional to Typed Handlers + +You can gradually migrate from traditional to typed handlers: + +=== "Before (Traditional)" + + ```csharp hl_lines="1 6" + public class TraditionalSqsHandler : ISqsRecordHandler + { + public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) + { + // Manual deserialization + var product = JsonSerializer.Deserialize(record.Body); + + Logger.LogInformation($"Processing product {product.Id}"); + + if (product.Price < 0) + throw new ArgumentException("Invalid price"); + + return RecordHandlerResult.None; + } + } + + [BatchProcessor(RecordHandler = typeof(TraditionalSqsHandler))] + public BatchItemFailuresResponse ProcessSqs(SQSEvent sqsEvent) + { + return SqsBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +=== "After (Typed)" + + ```csharp hl_lines="1 5" + public class TypedSqsHandler : ITypedRecordHandler + { + public async Task HandleAsync(Product product, CancellationToken cancellationToken) + { + // Automatic deserialization - product is already deserialized! 
+ Logger.LogInformation($"Processing product {product.Id}"); + + // Same business logic + if (product.Price < 0) + throw new ArgumentException("Invalid price"); + + return RecordHandlerResult.None; + } + } + + [BatchProcessor(TypedRecordHandler = typeof(TypedSqsHandler))] + public BatchItemFailuresResponse ProcessSqs(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +### Error Handling with Typed Processors + +Typed processors support the same error handling policies as traditional processors: + +=== "Custom Error Handling" + + ```csharp hl_lines="2" + [BatchProcessor( + TypedRecordHandler = typeof(TypedSqsHandler), + ErrorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure)] + public BatchItemFailuresResponse ProcessWithErrorPolicy(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +### Advanced + +#### Using utility outside handler and IoC + +You can use Batch processing without using the decorator. + +Calling the **`ProcessAsync`** method on the Instance of the static BatchProcessor (`SqsBatchProcessor`, `DynamoDbStreamBatchProcessor`, `KinesisEventBatchProcessor`) + +=== "Function.cs" + + ```csharp hl_lines="3" + public async Task HandlerUsingUtility(DynamoDBEvent dynamoDbEvent) + { + var result = await DynamoDbStreamBatchProcessor.Instance.ProcessAsync(dynamoDbEvent, RecordHandler.From(record => + { + var product = JsonSerializer.Deserialize(record.Dynamodb.NewImage["Product"].S); + + if (product.GetProperty("Id").GetInt16() == 4) + { + throw new ArgumentException("Error on 4"); + } + })); + return result.BatchItemFailuresResponse; + } + + ``` + +To make the handler testable you can use Dependency Injection to resolve the BatchProcessor (`SqsBatchProcessor`, `DynamoDbStreamBatchProcessor`, `KinesisEventBatchProcessor`) instance and then call the **`ProcessAsync`** method. 
+ +=== "GetRequiredService inside the method" + + ```csharp hl_lines="3 4 5" + public async Task HandlerUsingUtilityFromIoc(DynamoDBEvent dynamoDbEvent) + { + var batchProcessor = Services.Provider.GetRequiredService(); + var recordHandler = Services.Provider.GetRequiredService(); + var result = await batchProcessor.ProcessAsync(dynamoDbEvent, recordHandler); + return result.BatchItemFailuresResponse; + } + + ``` + +=== "Injecting method parameters" + + ```csharp hl_lines="2 4" + public async Task HandlerUsingUtilityFromIoc(DynamoDBEvent dynamoDbEvent, + IDynamoDbStreamBatchProcessor batchProcessor, IDynamoDbStreamRecordHandler recordHandler) + { + var result = await batchProcessor.ProcessAsync(dynamoDbEvent, recordHandler); + return result.BatchItemFailuresResponse; + } + + ``` + +=== "Example implementation of IServiceProvider" + + ```csharp hl_lines="16 17" + internal class Services + { + private static readonly Lazy LazyInstance = new(Build); + + private static ServiceCollection _services; + public static IServiceProvider Provider => LazyInstance.Value; + + public static IServiceProvider Init() + { + return LazyInstance.Value; + } + + private static IServiceProvider Build() + { + _services = new ServiceCollection(); + _services.AddScoped(); + _services.AddScoped(); + return _services.BuildServiceProvider(); + } + } + + ``` + +#### Processing messages in parallel + +You can set the `POWERTOOLS_BATCH_PARALLEL_ENABLED` Environment Variable to `true` or set the property `BatchParallelProcessingEnabled` on the Lambda decorator to process messages concurrently. + +You can also set `POWERTOOLS_BATCH_MAX_DEGREE_OF_PARALLELISM` Environment Variable to the number of parallelism you which. + +!!! note + + MaxDegreeOfParallelism is used to control the parallelism of the batch item processing. + + With a value of 1, the processing is done sequentially (default). Sequential processing is recommended when preserving order is important - i.e. with SQS FIFIO queues. 
+ + With a value > 1, the processing is done in parallel. Doing parallel processing can enable processing to complete faster, i.e., when processing does downstream service calls. + + With a value of -1, the parallelism is automatically configured to be the vCPU count of the Lambda function. Internally, the Batch Processing Utility utilizes Parallel.ForEachAsync Method and the ParallelOptions.MaxDegreeOfParallelism Property to enable this functionality. + +???+ question "When is this useful?" + Your use case might be able to process multiple records at the same time without conflicting with one another. + + For example, imagine you need to process multiple loyalty points and incrementally save in a database. While you await the database to confirm your records are saved, you could start processing another request concurrently. + + The reason this is not the default behaviour is that not all use cases can handle concurrency safely (e.g., loyalty points must be updated in order). + +=== "Function.cs" + + ```csharp hl_lines="1" + [BatchProcessor(RecordHandler = typeof(CustomDynamoDbStreamRecordHandler), BatchParallelProcessingEnabled = true )] + public BatchItemFailuresResponse HandlerUsingAttribute(DynamoDBEvent _) + { + return DynamoDbStreamBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +#### Working with full batch failures + +By default, the `BatchProcessor` will throw a `BatchProcessingException` if all records in the batch fail to process. We do this to reflect the failure in your operational metrics. + +When working with functions that handle batches with a small number of records, or when you use errors as a flow control mechanism, this behavior might not be desirable as your function might generate an unnaturally high number of errors. 
When this happens, the [Lambda service will scale down the concurrency of your function](https://docs.aws.amazon.com/lambda/latest/dg/services-sqs-errorhandling.html#services-sqs-backoff-strategy){target="_blank"}, potentially impacting performance. + +For these scenarios, you can set `POWERTOOLS_BATCH_THROW_ON_FULL_BATCH_FAILURE = false`, or the equivalent on either the `BatchProcessor` decorator or on the `ProcessingOptions` object. See examples below. + +=== "Setting ThrowOnFullBatchFailure on Decorator" + + ```csharp hl_lines="3" + [BatchProcessor( + RecordHandler = typeof(CustomSqsRecordHandler), + ThrowOnFullBatchFailure = false)] + public BatchItemFailuresResponse HandlerUsingAttribute(SQSEvent _) + { + return SqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + ``` + +=== "Setting ThrowOnFullBatchFailure outside Decorator" + + ```csharp hl_lines="8" + public async Task HandlerUsingUtility(SQSEvent sqsEvent) + { + var result = await SqsBatchProcessor.Instance.ProcessAsync(sqsEvent, RecordHandler.From(x => + { + // Inline handling of SQS message... + }), new ProcessingOptions + { + ThrowOnFullBatchFailure = false + }); + return result.BatchItemFailuresResponse; + } + + ``` + +#### Extending BatchProcessor + +You might want to bring custom logic to the existing `BatchProcessor` to slightly override how we handle successes and failures. + +For these scenarios, you can create a class that inherits from `BatchProcessor` (`SqsBatchProcessor`, `DynamoDbStreamBatchProcessor`, `KinesisEventBatchProcessor`) and quickly override `ProcessAsync` and `HandleRecordFailureAsync` methods: + +- **`ProcessAsync()`** – Keeps track of successful batch records +- **`HandleRecordFailureAsync()`** – Keeps track of failed batch records + +???+ example + Let's suppose you'd like to add a metric named `BatchRecordFailures` for each batch record that failed processing. And also override the default error handling policy to stop on first item failure. 
+ +=== "Function.cs" + + ```csharp hl_lines="1 21 54 97" + + public class CustomDynamoDbStreamBatchProcessor : DynamoDbStreamBatchProcessor + { + public override async Task> ProcessAsync(DynamoDBEvent @event, + IRecordHandler recordHandler, ProcessingOptions processingOptions) + { + ProcessingResult = new ProcessingResult(); + + // Prepare batch records (order is preserved) + var batchRecords = GetRecordsFromEvent(@event).Select(x => new KeyValuePair(GetRecordId(x), x)) + .ToArray(); + + // We assume all records fail by default to avoid loss of data + var failureBatchRecords = batchRecords.Select(x => new KeyValuePair>(x.Key, + new RecordFailure + { + Exception = new UnprocessedRecordException($"Record: '{x.Key}' has not been processed."), + Record = x.Value + })); + + // Override to fail on first failure + var errorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure; + + var successRecords = new Dictionary>(); + var failureRecords = new Dictionary>(failureBatchRecords); + + try + { + foreach (var pair in batchRecords) + { + var (recordId, record) = pair; + + try + { + var result = await HandleRecordAsync(record, recordHandler, CancellationToken.None); + failureRecords.Remove(recordId, out _); + successRecords.TryAdd(recordId, new RecordSuccess + { + Record = record, + RecordId = recordId, + HandlerResult = result + }); + } + catch (Exception ex) + { + // Capture exception + failureRecords[recordId] = new RecordFailure + { + Exception = new RecordProcessingException( + $"Failed processing record: '{recordId}'. 
See inner exception for details.", ex), + Record = record, + RecordId = recordId + }; + + Metrics.AddMetric("BatchRecordFailures", 1, MetricUnit.Count); + + try + { + // Invoke hook + await HandleRecordFailureAsync(record, ex); + } + catch + { + // NOOP + } + + // Check if we should stop record processing on first error + // ReSharper disable once ConditionIsAlwaysTrueOrFalse + if (errorHandlingPolicy == BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure) + { + // This causes the loop's (inner) cancellation token to be cancelled for all operations already scheduled internally + throw new CircuitBreakerException( + "Error handling policy is configured to stop processing on first batch item failure. See inner exception for details.", + ex); + } + } + } + } + catch (Exception ex) when (ex is CircuitBreakerException or OperationCanceledException) + { + // NOOP + } + + ProcessingResult.BatchRecords.AddRange(batchRecords.Select(x => x.Value)); + ProcessingResult.BatchItemFailuresResponse.BatchItemFailures.AddRange(failureRecords.Select(x => + new BatchItemFailuresResponse.BatchItemFailure + { + ItemIdentifier = x.Key + })); + ProcessingResult.FailureRecords.AddRange(failureRecords.Values); + + ProcessingResult.SuccessRecords.AddRange(successRecords.Values); + + return ProcessingResult; + } + + // ReSharper disable once RedundantOverriddenMember + protected override async Task HandleRecordFailureAsync(DynamoDBEvent.DynamodbStreamRecord record, Exception exception) + { + await base.HandleRecordFailureAsync(record, exception); + } + } + ``` + +## Testing your code + +### Testing Typed Handlers + +Testing typed batch processors is straightforward since you work directly with your data models: + +=== "Typed Handler Test" + + ```csharp + [Fact] + public async Task TypedHandler_ValidProduct_ProcessesSuccessfully() + { + // Arrange + var product = new Product { Id = 1, Name = "Test Product", Price = 10.99m }; + var handler = new TypedSqsRecordHandler(); + var 
cancellationToken = CancellationToken.None; + + // Act + var result = await handler.HandleAsync(product, cancellationToken); + + // Assert + Assert.Equal(RecordHandlerResult.None, result); + } + + [Fact] + public async Task TypedHandler_InvalidProduct_ThrowsException() + { + // Arrange + var product = new Product { Id = 4, Name = "Invalid", Price = -10 }; + var handler = new TypedSqsRecordHandler(); + + // Act & Assert + await Assert.ThrowsAsync(() => + handler.HandleAsync(product, CancellationToken.None)); + } + ``` + +=== "Integration Test" + + ```csharp + [Fact] + public async Task ProcessSqsEvent_WithTypedHandler_ProcessesAllRecords() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new() { + MessageId = "1", + Body = JsonSerializer.Serialize(new Product { Id = 1, Name = "Product 1", Price = 10 }), + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:my-queue" + }, + new() { + MessageId = "2", + Body = JsonSerializer.Serialize(new Product { Id = 2, Name = "Product 2", Price = 20 }), + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:my-queue" + } + } + }; + + var function = new TypedFunction(); + + // Act + var result = function.HandlerUsingTypedAttribute(sqsEvent); + + // Assert + Assert.Empty(result.BatchItemFailures); + } + ``` + +### Testing Traditional Handlers + +As there is no external calls, you can unit test your code with `BatchProcessor` quite easily. 
+ +=== "Test.cs" + + ```csharp + [Fact] + public Task Sqs_Handler_Using_Attribute() + { + var request = new SQSEvent + { + Records = TestHelper.SqsMessages + }; + + var function = new HandlerFunction(); + + var response = function.HandlerUsingAttribute(request); + + Assert.Equal(2, response.BatchItemFailures.Count); + Assert.Equal("2", response.BatchItemFailures[0].ItemIdentifier); + Assert.Equal("4", response.BatchItemFailures[1].ItemIdentifier); + + return Task.CompletedTask; + } + ``` + +=== "Function.cs" + + ```csharp + [BatchProcessor(RecordHandler = typeof(CustomSqsRecordHandler))] + public BatchItemFailuresResponse HandlerUsingAttribute(SQSEvent _) + { + return SqsBatchProcessor.Result.BatchItemFailuresResponse; + } + ``` + +=== "CustomSqsRecordHandler.cs" + + ```csharp + public class CustomSqsRecordHandler : ISqsRecordHandler + { + public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) + { + var product = JsonSerializer.Deserialize(record.Body); + + if (product.GetProperty("Id").GetInt16() == 4) + { + throw new ArgumentException("Error on 4"); + } + + return await Task.FromResult(RecordHandlerResult.None); + } + } + ``` + +=== "SQS Event.cs" + + ```csharp + internal static List SqsMessages => new() + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":1,\"Name\":\"product-4\",\"Price\":14}", + EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "2", + Body = "fail", + EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "3", + Body = "{\"Id\":3,\"Name\":\"product-4\",\"Price\":14}", + EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "4", + Body = "{\"Id\":4,\"Name\":\"product-4\",\"Price\":14}", + EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "5", + Body = 
"{\"Id\":5,\"Name\":\"product-4\",\"Price\":14}", + EventSourceArn = "arn:aws:sqs:us-east-2:123456789012:my-queue" + }, + }; + ``` + +## Complete Examples and Documentation + +The [BatchProcessing example](https://github.com/aws-powertools/powertools-lambda-dotnet/tree/develop/examples/BatchProcessing){target="\_blank"} contains complete working examples: + +- **TypedFunction.cs** - Complete examples using all typed batch processing patterns +- **TypedHandlers/** - Example implementations for SQS, Kinesis, and DynamoDB - **Sample Events** - Test events for all event types with typed data \ No newline at end of file diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs index f28f7f3be..847562d38 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs @@ -299,13 +299,20 @@ internal IBatchProcessingAspectHandler CreateAspectHandler(IReadOnlyList // Validate typed handler configurations ValidateTypedHandlerConfiguration(); - // Check if typed handlers are configured (not yet fully supported in attributes) + // Check if typed handlers are configured if (IsTypedHandlerConfigured()) { - throw new NotSupportedException("Typed record handlers are not yet fully supported with BatchProcessorAttribute. 
Please use direct typed batch processor calls for typed processing."); + // Create typed aspect handler + return eventType switch + { + BatchEventType.DynamoDbStream => CreateTypedBatchProcessingAspectHandler(() => TypedDynamoDbStreamBatchProcessor.TypedInstance, args), + BatchEventType.KinesisDataStream => CreateTypedBatchProcessingAspectHandler(() => TypedKinesisEventBatchProcessor.TypedInstance, args), + BatchEventType.Sqs => CreateTypedBatchProcessingAspectHandler(() => TypedSqsBatchProcessor.TypedInstance, args), + _ => throw new ArgumentOutOfRangeException(nameof(eventType), eventType, "Unsupported event type.") + }; } - // Create aspect handler + // Create traditional aspect handler return eventType switch { BatchEventType.DynamoDbStream => CreateBatchProcessingAspectHandler(() => DynamoDbStreamBatchProcessor.Instance), @@ -395,6 +402,146 @@ private BatchProcessingAspectHandler CreateBatchProcessingAspec }); } + private TypedBatchProcessingAspectHandler CreateTypedBatchProcessingAspectHandler(Func> defaultTypedBatchProcessorProvider, IReadOnlyList args) + { + // Create typed batch processor + ITypedBatchProcessor typedBatchProcessor; + if (BatchProcessor != null && BatchProcessor.IsAssignableTo(TypedBatchProcessorTypes[GetEventTypeFromArgs(args)])) + { + try + { + typedBatchProcessor = (ITypedBatchProcessor)Activator.CreateInstance(BatchProcessor)!; + } + catch (Exception ex) + { + throw new InvalidOperationException($"Error during creation of: '{BatchProcessor.Name}'.", ex); + } + } + else + { + typedBatchProcessor = defaultTypedBatchProcessorProvider.Invoke(); + } + + // Create deserialization options + var deserializationOptions = new DeserializationOptions + { + ErrorPolicy = DeserializationErrorPolicy + }; + + if (JsonSerializerContext != null) + { + try + { + var jsonSerializerContext = (JsonSerializerContext)Activator.CreateInstance(JsonSerializerContext)!; + deserializationOptions.JsonSerializerContext = jsonSerializerContext; + } + catch (Exception 
ex) + { + throw new InvalidOperationException($"Error during creation of JsonSerializerContext: '{JsonSerializerContext.Name}'.", ex); + } + } + + // Create processing options + var errorHandlingPolicy = Enum.TryParse(PowertoolsConfigurations.Instance.BatchProcessingErrorHandlingPolicy, true, out BatchProcessorErrorHandlingPolicy errHandlingPolicy) + ? errHandlingPolicy + : ErrorHandlingPolicy; + if (ErrorHandlingPolicy != BatchProcessorErrorHandlingPolicy.DeriveFromEvent) + { + errorHandlingPolicy = ErrorHandlingPolicy; + } + + var processingOptions = new ProcessingOptions + { + CancellationToken = CancellationToken.None, + ErrorHandlingPolicy = errorHandlingPolicy, + MaxDegreeOfParallelism = MaxDegreeOfParallelism, + BatchParallelProcessingEnabled = BatchParallelProcessingEnabled, + ThrowOnFullBatchFailure = ThrowOnFullBatchFailure + }; + + // Create typed handler wrapper + object typedHandler = null; + bool hasContext = false; + + if (TypedRecordHandler != null) + { + try + { + typedHandler = Activator.CreateInstance(TypedRecordHandler)!; + hasContext = false; + } + catch (Exception ex) + { + throw new InvalidOperationException($"Error during creation of: '{TypedRecordHandler.Name}'.", ex); + } + } + else if (TypedRecordHandlerProvider != null) + { + try + { + var provider = Activator.CreateInstance(TypedRecordHandlerProvider)!; + // Assume the provider has a Create() method that returns the handler + var createMethod = TypedRecordHandlerProvider.GetMethod("Create"); + if (createMethod == null) + { + throw new InvalidOperationException($"TypedRecordHandlerProvider '{TypedRecordHandlerProvider.Name}' must have a 'Create()' method."); + } + typedHandler = createMethod.Invoke(provider, null)!; + hasContext = false; + } + catch (Exception ex) + { + throw new InvalidOperationException($"Error during creation of typed record handler using provider: '{TypedRecordHandlerProvider.Name}'.", ex); + } + } + else if (TypedRecordHandlerWithContext != null) + { + try + { + 
typedHandler = Activator.CreateInstance(TypedRecordHandlerWithContext)!; + hasContext = true; + } + catch (Exception ex) + { + throw new InvalidOperationException($"Error during creation of: '{TypedRecordHandlerWithContext.Name}'.", ex); + } + } + else if (TypedRecordHandlerWithContextProvider != null) + { + try + { + var provider = Activator.CreateInstance(TypedRecordHandlerWithContextProvider)!; + // Assume the provider has a Create() method that returns the handler + var createMethod = TypedRecordHandlerWithContextProvider.GetMethod("Create"); + if (createMethod == null) + { + throw new InvalidOperationException($"TypedRecordHandlerWithContextProvider '{TypedRecordHandlerWithContextProvider.Name}' must have a 'Create()' method."); + } + typedHandler = createMethod.Invoke(provider, null)!; + hasContext = true; + } + catch (Exception ex) + { + throw new InvalidOperationException($"Error during creation of typed record handler with context using provider: '{TypedRecordHandlerWithContextProvider.Name}'.", ex); + } + } + else + { + throw new InvalidOperationException("A typed record handler or typed record handler provider is required."); + } + + return new TypedBatchProcessingAspectHandler(typedBatchProcessor, typedHandler, hasContext, deserializationOptions, processingOptions); + } + + private BatchEventType GetEventTypeFromArgs(IReadOnlyList args) + { + if (args == null || args.Count == 0 || !EventTypes.TryGetValue(args[0].GetType(), out var eventType)) + { + throw new ArgumentException($"The first function handler parameter must be of one of the following types: {string.Join(',', EventTypes.Keys.Select(x => $"'{x.Namespace}'"))}."); + } + return eventType; + } + private void ValidateTypedHandlerConfiguration() { // Ensure only one type of handler is configured diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs 
b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs index 1270a16b1..1282e5751 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs @@ -7,6 +7,7 @@ using Amazon.Lambda.DynamoDBEvents; using AWS.Lambda.Powertools.BatchProcessing.Exceptions; using AWS.Lambda.Powertools.BatchProcessing.Internal; +using AWS.Lambda.Powertools.Common; namespace AWS.Lambda.Powertools.BatchProcessing.DynamoDb; @@ -18,17 +19,56 @@ public class TypedDynamoDbStreamBatchProcessor : DynamoDbStreamBatchProcessor, I private readonly IDeserializationService _deserializationService; private readonly IRecordDataExtractor _recordDataExtractor; + /// + /// The singleton instance of the typed DynamoDB stream batch processor. + /// + private static ITypedBatchProcessor _typedInstance; + + /// + /// Gets the typed instance. + /// + /// The typed instance. + public static ITypedBatchProcessor TypedInstance => + _typedInstance ??= new TypedDynamoDbStreamBatchProcessor(); + + /// + /// Return the typed instance ProcessingResult + /// + public new static ProcessingResult Result => _typedInstance?.ProcessingResult; + /// /// Initializes a new instance of the TypedDynamoDbStreamBatchProcessor class. /// + /// The Powertools configurations. /// The deserialization service. If null, uses JsonDeserializationService.Instance. /// The record data extractor. If null, uses DynamoDbRecordDataExtractor.Instance. - public TypedDynamoDbStreamBatchProcessor(IDeserializationService deserializationService = null, + public TypedDynamoDbStreamBatchProcessor( + IPowertoolsConfigurations powertoolsConfigurations, + IDeserializationService deserializationService = null, IRecordDataExtractor recordDataExtractor = null) { _deserializationService = deserializationService ?? 
JsonDeserializationService.Instance; _recordDataExtractor = recordDataExtractor ?? DynamoDbRecordDataExtractor.Instance; + _typedInstance = this; + } + + /// + /// Initializes a new instance of the TypedDynamoDbStreamBatchProcessor class with default services. + /// + /// The deserialization service. If null, uses JsonDeserializationService.Instance. + /// The record data extractor. If null, uses DynamoDbRecordDataExtractor.Instance. + public TypedDynamoDbStreamBatchProcessor(IDeserializationService deserializationService = null, + IRecordDataExtractor recordDataExtractor = null) + : this(PowertoolsConfigurations.Instance, deserializationService, recordDataExtractor) + { + } + + /// + /// Default constructor for when consumers create a custom typed batch processor. + /// + public TypedDynamoDbStreamBatchProcessor() : this(PowertoolsConfigurations.Instance) + { } /// diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs index 2c2b6c145..1eeacab06 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs @@ -7,6 +7,7 @@ using Amazon.Lambda.KinesisEvents; using AWS.Lambda.Powertools.BatchProcessing.Exceptions; using AWS.Lambda.Powertools.BatchProcessing.Internal; +using AWS.Lambda.Powertools.Common; namespace AWS.Lambda.Powertools.BatchProcessing.Kinesis; @@ -18,17 +19,56 @@ public class TypedKinesisEventBatchProcessor : KinesisEventBatchProcessor, IType private readonly IDeserializationService _deserializationService; private readonly IRecordDataExtractor _recordDataExtractor; + /// + /// The singleton instance of the typed Kinesis batch processor. + /// + private static ITypedBatchProcessor _typedInstance; + + /// + /// Gets the typed instance. 
+ /// + /// The typed instance. + public static ITypedBatchProcessor TypedInstance => + _typedInstance ??= new TypedKinesisEventBatchProcessor(); + + /// + /// Return the typed instance ProcessingResult + /// + public new static ProcessingResult Result => _typedInstance?.ProcessingResult; + /// /// Initializes a new instance of the TypedKinesisEventBatchProcessor class. /// + /// The Powertools configurations. /// The deserialization service. If null, uses JsonDeserializationService.Instance. /// The record data extractor. If null, uses KinesisRecordDataExtractor.Instance. - public TypedKinesisEventBatchProcessor(IDeserializationService deserializationService = null, + public TypedKinesisEventBatchProcessor( + IPowertoolsConfigurations powertoolsConfigurations, + IDeserializationService deserializationService = null, IRecordDataExtractor recordDataExtractor = null) { _deserializationService = deserializationService ?? JsonDeserializationService.Instance; _recordDataExtractor = recordDataExtractor ?? KinesisRecordDataExtractor.Instance; + _typedInstance = this; + } + + /// + /// Initializes a new instance of the TypedKinesisEventBatchProcessor class with default services. + /// + /// The deserialization service. If null, uses JsonDeserializationService.Instance. + /// The record data extractor. If null, uses KinesisRecordDataExtractor.Instance. + public TypedKinesisEventBatchProcessor(IDeserializationService deserializationService = null, + IRecordDataExtractor recordDataExtractor = null) + : this(PowertoolsConfigurations.Instance, deserializationService, recordDataExtractor) + { + } + + /// + /// Default constructor for when consumers create a custom typed batch processor. 
+ /// + public TypedKinesisEventBatchProcessor() : this(PowertoolsConfigurations.Instance) + { } /// diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs index 093a79882..ce97e00d9 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs @@ -7,6 +7,7 @@ using Amazon.Lambda.SQSEvents; using AWS.Lambda.Powertools.BatchProcessing.Exceptions; using AWS.Lambda.Powertools.BatchProcessing.Internal; +using AWS.Lambda.Powertools.Common; namespace AWS.Lambda.Powertools.BatchProcessing.Sqs; @@ -18,17 +19,56 @@ public class TypedSqsBatchProcessor : SqsBatchProcessor, ITypedBatchProcessor _recordDataExtractor; + /// + /// The singleton instance of the typed SQS batch processor. + /// + private static ITypedBatchProcessor _typedInstance; + + /// + /// Gets the typed instance. + /// + /// The typed instance. + public static ITypedBatchProcessor TypedInstance => + _typedInstance ??= new TypedSqsBatchProcessor(); + + /// + /// Return the typed instance ProcessingResult + /// + public new static ProcessingResult Result => _typedInstance?.ProcessingResult; + /// /// Initializes a new instance of the TypedSqsBatchProcessor class. /// + /// The Powertools configurations. /// The deserialization service. If null, uses JsonDeserializationService.Instance. /// The record data extractor. If null, uses SqsRecordDataExtractor.Instance. - public TypedSqsBatchProcessor(IDeserializationService deserializationService = null, + public TypedSqsBatchProcessor( + IPowertoolsConfigurations powertoolsConfigurations, + IDeserializationService deserializationService = null, IRecordDataExtractor recordDataExtractor = null) { _deserializationService = deserializationService ?? JsonDeserializationService.Instance; _recordDataExtractor = recordDataExtractor ?? 
SqsRecordDataExtractor.Instance; + _typedInstance = this; + } + + /// + /// Initializes a new instance of the TypedSqsBatchProcessor class with default services. + /// + /// The deserialization service. If null, uses JsonDeserializationService.Instance. + /// The record data extractor. If null, uses SqsRecordDataExtractor.Instance. + public TypedSqsBatchProcessor(IDeserializationService deserializationService = null, + IRecordDataExtractor recordDataExtractor = null) + : this(PowertoolsConfigurations.Instance, deserializationService, recordDataExtractor) + { + } + + /// + /// Default constructor for when consumers create a custom typed batch processor. + /// + public TypedSqsBatchProcessor() : this(PowertoolsConfigurations.Instance) + { } /// From c0d36171602210d79dc7fdd7609bbf3d488b35f7 Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 17:30:53 +0100 Subject: [PATCH 3/9] update readme --- docs/utilities/batch-processing.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/utilities/batch-processing.md b/docs/utilities/batch-processing.md index 3b30e057e..9b464723f 100644 --- a/docs/utilities/batch-processing.md +++ b/docs/utilities/batch-processing.md @@ -134,6 +134,7 @@ Processing batches from SQS using typed Lambda handler decorator with automatic 3. Decorate your handler with **`BatchProcessor`** attribute using **`TypedRecordHandler`** property 4. 
Return **`BatchItemFailuresResponse`** from Lambda handler using **`TypedSqsBatchProcessor.Result.BatchItemFailuresResponse`** + === "Function.cs" ```csharp hl_lines="1 8 19 29 32" @@ -360,6 +361,10 @@ Processing batches from SQS using Lambda handler decorator works in three stages When using [SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html){target="_blank"}, we will stop processing messages after the first failure, and return all failed and unprocessed messages in `batchItemFailures`. This helps preserve the ordering of messages in your queue. Powertools automatically detects a FIFO queue. + + + + ### Processing messages from Kinesis #### Using Typed Handler decorator (Recommended) @@ -987,6 +992,8 @@ For Native AOT scenarios, you can configure JsonSerializerContext: } ``` + + ### Lambda Context Injection For typed handlers that need access to Lambda context, use `ITypedRecordHandlerWithContext`: @@ -1015,7 +1022,7 @@ For typed handlers that need access to Lambda context, use `ITypedRecordHandlerW === "Function Usage" ```csharp hl_lines="1 2" - [BatchProcessor(TypedRecordHandler = typeof(ProductHandlerWithContext))] + [BatchProcessor(TypedRecordHandlerWithContext = typeof(ProductHandlerWithContext))] public BatchItemFailuresResponse ProcessWithContext(SQSEvent sqsEvent, ILambdaContext context) { return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; From b847dd532dab136c993574340446d328458c20b0 Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 17:57:53 +0100 Subject: [PATCH 4/9] fix the tests --- .../ProcessingResult.cs | 1 + .../BatchProcessingAttributeTest.cs | 64 ------------------- ...atchProcessorAttributeTypedHandlerTests.cs | 32 +++++++++- .../TypedHandlerAttributeSimpleTest.cs | 4 ++ 4 files changed, 35 insertions(+), 66 deletions(-) diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/ProcessingResult.cs 
b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/ProcessingResult.cs index 8deda4d70..e399e4336 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/ProcessingResult.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/ProcessingResult.cs @@ -39,5 +39,6 @@ public void Clear() BatchRecords.Clear(); SuccessRecords.Clear(); FailureRecords.Clear(); + BatchItemFailuresResponse.BatchItemFailures.Clear(); } } \ No newline at end of file diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessingAttributeTest.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessingAttributeTest.cs index d50782345..91c8afa78 100644 --- a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessingAttributeTest.cs +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessingAttributeTest.cs @@ -14,70 +14,6 @@ namespace AWS.Lambda.Powertools.BatchProcessing.Tests [Collection("Sequential")] public partial class BatchProcessingAttributeTest { - [Fact] - public void BatchProcessorAttribute_WithTypedRecordHandler_ThrowsNotSupportedException() - { - // Arrange - var attribute = new BatchProcessorAttribute - { - TypedRecordHandler = typeof(TestTypedRecordHandler) - }; - - // Act & Assert - var exception = Assert.Throws(() => - attribute.CreateAspectHandler(new object[] { new SQSEvent() })); - - Assert.Contains("Typed record handlers are not yet fully supported with BatchProcessorAttribute", exception.Message); - } - - [Fact] - public void BatchProcessorAttribute_WithTypedRecordHandlerProvider_ThrowsNotSupportedException() - { - // Arrange - var attribute = new BatchProcessorAttribute - { - TypedRecordHandlerProvider = typeof(TestTypedRecordHandlerProvider) - }; - - // Act & Assert - var exception = Assert.Throws(() => - attribute.CreateAspectHandler(new object[] { new SQSEvent() })); - - Assert.Contains("Typed record handlers are not yet fully supported with BatchProcessorAttribute", 
exception.Message); - } - - [Fact] - public void BatchProcessorAttribute_WithTypedRecordHandlerWithContext_ThrowsNotSupportedException() - { - // Arrange - var attribute = new BatchProcessorAttribute - { - TypedRecordHandlerWithContext = typeof(TestTypedRecordHandlerWithContext) - }; - - // Act & Assert - var exception = Assert.Throws(() => - attribute.CreateAspectHandler(new object[] { new SQSEvent() })); - - Assert.Contains("Typed record handlers are not yet fully supported with BatchProcessorAttribute", exception.Message); - } - - [Fact] - public void BatchProcessorAttribute_WithTypedRecordHandlerWithContextProvider_ThrowsNotSupportedException() - { - // Arrange - var attribute = new BatchProcessorAttribute - { - TypedRecordHandlerWithContextProvider = typeof(TestTypedRecordHandlerWithContextProvider) - }; - - // Act & Assert - var exception = Assert.Throws(() => - attribute.CreateAspectHandler(new object[] { new SQSEvent() })); - - Assert.Contains("Typed record handlers are not yet fully supported with BatchProcessorAttribute", exception.Message); - } - [Fact] public void BatchProcessorAttribute_WithMultipleHandlerTypes_ThrowsInvalidOperationException() { diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs index 3945803f7..4d6814451 100644 --- a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeTypedHandlerTests.cs @@ -28,6 +28,7 @@ namespace AWS.Lambda.Powertools.BatchProcessing.Tests; /// /// Tests for BatchProcessorAttribute with typed handlers. 
/// +[Collection("BatchProcessorTests")] public class BatchProcessorAttributeTypedHandlerTests { /// @@ -96,6 +97,9 @@ public BatchItemFailuresResponse ProcessOrdersWithTypedHandlerAndContext(SQSEven [Fact] public void ProcessOrdersWithTypedHandler_ValidOrders_ProcessesSuccessfully() { + // Clear any previous test state + TypedSqsBatchProcessor.Result?.Clear(); + // Arrange var sqsEvent = new SQSEvent { @@ -129,6 +133,9 @@ public void ProcessOrdersWithTypedHandler_ValidOrders_ProcessesSuccessfully() [Fact] public void ProcessOrdersWithTypedHandler_OneFailure_ReportsPartialFailure() { + // Clear any previous test state + TypedSqsBatchProcessor.Result?.Clear(); + // Arrange var sqsEvent = new SQSEvent { @@ -163,6 +170,9 @@ public void ProcessOrdersWithTypedHandler_OneFailure_ReportsPartialFailure() [Fact] public void ProcessOrdersWithTypedHandlerAndContext_ValidOrders_ProcessesSuccessfully() { + // Clear any previous test state + TypedSqsBatchProcessor.Result?.Clear(); + // Arrange var sqsEvent = new SQSEvent { @@ -191,6 +201,9 @@ public void ProcessOrdersWithTypedHandlerAndContext_ValidOrders_ProcessesSuccess [Fact] public void ProcessOrdersWithTypedHandlerAndContext_OneFailure_ReportsPartialFailure() { + // Clear any previous test state + TypedSqsBatchProcessor.Result?.Clear(); + // Arrange var sqsEvent = new SQSEvent { @@ -199,6 +212,12 @@ public void ProcessOrdersWithTypedHandlerAndContext_OneFailure_ReportsPartialFai new SQSEvent.SQSMessage { MessageId = "1", + Body = "{\"Id\":\"order-1\",\"Name\":\"Valid Order\",\"Amount\":99.99,\"Products\":[\"Product A\"]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "2", Body = "{\"Id\":\"fail\",\"Name\":\"Failing Order\",\"Amount\":0,\"Products\":[]}", EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" } @@ -214,12 +233,15 @@ public void ProcessOrdersWithTypedHandlerAndContext_OneFailure_ReportsPartialFai // Assert Assert.NotNull(result); 
Assert.Single(result.BatchItemFailures); - Assert.Equal("1", result.BatchItemFailures[0].ItemIdentifier); + Assert.Equal("2", result.BatchItemFailures[0].ItemIdentifier); } [Fact] public void ProcessOrdersWithTypedHandler_InvalidJson_HandlesDeserializationError() { + // Clear any previous test state + TypedSqsBatchProcessor.Result?.Clear(); + // Arrange var sqsEvent = new SQSEvent { @@ -228,6 +250,12 @@ public void ProcessOrdersWithTypedHandler_InvalidJson_HandlesDeserializationErro new SQSEvent.SQSMessage { MessageId = "1", + Body = "{\"Id\":\"order-1\",\"Name\":\"Valid Order\",\"Amount\":99.99,\"Products\":[\"Product A\"]}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + }, + new SQSEvent.SQSMessage + { + MessageId = "2", Body = "invalid json", EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" } @@ -242,6 +270,6 @@ public void ProcessOrdersWithTypedHandler_InvalidJson_HandlesDeserializationErro // Assert Assert.NotNull(result); Assert.Single(result.BatchItemFailures); - Assert.Equal("1", result.BatchItemFailures[0].ItemIdentifier); + Assert.Equal("2", result.BatchItemFailures[0].ItemIdentifier); } } \ No newline at end of file diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs index 3be547795..539968f37 100644 --- a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/TypedHandlerAttributeSimpleTest.cs @@ -25,6 +25,7 @@ namespace AWS.Lambda.Powertools.BatchProcessing.Tests; /// /// Simple test to verify typed handler attribute works. 
/// +[Collection("BatchProcessorTests")] public class TypedHandlerAttributeSimpleTest { public class SimpleOrder @@ -53,6 +54,9 @@ public BatchItemFailuresResponse ProcessOrders(SQSEvent sqsEvent) [Fact] public void TypedHandlerAttribute_BasicTest_DoesNotThrowException() { + // Clear any previous test state + TypedSqsBatchProcessor.Result?.Clear(); + // Arrange var sqsEvent = new SQSEvent { From 7fb0e9b690e4f50a8d9917a2c54f657a1b25263a Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 18:08:08 +0100 Subject: [PATCH 5/9] more test coverage --- .../BatchProcessorAttributeValidationTests.cs | 364 ++++++++++++++++++ 1 file changed, 364 insertions(+) create mode 100644 libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs new file mode 100644 index 000000000..3a3fd0430 --- /dev/null +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs @@ -0,0 +1,364 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; +using System.Threading; +using System.Threading.Tasks; +using Amazon.Lambda.Core; +using Amazon.Lambda.SQSEvents; +using AWS.Lambda.Powertools.BatchProcessing.Sqs; +using NSubstitute; +using Xunit; + +namespace AWS.Lambda.Powertools.BatchProcessing.Tests; + +/// +/// Tests for BatchProcessorAttribute validation and error handling scenarios. +/// +[Collection("BatchProcessorTests")] +public partial class BatchProcessorAttributeValidationTests +{ + public class TestData + { + public string Id { get; set; } + public string Name { get; set; } + } + + public class ValidHandler : ITypedRecordHandler + { + public async Task HandleAsync(TestData data, CancellationToken cancellationToken) + { + return await Task.FromResult(RecordHandlerResult.None); + } + } + + public class ValidHandlerWithContext : ITypedRecordHandlerWithContext + { + public async Task HandleAsync(TestData data, ILambdaContext context, CancellationToken cancellationToken) + { + return await Task.FromResult(RecordHandlerResult.None); + } + } + + public class InvalidHandler + { + // Does not implement any batch processing interface + } + + public class ValidHandlerProvider : ITypedRecordHandlerProvider + { + public ITypedRecordHandler Create() + { + return new ValidHandler(); + } + } + + public class InvalidHandlerProvider + { + // Missing Create method + } + + public class ValidHandlerWithContextProvider : ITypedRecordHandlerWithContextProvider + { + public ITypedRecordHandlerWithContext Create() + { + return new ValidHandlerWithContext(); + } + } + + public class InvalidHandlerWithContextProvider + { + // Missing Create method + } + + [JsonSerializable(typeof(TestData))] + public partial class TestJsonContext : JsonSerializerContext + { + } + + public class InvalidJsonContext + { + // Does not inherit from JsonSerializerContext + } + + public class TestFunctions + { + [BatchProcessor(TypedRecordHandler = 
typeof(ValidHandler), TypedRecordHandlerWithContext = typeof(ValidHandlerWithContext))] + public BatchItemFailuresResponse ProcessWithMultipleHandlers(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(JsonSerializerContext = typeof(TestJsonContext), TypedRecordHandler = typeof(ValidHandler))] + public BatchItemFailuresResponse ProcessWithJsonContext(SQSEvent sqsEvent) + { + TypedSqsBatchProcessor.Result?.Clear(); + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(JsonSerializerContext = typeof(InvalidJsonContext), TypedRecordHandler = typeof(ValidHandler))] + public BatchItemFailuresResponse ProcessWithInvalidJsonContext(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(TypedRecordHandlerProvider = typeof(ValidHandlerProvider))] + public BatchItemFailuresResponse ProcessWithValidProvider(SQSEvent sqsEvent) + { + TypedSqsBatchProcessor.Result?.Clear(); + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(TypedRecordHandlerProvider = typeof(InvalidHandlerProvider))] + public BatchItemFailuresResponse ProcessWithInvalidProvider(SQSEvent sqsEvent) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(TypedRecordHandlerWithContextProvider = typeof(ValidHandlerWithContextProvider))] + public BatchItemFailuresResponse ProcessWithValidContextProvider(SQSEvent sqsEvent, ILambdaContext context) + { + TypedSqsBatchProcessor.Result?.Clear(); + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + [BatchProcessor(TypedRecordHandlerWithContextProvider = typeof(InvalidHandlerWithContextProvider))] + public BatchItemFailuresResponse ProcessWithInvalidContextProvider(SQSEvent sqsEvent, ILambdaContext context) + { + return TypedSqsBatchProcessor.Result.BatchItemFailuresResponse; + } + + public BatchItemFailuresResponse 
ProcessWithoutAttribute(SQSEvent sqsEvent) + { + return new BatchItemFailuresResponse(); + } + } + + [Fact] + public void BatchProcessorAttribute_WithMultipleHandlers_ThrowsInvalidOperationException() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var function = new TestFunctions(); + + // Act & Assert + Assert.Throws(() => function.ProcessWithMultipleHandlers(sqsEvent)); + } + + [Fact] + public void BatchProcessorAttribute_WithJsonContext_ProcessesSuccessfully() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var function = new TestFunctions(); + + // Act + var result = function.ProcessWithJsonContext(sqsEvent); + + // Assert + Assert.NotNull(result); + Assert.Empty(result.BatchItemFailures); + } + + [Fact] + public void BatchProcessorAttribute_WithInvalidJsonContext_ThrowsInvalidOperationException() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var function = new TestFunctions(); + + // Act & Assert + Assert.Throws(() => function.ProcessWithInvalidJsonContext(sqsEvent)); + } + + [Fact] + public void BatchProcessorAttribute_WithValidProvider_ProcessesSuccessfully() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + 
var function = new TestFunctions(); + + // Act + var result = function.ProcessWithValidProvider(sqsEvent); + + // Assert + Assert.NotNull(result); + Assert.Empty(result.BatchItemFailures); + } + + [Fact] + public void BatchProcessorAttribute_WithInvalidProvider_ThrowsInvalidOperationException() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var function = new TestFunctions(); + + // Act & Assert + Assert.Throws(() => function.ProcessWithInvalidProvider(sqsEvent)); + } + + [Fact] + public void BatchProcessorAttribute_WithValidContextProvider_ProcessesSuccessfully() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var mockContext = Substitute.For(); + var function = new TestFunctions(); + + // Act + var result = function.ProcessWithValidContextProvider(sqsEvent, mockContext); + + // Assert + Assert.NotNull(result); + Assert.Empty(result.BatchItemFailures); + } + + [Fact] + public void BatchProcessorAttribute_WithInvalidContextProvider_ThrowsInvalidOperationException() + { + // Arrange + var sqsEvent = new SQSEvent + { + Records = new List + { + new SQSEvent.SQSMessage + { + MessageId = "1", + Body = "{\"Id\":\"test-1\",\"Name\":\"Test Data\"}", + EventSourceArn = "arn:aws:sqs:us-east-1:123456789012:test-queue" + } + } + }; + + var mockContext = Substitute.For(); + var function = new TestFunctions(); + + // Act & Assert + Assert.Throws(() => function.ProcessWithInvalidContextProvider(sqsEvent, mockContext)); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithNullArgs_ThrowsArgumentException() + { + // Arrange + var 
attribute = new BatchProcessorAttribute + { + TypedRecordHandler = typeof(ValidHandler) + }; + + // Act & Assert + Assert.Throws(() => attribute.CreateAspectHandler(null)); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithEmptyArgs_ThrowsArgumentException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandler = typeof(ValidHandler) + }; + + // Act & Assert + Assert.Throws(() => attribute.CreateAspectHandler(new object[0])); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithInvalidEventType_ThrowsArgumentException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandler = typeof(ValidHandler) + }; + + // Act & Assert + Assert.Throws(() => attribute.CreateAspectHandler(new object[] { "invalid event" })); + } +} \ No newline at end of file From d70cd6bc9f7bd385356186e2637fd975f9574e2a Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 18:21:23 +0100 Subject: [PATCH 6/9] add tests for fail paths --- .../BatchProcessorAttributeValidationTests.cs | 259 ++++++++++++++++++ 1 file changed, 259 insertions(+) diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs index 3a3fd0430..303e73ef0 100644 --- a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs @@ -80,6 +80,14 @@ public ITypedRecordHandlerWithContext Create() } } + public class ValidSqsRecordHandler : IRecordHandler + { + public async Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) + { + return await Task.FromResult(RecordHandlerResult.None); + } + } + public class 
InvalidHandlerWithContextProvider { // Missing Create method @@ -361,4 +369,255 @@ public void BatchProcessorAttribute_CreateAspectHandler_WithInvalidEventType_Thr // Act & Assert Assert.Throws(() => attribute.CreateAspectHandler(new object[] { "invalid event" })); } + + // Test classes to trigger constructor exceptions + public class FailingBatchProcessor : IBatchProcessor + { + public FailingBatchProcessor() + { + throw new InvalidOperationException("Constructor failed"); + } + + public ProcessingResult ProcessingResult => throw new NotImplementedException(); + public Task> ProcessAsync(SQSEvent @event, IRecordHandler recordHandler, ProcessingOptions processingOptions) => throw new NotImplementedException(); + public Task> ProcessAsync(SQSEvent @event, IRecordHandler recordHandler) => throw new NotImplementedException(); + public Task> ProcessAsync(SQSEvent @event, IRecordHandler recordHandler, CancellationToken cancellationToken) => throw new NotImplementedException(); + } + + public class FailingBatchProcessorProvider : IBatchProcessorProvider + { + public FailingBatchProcessorProvider() + { + throw new InvalidOperationException("Provider constructor failed"); + } + + public IBatchProcessor Create() => throw new NotImplementedException(); + } + + public class FailingRecordHandler : IRecordHandler + { + public FailingRecordHandler() + { + throw new InvalidOperationException("Handler constructor failed"); + } + + public Task HandleAsync(SQSEvent.SQSMessage record, CancellationToken cancellationToken) => throw new NotImplementedException(); + } + + public class FailingRecordHandlerProvider : IRecordHandlerProvider + { + public FailingRecordHandlerProvider() + { + throw new InvalidOperationException("Handler provider constructor failed"); + } + + public IRecordHandler Create() => throw new NotImplementedException(); + } + + public class FailingTypedHandler : ITypedRecordHandler + { + public FailingTypedHandler() + { + throw new InvalidOperationException("Typed 
handler constructor failed"); + } + + public Task HandleAsync(TestData data, CancellationToken cancellationToken) => throw new NotImplementedException(); + } + + public class FailingTypedHandlerWithContext : ITypedRecordHandlerWithContext + { + public FailingTypedHandlerWithContext() + { + throw new InvalidOperationException("Typed handler with context constructor failed"); + } + + public Task HandleAsync(TestData data, ILambdaContext context, CancellationToken cancellationToken) => throw new NotImplementedException(); + } + + public class FailingJsonSerializerContext : JsonSerializerContext + { + public FailingJsonSerializerContext() : base(null) + { + throw new InvalidOperationException("JsonSerializerContext constructor failed"); + } + + protected override System.Text.Json.JsonSerializerOptions GeneratedSerializerOptions => throw new NotImplementedException(); + public override System.Text.Json.Serialization.Metadata.JsonTypeInfo GetTypeInfo(Type type) => throw new NotImplementedException(); + } + + public class FailingTypedHandlerProvider + { + public FailingTypedHandlerProvider() + { + throw new InvalidOperationException("Typed handler provider constructor failed"); + } + + public ITypedRecordHandler Create() => throw new NotImplementedException(); + } + + public class FailingTypedHandlerWithContextProvider + { + public FailingTypedHandlerWithContextProvider() + { + throw new InvalidOperationException("Typed handler with context provider constructor failed"); + } + + public ITypedRecordHandlerWithContext Create() => throw new NotImplementedException(); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingBatchProcessor_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + BatchProcessor = typeof(FailingBatchProcessor), + RecordHandler = typeof(ValidSqsRecordHandler) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => 
attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of: 'FailingBatchProcessor'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingBatchProcessorProvider_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + BatchProcessorProvider = typeof(FailingBatchProcessorProvider), + RecordHandler = typeof(ValidSqsRecordHandler) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of batch processor using provider: 'FailingBatchProcessorProvider'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingRecordHandler_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + RecordHandler = typeof(FailingRecordHandler) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of: 'FailingRecordHandler'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingRecordHandlerProvider_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + RecordHandlerProvider = typeof(FailingRecordHandlerProvider) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of record handler using provider: 'FailingRecordHandlerProvider'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingTypedHandler_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandler = 
typeof(FailingTypedHandler) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of: 'FailingTypedHandler'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingTypedHandlerWithContext_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandlerWithContext = typeof(FailingTypedHandlerWithContext) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of: 'FailingTypedHandlerWithContext'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingJsonSerializerContext_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandler = typeof(ValidHandler), + JsonSerializerContext = typeof(FailingJsonSerializerContext) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of JsonSerializerContext: 'FailingJsonSerializerContext'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithFailingTypedHandlerProvider_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandlerProvider = typeof(FailingTypedHandlerProvider) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of typed record handler using provider: 'FailingTypedHandlerProvider'", ex.Message); + } + + [Fact] + public void 
BatchProcessorAttribute_CreateAspectHandler_WithFailingTypedHandlerWithContextProvider_ThrowsInvalidOperationException() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + TypedRecordHandlerWithContextProvider = typeof(FailingTypedHandlerWithContextProvider) + }; + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("Error during creation of typed record handler with context using provider: 'FailingTypedHandlerWithContextProvider'", ex.Message); + } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithErrorHandlingPolicyOverride_UsesOverride() + { + // Arrange + var attribute = new BatchProcessorAttribute + { + RecordHandler = typeof(ValidSqsRecordHandler), + ErrorHandlingPolicy = BatchProcessorErrorHandlingPolicy.StopOnFirstBatchItemFailure + }; + var sqsEvent = new SQSEvent(); + + // Act + var handler = attribute.CreateAspectHandler(new object[] { sqsEvent }); + + // Assert - Should not throw, policy should be applied + Assert.NotNull(handler); + } } \ No newline at end of file From 1f97ce3abcf2c3976d106e9c71fdd48728375d40 Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 18:27:43 +0100 Subject: [PATCH 7/9] more tests --- .../BatchProcessorAttributeValidationTests.cs | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs index 303e73ef0..df1d9b791 100644 --- a/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs +++ b/libraries/tests/AWS.Lambda.Powertools.BatchProcessing.Tests/BatchProcessorAttributeValidationTests.cs @@ -620,4 +620,44 @@ public void 
BatchProcessorAttribute_CreateAspectHandler_WithErrorHandlingPolicyO // Assert - Should not throw, policy should be applied Assert.NotNull(handler); } + + [Fact] + public void BatchProcessorAttribute_CreateAspectHandler_WithNoHandlerProvided_ThrowsInvalidOperationException() + { + // Arrange - No handlers configured at all (neither traditional nor typed) + var attribute = new BatchProcessorAttribute(); + var sqsEvent = new SQSEvent(); + + // Act & Assert + var ex = Assert.Throws(() => attribute.CreateAspectHandler(new object[] { sqsEvent })); + Assert.Contains("A record handler, record handler provider, typed record handler, or typed record handler provider is required", ex.Message); + } + + // Test class that uses reflection to directly test the CreateTypedBatchProcessingAspectHandler method + [Fact] + public void BatchProcessorAttribute_CreateTypedBatchProcessingAspectHandler_WithNoTypedHandlerProvided_ThrowsInvalidOperationException() + { + // Arrange - Create an attribute and use reflection to call the private method directly + var attribute = new BatchProcessorAttribute(); + var sqsEvent = new SQSEvent(); + + // Use reflection to get the private method + var method = typeof(BatchProcessorAttribute).GetMethod("CreateTypedBatchProcessingAspectHandler", + System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance); + + // Create the generic method for SQSEvent and SQSMessage + var genericMethod = method.MakeGenericMethod(typeof(SQSEvent), typeof(SQSEvent.SQSMessage)); + + // Create a lambda that returns a mock typed batch processor + Func> mockProvider = () => + Substitute.For>(); + + // Act & Assert - Call the method with no typed handlers configured + var ex = Assert.Throws(() => + genericMethod.Invoke(attribute, new object[] { mockProvider, new object[] { sqsEvent } })); + + // The actual exception is wrapped in TargetInvocationException + Assert.IsType(ex.InnerException); + Assert.Contains("A typed record handler or typed record 
handler provider is required", ex.InnerException.Message); + } } \ No newline at end of file From 51ab6d116a1e88f57f6c3756777e2430942bd84a Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 18:32:30 +0100 Subject: [PATCH 8/9] fix sonar warnings --- .../TypedDynamoDbStreamBatchProcessor.cs | 16 ++++++++++++---- .../Kinesis/TypedKinesisEventBatchProcessor.cs | 16 ++++++++++++---- .../Sqs/TypedSqsBatchProcessor.cs | 16 ++++++++++++---- 3 files changed, 36 insertions(+), 12 deletions(-) diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs index 1282e5751..c08136be5 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/DynamoDb/TypedDynamoDbStreamBatchProcessor.cs @@ -50,16 +50,24 @@ public TypedDynamoDbStreamBatchProcessor( { _deserializationService = deserializationService ?? JsonDeserializationService.Instance; _recordDataExtractor = recordDataExtractor ?? DynamoDbRecordDataExtractor.Instance; - _typedInstance = this; } /// - /// Initializes a new instance of the TypedDynamoDbStreamBatchProcessor class with default services. + /// Initializes a new instance of the TypedDynamoDbStreamBatchProcessor class with custom deserialization service. + /// + /// The deserialization service. If null, uses JsonDeserializationService.Instance. + public TypedDynamoDbStreamBatchProcessor(IDeserializationService deserializationService) + : this(PowertoolsConfigurations.Instance, deserializationService, null) + { + } + + /// + /// Initializes a new instance of the TypedDynamoDbStreamBatchProcessor class with custom services. /// /// The deserialization service. If null, uses JsonDeserializationService.Instance. /// The record data extractor. 
If null, uses DynamoDbRecordDataExtractor.Instance. - public TypedDynamoDbStreamBatchProcessor(IDeserializationService deserializationService = null, - IRecordDataExtractor recordDataExtractor = null) + public TypedDynamoDbStreamBatchProcessor(IDeserializationService deserializationService, + IRecordDataExtractor recordDataExtractor) : this(PowertoolsConfigurations.Instance, deserializationService, recordDataExtractor) { } diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs index 1eeacab06..ee790f278 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Kinesis/TypedKinesisEventBatchProcessor.cs @@ -50,16 +50,24 @@ public TypedKinesisEventBatchProcessor( { _deserializationService = deserializationService ?? JsonDeserializationService.Instance; _recordDataExtractor = recordDataExtractor ?? KinesisRecordDataExtractor.Instance; - _typedInstance = this; } /// - /// Initializes a new instance of the TypedKinesisEventBatchProcessor class with default services. + /// Initializes a new instance of the TypedKinesisEventBatchProcessor class with custom deserialization service. + /// + /// The deserialization service. If null, uses JsonDeserializationService.Instance. + public TypedKinesisEventBatchProcessor(IDeserializationService deserializationService) + : this(PowertoolsConfigurations.Instance, deserializationService, null) + { + } + + /// + /// Initializes a new instance of the TypedKinesisEventBatchProcessor class with custom services. /// /// The deserialization service. If null, uses JsonDeserializationService.Instance. /// The record data extractor. If null, uses KinesisRecordDataExtractor.Instance. 
- public TypedKinesisEventBatchProcessor(IDeserializationService deserializationService = null, - IRecordDataExtractor recordDataExtractor = null) + public TypedKinesisEventBatchProcessor(IDeserializationService deserializationService, + IRecordDataExtractor recordDataExtractor) : this(PowertoolsConfigurations.Instance, deserializationService, recordDataExtractor) { } diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs index ce97e00d9..95e42ffe5 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/Sqs/TypedSqsBatchProcessor.cs @@ -50,16 +50,24 @@ public TypedSqsBatchProcessor( { _deserializationService = deserializationService ?? JsonDeserializationService.Instance; _recordDataExtractor = recordDataExtractor ?? SqsRecordDataExtractor.Instance; - _typedInstance = this; } /// - /// Initializes a new instance of the TypedSqsBatchProcessor class with default services. + /// Initializes a new instance of the TypedSqsBatchProcessor class with custom deserialization service. + /// + /// The deserialization service. If null, uses JsonDeserializationService.Instance. + public TypedSqsBatchProcessor(IDeserializationService deserializationService) + : this(PowertoolsConfigurations.Instance, deserializationService, null) + { + } + + /// + /// Initializes a new instance of the TypedSqsBatchProcessor class with custom services. /// /// The deserialization service. If null, uses JsonDeserializationService.Instance. /// The record data extractor. If null, uses SqsRecordDataExtractor.Instance. 
- public TypedSqsBatchProcessor(IDeserializationService deserializationService = null, - IRecordDataExtractor recordDataExtractor = null) + public TypedSqsBatchProcessor(IDeserializationService deserializationService, + IRecordDataExtractor recordDataExtractor) : this(PowertoolsConfigurations.Instance, deserializationService, recordDataExtractor) { } From c8a32822c48244026f3f4958709995e63160bf10 Mon Sep 17 00:00:00 2001 From: Henrique Graca <999396+hjgraca@users.noreply.github.com> Date: Wed, 8 Oct 2025 18:36:22 +0100 Subject: [PATCH 9/9] more sonar warning fix --- .../BatchProcessorAttribute.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs index 847562d38..5ff851411 100644 --- a/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs +++ b/libraries/src/AWS.Lambda.Powertools.BatchProcessing/BatchProcessorAttribute.cs @@ -308,7 +308,7 @@ internal IBatchProcessingAspectHandler CreateAspectHandler(IReadOnlyList BatchEventType.DynamoDbStream => CreateTypedBatchProcessingAspectHandler(() => TypedDynamoDbStreamBatchProcessor.TypedInstance, args), BatchEventType.KinesisDataStream => CreateTypedBatchProcessingAspectHandler(() => TypedKinesisEventBatchProcessor.TypedInstance, args), BatchEventType.Sqs => CreateTypedBatchProcessingAspectHandler(() => TypedSqsBatchProcessor.TypedInstance, args), - _ => throw new ArgumentOutOfRangeException(nameof(eventType), eventType, "Unsupported event type.") + _ => throw new ArgumentOutOfRangeException($"{eventType}", eventType, "Unsupported event type.") }; } @@ -318,7 +318,7 @@ internal IBatchProcessingAspectHandler CreateAspectHandler(IReadOnlyList BatchEventType.DynamoDbStream => CreateBatchProcessingAspectHandler(() => DynamoDbStreamBatchProcessor.Instance), BatchEventType.KinesisDataStream => 
CreateBatchProcessingAspectHandler(() => KinesisEventBatchProcessor.Instance), BatchEventType.Sqs => CreateBatchProcessingAspectHandler(() => SqsBatchProcessor.Instance), - _ => throw new ArgumentOutOfRangeException(nameof(eventType), eventType, "Unsupported event type.") + _ => throw new ArgumentOutOfRangeException($"{eventType}", eventType, "Unsupported event type.") }; } @@ -533,7 +533,7 @@ private TypedBatchProcessingAspectHandler CreateTypedBatchProce return new TypedBatchProcessingAspectHandler(typedBatchProcessor, typedHandler, hasContext, deserializationOptions, processingOptions); } - private BatchEventType GetEventTypeFromArgs(IReadOnlyList args) + private static BatchEventType GetEventTypeFromArgs(IReadOnlyList args) { if (args == null || args.Count == 0 || !EventTypes.TryGetValue(args[0].GetType(), out var eventType)) {