diff --git a/apps/ruststack-server/src/events_bridge.rs b/apps/ruststack-server/src/events_bridge.rs index 79f984d..43af4df 100644 --- a/apps/ruststack-server/src/events_bridge.rs +++ b/apps/ruststack-server/src/events_bridge.rs @@ -7,7 +7,6 @@ use std::sync::Arc; use async_trait::async_trait; - use ruststack_events_core::delivery::{DeliveryError, TargetDelivery}; use ruststack_sqs_core::provider::RustStackSqs; use ruststack_sqs_model::input::SendMessageInput; diff --git a/apps/ruststack-server/src/gateway.rs b/apps/ruststack-server/src/gateway.rs index 43abf14..6f5bf71 100644 --- a/apps/ruststack-server/src/gateway.rs +++ b/apps/ruststack-server/src/gateway.rs @@ -8,13 +8,9 @@ //! intercepted at the gateway level and return a combined status for all //! registered services. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; -use hyper::body::Incoming; -use hyper::service::Service; +use hyper::{body::Incoming, service::Service}; use crate::service::{GatewayBody, ServiceRouter, gateway_body_from_string}; diff --git a/apps/ruststack-server/src/handler.rs b/apps/ruststack-server/src/handler.rs index b8dd18a..23350c1 100644 --- a/apps/ruststack-server/src/handler.rs +++ b/apps/ruststack-server/src/handler.rs @@ -5,22 +5,20 @@ //! dispatched to the corresponding `handle_*` method on [`RustStackS3`], with request //! deserialization via [`FromS3Request`] and response serialization via [`IntoS3Response`]. 
-use std::collections::HashMap; -use std::future::Future; -use std::pin::Pin; +use std::{collections::HashMap, future::Future, pin::Pin}; use bytes::Bytes; use ruststack_s3_core::RustStackS3; -use ruststack_s3_http::body::S3ResponseBody; -use ruststack_s3_http::dispatch::S3Handler; -use ruststack_s3_http::multipart; -use ruststack_s3_http::request::FromS3Request; -use ruststack_s3_http::response::IntoS3Response; -use ruststack_s3_http::router::RoutingContext; -use ruststack_s3_model::S3Operation; -use ruststack_s3_model::error::{S3Error, S3ErrorCode}; -use ruststack_s3_model::input::PutObjectInput; -use ruststack_s3_model::request::StreamingBlob; +use ruststack_s3_http::{ + body::S3ResponseBody, dispatch::S3Handler, multipart, request::FromS3Request, + response::IntoS3Response, router::RoutingContext, +}; +use ruststack_s3_model::{ + S3Operation, + error::{S3Error, S3ErrorCode}, + input::PutObjectInput, + request::StreamingBlob, +}; /// Wrapper that implements [`S3Handler`] by delegating to [`RustStackS3`] handler methods. 
#[derive(Debug, Clone)] @@ -623,13 +621,11 @@ async fn dispatch_post_object( let etag = output.e_tag.unwrap_or_default(); let location = format!("/{bucket_name}/{key}"); let xml = format!( - "\n\ - \n\ - {location}\n\ - {bucket_name}\n\ - {key}\n\ - {etag}\n\ - " + "\n\n{location}\n\ + {bucket_name}\n{key}\n\ + {etag}\n" ); http::Response::builder() .status(success_status) diff --git a/apps/ruststack-server/src/main.rs b/apps/ruststack-server/src/main.rs index d41def0..71a5452 100644 --- a/apps/ruststack-server/src/main.rs +++ b/apps/ruststack-server/src/main.rs @@ -30,16 +30,29 @@ mod service; #[cfg(feature = "sns")] mod sns_bridge; -use std::net::SocketAddr; -use std::sync::Arc; +use std::{net::SocketAddr, sync::Arc}; use anyhow::{Context, Result}; -use hyper_util::rt::{TokioExecutor, TokioIo}; -use hyper_util::server::conn::auto::Builder as HttpConnBuilder; -use tokio::net::TcpListener; -use tracing::{info, warn}; -use tracing_subscriber::EnvFilter; - +use hyper_util::{ + rt::{TokioExecutor, TokioIo}, + server::conn::auto::Builder as HttpConnBuilder, +}; +#[cfg(feature = "apigatewayv2")] +use ruststack_apigatewayv2_core::config::ApiGatewayV2Config; +#[cfg(feature = "apigatewayv2")] +use ruststack_apigatewayv2_core::handler::RustStackApiGatewayV2Handler; +#[cfg(feature = "apigatewayv2")] +use ruststack_apigatewayv2_core::provider::RustStackApiGatewayV2; +#[cfg(feature = "apigatewayv2")] +use ruststack_apigatewayv2_http::service::{ApiGatewayV2HttpConfig, ApiGatewayV2HttpService}; +#[cfg(feature = "cloudwatch")] +use ruststack_cloudwatch_core::config::CloudWatchConfig; +#[cfg(feature = "cloudwatch")] +use ruststack_cloudwatch_core::handler::RustStackCloudWatchHandler; +#[cfg(feature = "cloudwatch")] +use ruststack_cloudwatch_core::provider::RustStackCloudWatch; +#[cfg(feature = "cloudwatch")] +use ruststack_cloudwatch_http::service::{CloudWatchHttpConfig, CloudWatchHttpService}; #[cfg(feature = "dynamodb")] use ruststack_dynamodb_core::config::DynamoDBConfig;
#[cfg(feature = "dynamodb")] @@ -48,47 +61,22 @@ use ruststack_dynamodb_core::handler::RustStackDynamoDBHandler; use ruststack_dynamodb_core::provider::RustStackDynamoDB; #[cfg(feature = "dynamodb")] use ruststack_dynamodb_http::service::{DynamoDBHttpConfig, DynamoDBHttpService}; - -#[cfg(feature = "sqs")] -use ruststack_sqs_core::config::SqsConfig; -#[cfg(feature = "sqs")] -use ruststack_sqs_core::handler::RustStackSqsHandler; -#[cfg(feature = "sqs")] -use ruststack_sqs_core::provider::RustStackSqs; -#[cfg(feature = "sqs")] -use ruststack_sqs_http::service::{SqsHttpConfig, SqsHttpService}; - -#[cfg(feature = "ssm")] -use ruststack_ssm_core::config::SsmConfig; -#[cfg(feature = "ssm")] -use ruststack_ssm_core::handler::RustStackSsmHandler; -#[cfg(feature = "ssm")] -use ruststack_ssm_core::provider::RustStackSsm; -#[cfg(feature = "ssm")] -use ruststack_ssm_http::service::{SsmHttpConfig, SsmHttpService}; - -#[cfg(feature = "sns")] -use crate::sns_bridge::RustStackSqsPublisher; -#[cfg(feature = "sns")] -use ruststack_sns_core::config::SnsConfig; -#[cfg(feature = "sns")] -use ruststack_sns_core::handler::RustStackSnsHandler; -#[cfg(feature = "sns")] -use ruststack_sns_core::provider::RustStackSns; -#[cfg(feature = "sns")] -use ruststack_sns_http::service::{SnsHttpConfig, SnsHttpService}; - -#[cfg(feature = "lambda")] -use ruststack_lambda_core::config::LambdaConfig; -#[cfg(feature = "lambda")] -use ruststack_lambda_core::handler::RustStackLambdaHandler; -#[cfg(feature = "lambda")] -use ruststack_lambda_core::provider::RustStackLambda; -#[cfg(feature = "lambda")] -use ruststack_lambda_http::service::{LambdaHttpConfig, LambdaHttpService}; - -#[cfg(feature = "events")] -use crate::events_bridge::LocalTargetDelivery; +#[cfg(feature = "dynamodbstreams")] +use ruststack_dynamodbstreams_core::config::DynamoDBStreamsConfig; +#[cfg(feature = "dynamodbstreams")] +use ruststack_dynamodbstreams_core::emitter::{ + DynamoDBStreamEmitter, DynamoDBStreamLifecycleManager, +}; 
+#[cfg(feature = "dynamodbstreams")] +use ruststack_dynamodbstreams_core::handler::RustStackDynamoDBStreamsHandler; +#[cfg(feature = "dynamodbstreams")] +use ruststack_dynamodbstreams_core::provider::RustStackDynamoDBStreams; +#[cfg(feature = "dynamodbstreams")] +use ruststack_dynamodbstreams_core::storage::StreamStore; +#[cfg(feature = "dynamodbstreams")] +use ruststack_dynamodbstreams_http::service::{ + DynamoDBStreamsHttpConfig, DynamoDBStreamsHttpService, +}; #[cfg(feature = "events")] use ruststack_events_core::config::EventsConfig; #[cfg(feature = "events")] @@ -97,25 +85,16 @@ use ruststack_events_core::handler::RustStackEventsHandler; use ruststack_events_core::provider::RustStackEvents; #[cfg(feature = "events")] use ruststack_events_http::service::{EventsHttpConfig, EventsHttpService}; - -#[cfg(feature = "logs")] -use ruststack_logs_core::config::LogsConfig; -#[cfg(feature = "logs")] -use ruststack_logs_core::handler::RustStackLogsHandler; -#[cfg(feature = "logs")] -use ruststack_logs_core::provider::RustStackLogs; -#[cfg(feature = "logs")] -use ruststack_logs_http::service::{LogsHttpConfig, LogsHttpService}; - -#[cfg(feature = "kms")] -use ruststack_kms_core::config::KmsConfig; -#[cfg(feature = "kms")] -use ruststack_kms_core::handler::RustStackKmsHandler; -#[cfg(feature = "kms")] -use ruststack_kms_core::provider::RustStackKms; -#[cfg(feature = "kms")] -use ruststack_kms_http::service::{KmsHttpConfig, KmsHttpService}; - +#[cfg(feature = "iam")] +use ruststack_iam_core::config::IamConfig; +#[cfg(feature = "iam")] +use ruststack_iam_core::handler::RustStackIamHandler; +#[cfg(feature = "iam")] +use ruststack_iam_core::provider::RustStackIam; +#[cfg(feature = "iam")] +use ruststack_iam_core::store::IamStore; +#[cfg(feature = "iam")] +use ruststack_iam_http::service::{IamHttpConfig, IamHttpService}; #[cfg(feature = "kinesis")] use ruststack_kinesis_core::config::KinesisConfig; #[cfg(feature = "kinesis")] @@ -124,7 +103,34 @@ use 
ruststack_kinesis_core::handler::RustStackKinesisHandler; use ruststack_kinesis_core::provider::RustStackKinesis; #[cfg(feature = "kinesis")] use ruststack_kinesis_http::service::{KinesisHttpConfig, KinesisHttpService}; - +#[cfg(feature = "kms")] +use ruststack_kms_core::config::KmsConfig; +#[cfg(feature = "kms")] +use ruststack_kms_core::handler::RustStackKmsHandler; +#[cfg(feature = "kms")] +use ruststack_kms_core::provider::RustStackKms; +#[cfg(feature = "kms")] +use ruststack_kms_http::service::{KmsHttpConfig, KmsHttpService}; +#[cfg(feature = "lambda")] +use ruststack_lambda_core::config::LambdaConfig; +#[cfg(feature = "lambda")] +use ruststack_lambda_core::handler::RustStackLambdaHandler; +#[cfg(feature = "lambda")] +use ruststack_lambda_core::provider::RustStackLambda; +#[cfg(feature = "lambda")] +use ruststack_lambda_http::service::{LambdaHttpConfig, LambdaHttpService}; +#[cfg(feature = "logs")] +use ruststack_logs_core::config::LogsConfig; +#[cfg(feature = "logs")] +use ruststack_logs_core::handler::RustStackLogsHandler; +#[cfg(feature = "logs")] +use ruststack_logs_core::provider::RustStackLogs; +#[cfg(feature = "logs")] +use ruststack_logs_http::service::{LogsHttpConfig, LogsHttpService}; +#[cfg(feature = "s3")] +use ruststack_s3_core::{RustStackS3, S3Config}; +#[cfg(feature = "s3")] +use ruststack_s3_http::service::{S3HttpConfig, S3HttpService}; #[cfg(feature = "secretsmanager")] use ruststack_secretsmanager_core::config::SecretsManagerConfig; #[cfg(feature = "secretsmanager")] @@ -133,7 +139,6 @@ use ruststack_secretsmanager_core::handler::RustStackSecretsManagerHandler; use ruststack_secretsmanager_core::provider::RustStackSecretsManager; #[cfg(feature = "secretsmanager")] use ruststack_secretsmanager_http::service::{SecretsManagerHttpConfig, SecretsManagerHttpService}; - #[cfg(feature = "ses")] use ruststack_ses_core::config::SesConfig; #[cfg(feature = "ses")] @@ -144,53 +149,30 @@ use ruststack_ses_core::provider::RustStackSes; use 
ruststack_ses_http::service::{SesHttpConfig, SesHttpService}; #[cfg(feature = "ses")] use ruststack_ses_http::v2::SesV2HttpService; - -#[cfg(feature = "apigatewayv2")] -use ruststack_apigatewayv2_core::config::ApiGatewayV2Config; -#[cfg(feature = "apigatewayv2")] -use ruststack_apigatewayv2_core::handler::RustStackApiGatewayV2Handler; -#[cfg(feature = "apigatewayv2")] -use ruststack_apigatewayv2_core::provider::RustStackApiGatewayV2; -#[cfg(feature = "apigatewayv2")] -use ruststack_apigatewayv2_http::service::{ApiGatewayV2HttpConfig, ApiGatewayV2HttpService}; - -#[cfg(feature = "dynamodbstreams")] -use ruststack_dynamodbstreams_core::config::DynamoDBStreamsConfig; -#[cfg(feature = "dynamodbstreams")] -use ruststack_dynamodbstreams_core::emitter::{ - DynamoDBStreamEmitter, DynamoDBStreamLifecycleManager, -}; -#[cfg(feature = "dynamodbstreams")] -use ruststack_dynamodbstreams_core::handler::RustStackDynamoDBStreamsHandler; -#[cfg(feature = "dynamodbstreams")] -use ruststack_dynamodbstreams_core::provider::RustStackDynamoDBStreams; -#[cfg(feature = "dynamodbstreams")] -use ruststack_dynamodbstreams_core::storage::StreamStore; -#[cfg(feature = "dynamodbstreams")] -use ruststack_dynamodbstreams_http::service::{ - DynamoDBStreamsHttpConfig, DynamoDBStreamsHttpService, -}; - -#[cfg(feature = "cloudwatch")] -use ruststack_cloudwatch_core::config::CloudWatchConfig; -#[cfg(feature = "cloudwatch")] -use ruststack_cloudwatch_core::handler::RustStackCloudWatchHandler; -#[cfg(feature = "cloudwatch")] -use ruststack_cloudwatch_core::provider::RustStackCloudWatch; -#[cfg(feature = "cloudwatch")] -use ruststack_cloudwatch_http::service::{CloudWatchHttpConfig, CloudWatchHttpService}; - -#[cfg(feature = "iam")] -use ruststack_iam_core::config::IamConfig; -#[cfg(feature = "iam")] -use ruststack_iam_core::handler::RustStackIamHandler; -#[cfg(feature = "iam")] -use ruststack_iam_core::provider::RustStackIam; -#[cfg(feature = "iam")] -use ruststack_iam_core::store::IamStore; 
-#[cfg(feature = "iam")] -use ruststack_iam_http::service::{IamHttpConfig, IamHttpService}; - +#[cfg(feature = "sns")] +use ruststack_sns_core::config::SnsConfig; +#[cfg(feature = "sns")] +use ruststack_sns_core::handler::RustStackSnsHandler; +#[cfg(feature = "sns")] +use ruststack_sns_core::provider::RustStackSns; +#[cfg(feature = "sns")] +use ruststack_sns_http::service::{SnsHttpConfig, SnsHttpService}; +#[cfg(feature = "sqs")] +use ruststack_sqs_core::config::SqsConfig; +#[cfg(feature = "sqs")] +use ruststack_sqs_core::handler::RustStackSqsHandler; +#[cfg(feature = "sqs")] +use ruststack_sqs_core::provider::RustStackSqs; +#[cfg(feature = "sqs")] +use ruststack_sqs_http::service::{SqsHttpConfig, SqsHttpService}; +#[cfg(feature = "ssm")] +use ruststack_ssm_core::config::SsmConfig; +#[cfg(feature = "ssm")] +use ruststack_ssm_core::handler::RustStackSsmHandler; +#[cfg(feature = "ssm")] +use ruststack_ssm_core::provider::RustStackSsm; +#[cfg(feature = "ssm")] +use ruststack_ssm_http::service::{SsmHttpConfig, SsmHttpService}; #[cfg(feature = "sts")] use ruststack_sts_core::config::StsConfig; #[cfg(feature = "sts")] @@ -199,14 +181,15 @@ use ruststack_sts_core::handler::RustStackStsHandler; use ruststack_sts_core::provider::RustStackSts; #[cfg(feature = "sts")] use ruststack_sts_http::service::{StsHttpConfig, StsHttpService}; +use tokio::net::TcpListener; +use tracing::{info, warn}; +use tracing_subscriber::EnvFilter; -#[cfg(feature = "s3")] -use ruststack_s3_core::{RustStackS3, S3Config}; -#[cfg(feature = "s3")] -use ruststack_s3_http::service::{S3HttpConfig, S3HttpService}; - -use crate::gateway::GatewayService; -use crate::service::ServiceRouter; +#[cfg(feature = "events")] +use crate::events_bridge::LocalTargetDelivery; +#[cfg(feature = "sns")] +use crate::sns_bridge::RustStackSqsPublisher; +use crate::{gateway::GatewayService, service::ServiceRouter}; /// Server version reported in health check responses. 
const VERSION: &str = env!("CARGO_PKG_VERSION"); @@ -630,8 +613,10 @@ fn parse_services_value(raw: &str) -> Vec { /// Exits with code 0 if the response is 200 OK and contains at least one /// running service, 1 otherwise. async fn run_health_check(addr: &str) -> Result<()> { - use tokio::io::{AsyncReadExt, AsyncWriteExt}; - use tokio::net::TcpStream; + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, + }; let mut stream = TcpStream::connect(addr) .await @@ -1029,8 +1014,8 @@ async fn main() -> Result<()> { if services.is_empty() { anyhow::bail!( - "no services enabled. Check the SERVICES environment variable \ - and compiled feature flags." + "no services enabled. Check the SERVICES environment variable and compiled feature \ + flags." ); } diff --git a/apps/ruststack-server/src/service.rs b/apps/ruststack-server/src/service.rs index 9926a7e..f719755 100644 --- a/apps/ruststack-server/src/service.rs +++ b/apps/ruststack-server/src/service.rs @@ -6,14 +6,10 @@ //! //! [`GatewayBody`] is a type-erased HTTP response body shared by all services. -use std::convert::Infallible; -use std::future::Future; -use std::io; -use std::pin::Pin; +use std::{convert::Infallible, future::Future, io, pin::Pin}; use bytes::Bytes; -use http_body_util::combinators::BoxBody; -use http_body_util::{BodyExt, Full}; +use http_body_util::{BodyExt, Full, combinators::BoxBody}; use hyper::body::Incoming; /// Type-erased response body used by the gateway. 
@@ -53,15 +49,11 @@ pub trait ServiceRouter: Send + Sync { #[cfg(feature = "s3")] mod s3_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_s3_http::dispatch::S3Handler; - use ruststack_s3_http::service::S3HttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_s3_http::{dispatch::S3Handler, service::S3HttpService}; use super::{GatewayBody, ServiceRouter}; @@ -113,15 +105,11 @@ pub use s3_router::S3ServiceRouter; #[cfg(feature = "dynamodb")] mod dynamodb_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_dynamodb_http::dispatch::DynamoDBHandler; - use ruststack_dynamodb_http::service::DynamoDBHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_dynamodb_http::{dispatch::DynamoDBHandler, service::DynamoDBHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -174,15 +162,13 @@ pub use dynamodb_router::DynamoDBServiceRouter; #[cfg(feature = "dynamodbstreams")] mod dynamodbstreams_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_dynamodbstreams_http::dispatch::DynamoDBStreamsHandler; - use ruststack_dynamodbstreams_http::service::DynamoDBStreamsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_dynamodbstreams_http::{ + dispatch::DynamoDBStreamsHandler, service::DynamoDBStreamsHttpService, + }; use super::{GatewayBody, ServiceRouter}; @@ -235,15 +221,11 @@ pub use 
dynamodbstreams_router::DynamoDBStreamsServiceRouter; #[cfg(feature = "sqs")] mod sqs_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_sqs_http::dispatch::SqsHandler; - use ruststack_sqs_http::service::SqsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_sqs_http::{dispatch::SqsHandler, service::SqsHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -296,15 +278,11 @@ pub use sqs_router::SqsServiceRouter; #[cfg(feature = "ssm")] mod ssm_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_ssm_http::dispatch::SsmHandler; - use ruststack_ssm_http::service::SsmHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_ssm_http::{dispatch::SsmHandler, service::SsmHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -357,15 +335,11 @@ pub use ssm_router::SsmServiceRouter; #[cfg(feature = "sns")] mod sns_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_sns_http::dispatch::SnsHandler; - use ruststack_sns_http::service::SnsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_sns_http::{dispatch::SnsHandler, service::SnsHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -448,15 +422,11 @@ pub use sns_router::SnsServiceRouter; #[cfg(feature = "lambda")] mod lambda_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, 
future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_lambda_http::dispatch::LambdaHandler; - use ruststack_lambda_http::service::LambdaHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_lambda_http::{dispatch::LambdaHandler, service::LambdaHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -491,6 +461,27 @@ mod lambda_router { } } } + // Layer paths (e.g., /2018-10-31/layers/{name}/versions). + if path.contains("/layers") { + if let Some(rest) = path.strip_prefix('/') { + let parts: Vec<&str> = rest.splitn(2, '/').collect(); + if parts.len() == 2 && parts[0].len() == 10 && parts[1].starts_with("layers") { + return true; + } + } + } + // Event source mapping paths (e.g., /2015-03-31/event-source-mappings/). + if path.contains("/event-source-mappings") { + if let Some(rest) = path.strip_prefix('/') { + let parts: Vec<&str> = rest.splitn(2, '/').collect(); + if parts.len() == 2 + && parts[0].len() == 10 + && parts[1].starts_with("event-source-mappings") + { + return true; + } + } + } // Function URL invocation paths. 
path.starts_with("/lambda-url/") } @@ -543,15 +534,11 @@ pub use lambda_router::LambdaServiceRouter; #[cfg(feature = "events")] mod events_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_events_http::dispatch::EventsHandler; - use ruststack_events_http::service::EventsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_events_http::{dispatch::EventsHandler, service::EventsHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -604,15 +591,11 @@ pub use events_router::EventsServiceRouter; #[cfg(feature = "logs")] mod logs_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_logs_http::dispatch::LogsHandler; - use ruststack_logs_http::service::LogsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_logs_http::{dispatch::LogsHandler, service::LogsHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -665,15 +648,11 @@ pub use logs_router::LogsServiceRouter; #[cfg(feature = "kms")] mod kms_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_kms_http::dispatch::KmsHandler; - use ruststack_kms_http::service::KmsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_kms_http::{dispatch::KmsHandler, service::KmsHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -726,15 +705,11 @@ pub use kms_router::KmsServiceRouter; #[cfg(feature = "kinesis")] mod kinesis_router { - use 
std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_kinesis_http::dispatch::KinesisHandler; - use ruststack_kinesis_http::service::KinesisHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_kinesis_http::{dispatch::KinesisHandler, service::KinesisHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -787,15 +762,13 @@ pub use kinesis_router::KinesisServiceRouter; #[cfg(feature = "secretsmanager")] mod secretsmanager_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_secretsmanager_http::dispatch::SecretsManagerHandler; - use ruststack_secretsmanager_http::service::SecretsManagerHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_secretsmanager_http::{ + dispatch::SecretsManagerHandler, service::SecretsManagerHttpService, + }; use super::{GatewayBody, ServiceRouter}; @@ -848,16 +821,11 @@ pub use secretsmanager_router::SecretsManagerServiceRouter; #[cfg(feature = "ses")] mod ses_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_ses_http::dispatch::SesHandler; - use ruststack_ses_http::service::SesHttpService; - use ruststack_ses_http::v2::SesV2HttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_ses_http::{dispatch::SesHandler, service::SesHttpService, v2::SesV2HttpService}; use super::{GatewayBody, ServiceRouter}; @@ -978,7 +946,8 @@ mod ses_router { headers.insert( "authorization", 
http::HeaderValue::from_static( - "AWS4-HMAC-SHA256 Credential=test/20260319/us-east-1/ses/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=abc123", + "AWS4-HMAC-SHA256 Credential=test/20260319/us-east-1/ses/aws4_request, \ + SignedHeaders=content-type;host;x-amz-date, Signature=abc123", ), ); assert_eq!(extract_sigv4_service(&headers), Some("ses")); @@ -990,7 +959,8 @@ mod ses_router { headers.insert( "authorization", http::HeaderValue::from_static( - "AWS4-HMAC-SHA256 Credential=AKID/20260319/us-east-1/email/aws4_request, SignedHeaders=host, Signature=abc123", + "AWS4-HMAC-SHA256 Credential=AKID/20260319/us-east-1/email/aws4_request, \ + SignedHeaders=host, Signature=abc123", ), ); assert_eq!(extract_sigv4_service(&headers), Some("email")); @@ -1002,7 +972,8 @@ mod ses_router { headers.insert( "authorization", http::HeaderValue::from_static( - "AWS4-HMAC-SHA256 Credential=AKID/20260319/us-east-1/sns/aws4_request, SignedHeaders=host, Signature=abc123", + "AWS4-HMAC-SHA256 Credential=AKID/20260319/us-east-1/sns/aws4_request, \ + SignedHeaders=host, Signature=abc123", ), ); assert_eq!(extract_sigv4_service(&headers), Some("sns")); @@ -1035,19 +1006,18 @@ pub use ses_router::SesServiceRouter; #[cfg(feature = "apigatewayv2")] mod apigatewayv2_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; - use std::sync::Arc; + use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_apigatewayv2_core::execution::{handle_execution, parse_execution_path}; - use ruststack_apigatewayv2_core::provider::RustStackApiGatewayV2; - use ruststack_apigatewayv2_http::dispatch::ApiGatewayV2Handler; - use ruststack_apigatewayv2_http::service::ApiGatewayV2HttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_apigatewayv2_core::{ + execution::{handle_execution, parse_execution_path}, 
+ provider::RustStackApiGatewayV2, + }; + use ruststack_apigatewayv2_http::{ + dispatch::ApiGatewayV2Handler, service::ApiGatewayV2HttpService, + }; use super::{GatewayBody, ServiceRouter, gateway_body_from_string}; @@ -1223,15 +1193,11 @@ pub use apigatewayv2_router::{ApiGatewayV2ExecutionRouter, ApiGatewayV2Managemen #[cfg(feature = "cloudwatch")] mod cloudwatch_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_cloudwatch_http::dispatch::CloudWatchHandler; - use ruststack_cloudwatch_http::service::CloudWatchHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_cloudwatch_http::{dispatch::CloudWatchHandler, service::CloudWatchHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -1276,8 +1242,8 @@ mod cloudwatch_router { /// CloudWatch Metrics matches in three ways: /// 1. awsQuery: form-urlencoded POST signed with `monitoring` SigV4 service. /// 2. rpcv2Cbor path: POST to `/service/GraniteServiceVersion20100801/...`. - /// 3. rpcv2Cbor header: POST with `smithy-protocol: rpc-v2-cbor` signed - /// with `monitoring` SigV4 service (AWS SDK v1.108+). + /// 3. rpcv2Cbor header: POST with `smithy-protocol: rpc-v2-cbor` signed with `monitoring` + /// SigV4 service (AWS SDK v1.108+). 
fn matches(&self, req: &http::Request) -> bool { if *req.method() != http::Method::POST { return false; @@ -1347,15 +1313,11 @@ pub use cloudwatch_router::CloudWatchServiceRouter; #[cfg(feature = "iam")] mod iam_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_iam_http::dispatch::IamHandler; - use ruststack_iam_http::service::IamHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_iam_http::{dispatch::IamHandler, service::IamHttpService}; use super::{GatewayBody, ServiceRouter}; @@ -1437,15 +1399,11 @@ pub use iam_router::IamServiceRouter; #[cfg(feature = "sts")] mod sts_router { - use std::convert::Infallible; - use std::future::Future; - use std::pin::Pin; + use std::{convert::Infallible, future::Future, pin::Pin}; use http_body_util::BodyExt; - use hyper::body::Incoming; - use hyper::service::Service; - use ruststack_sts_http::dispatch::StsHandler; - use ruststack_sts_http::service::StsHttpService; + use hyper::{body::Incoming, service::Service}; + use ruststack_sts_http::{dispatch::StsHandler, service::StsHttpService}; use super::{GatewayBody, ServiceRouter}; diff --git a/apps/ruststack-server/src/sns_bridge.rs b/apps/ruststack-server/src/sns_bridge.rs index 8e7db93..c34bb9e 100644 --- a/apps/ruststack-server/src/sns_bridge.rs +++ b/apps/ruststack-server/src/sns_bridge.rs @@ -7,9 +7,10 @@ use std::sync::Arc; use async_trait::async_trait; - -use ruststack_sns_core::config::SnsConfig; -use ruststack_sns_core::publisher::{DeliveryError, SqsPublisher}; +use ruststack_sns_core::{ + config::SnsConfig, + publisher::{DeliveryError, SqsPublisher}, +}; use ruststack_sqs_core::provider::RustStackSqs; use ruststack_sqs_model::input::SendMessageInput; diff --git a/crates/ruststack-apigatewayv2-core/src/execution/http_proxy.rs 
b/crates/ruststack-apigatewayv2-core/src/execution/http_proxy.rs index 03278bb..0b7c563 100644 --- a/crates/ruststack-apigatewayv2-core/src/execution/http_proxy.rs +++ b/crates/ruststack-apigatewayv2-core/src/execution/http_proxy.rs @@ -4,9 +4,9 @@ use bytes::Bytes; -use crate::error::ApiGatewayV2ServiceError; -use crate::provider::RustStackApiGatewayV2; -use crate::storage::IntegrationRecord; +use crate::{ + error::ApiGatewayV2ServiceError, provider::RustStackApiGatewayV2, storage::IntegrationRecord, +}; /// Handle an HTTP proxy integration. /// diff --git a/crates/ruststack-apigatewayv2-core/src/execution/lambda_proxy.rs b/crates/ruststack-apigatewayv2-core/src/execution/lambda_proxy.rs index 34c9365..97f41f8 100644 --- a/crates/ruststack-apigatewayv2-core/src/execution/lambda_proxy.rs +++ b/crates/ruststack-apigatewayv2-core/src/execution/lambda_proxy.rs @@ -8,11 +8,10 @@ use std::collections::HashMap; use bytes::Bytes; use serde::Deserialize; -use crate::error::ApiGatewayV2ServiceError; -use crate::provider::RustStackApiGatewayV2; -use crate::storage::IntegrationRecord; - use super::event::build_lambda_event; +use crate::{ + error::ApiGatewayV2ServiceError, provider::RustStackApiGatewayV2, storage::IntegrationRecord, +}; /// Lambda function response (payload format version 2.0). #[derive(Debug, Deserialize)] diff --git a/crates/ruststack-apigatewayv2-core/src/execution/mock_integration.rs b/crates/ruststack-apigatewayv2-core/src/execution/mock_integration.rs index 0d152bd..30a1b7a 100644 --- a/crates/ruststack-apigatewayv2-core/src/execution/mock_integration.rs +++ b/crates/ruststack-apigatewayv2-core/src/execution/mock_integration.rs @@ -4,8 +4,7 @@ use bytes::Bytes; -use crate::error::ApiGatewayV2ServiceError; -use crate::storage::IntegrationRecord; +use crate::{error::ApiGatewayV2ServiceError, storage::IntegrationRecord}; /// Handle a mock integration. 
/// diff --git a/crates/ruststack-apigatewayv2-core/src/execution/mod.rs b/crates/ruststack-apigatewayv2-core/src/execution/mod.rs index eedb499..1b2e1b3 100644 --- a/crates/ruststack-apigatewayv2-core/src/execution/mod.rs +++ b/crates/ruststack-apigatewayv2-core/src/execution/mod.rs @@ -13,8 +13,7 @@ pub mod router; use bytes::Bytes; use ruststack_apigatewayv2_model::types::IntegrationType; -use crate::error::ApiGatewayV2ServiceError; -use crate::provider::RustStackApiGatewayV2; +use crate::{error::ApiGatewayV2ServiceError, provider::RustStackApiGatewayV2}; /// Target for an API execution request. #[derive(Debug)] diff --git a/crates/ruststack-apigatewayv2-core/src/handler.rs b/crates/ruststack-apigatewayv2-core/src/handler.rs index 216784b..bc83400 100644 --- a/crates/ruststack-apigatewayv2-core/src/handler.rs +++ b/crates/ruststack-apigatewayv2-core/src/handler.rs @@ -6,20 +6,18 @@ //! Uses manual `Pin>` return types because the `ApiGatewayV2Handler` //! trait requires object safety for `Arc`. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_apigatewayv2_http::body::ApiGatewayV2ResponseBody; -use ruststack_apigatewayv2_http::dispatch::ApiGatewayV2Handler; -use ruststack_apigatewayv2_http::response::{empty_response, json_response}; -use ruststack_apigatewayv2_http::router::PathParams; -use ruststack_apigatewayv2_model::error::ApiGatewayV2Error; +use ruststack_apigatewayv2_http::{ + body::ApiGatewayV2ResponseBody, + dispatch::ApiGatewayV2Handler, + response::{empty_response, json_response}, + router::PathParams, +}; #[allow(clippy::wildcard_imports)] use ruststack_apigatewayv2_model::input::*; -use ruststack_apigatewayv2_model::operations::ApiGatewayV2Operation; +use ruststack_apigatewayv2_model::{error::ApiGatewayV2Error, operations::ApiGatewayV2Operation}; use crate::provider::RustStackApiGatewayV2; diff --git a/crates/ruststack-apigatewayv2-core/src/provider.rs b/crates/ruststack-apigatewayv2-core/src/provider.rs index 1f34454..4dc53be 100644 --- a/crates/ruststack-apigatewayv2-core/src/provider.rs +++ b/crates/ruststack-apigatewayv2-core/src/provider.rs @@ -3,12 +3,9 @@ //! Implements all 56 API Gateway v2 operations, maintaining internal storage //! and converting between model input/output types and internal records. 
-use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use chrono::Utc; -use tracing::info; - #[allow(clippy::wildcard_imports)] use ruststack_apigatewayv2_model::input::*; #[allow(clippy::wildcard_imports)] @@ -18,13 +15,16 @@ use ruststack_apigatewayv2_model::types::{ DeploymentStatus, DomainName, Integration, Model, MutualTlsAuthentication, Route, RouteResponse, Stage, TlsConfig, VpcLink, VpcLinkStatus, VpcLinkVersion, }; +use tracing::info; -use crate::config::ApiGatewayV2Config; -use crate::error::ApiGatewayV2ServiceError; -use crate::storage::{ - ApiMappingRecord, ApiRecord, ApiStore, AuthorizerRecord, DeploymentRecord, DomainNameRecord, - IntegrationRecord, ModelRecord, RouteRecord, RouteResponseRecord, StageRecord, VpcLinkRecord, - generate_id, +use crate::{ + config::ApiGatewayV2Config, + error::ApiGatewayV2ServiceError, + storage::{ + ApiMappingRecord, ApiRecord, ApiStore, AuthorizerRecord, DeploymentRecord, + DomainNameRecord, IntegrationRecord, ModelRecord, RouteRecord, RouteResponseRecord, + StageRecord, VpcLinkRecord, generate_id, + }, }; /// Main API Gateway v2 provider. Owns resource storage and configuration. 
diff --git a/crates/ruststack-apigatewayv2-core/src/storage.rs b/crates/ruststack-apigatewayv2-core/src/storage.rs index 06bae7a..b35cef5 100644 --- a/crates/ruststack-apigatewayv2-core/src/storage.rs +++ b/crates/ruststack-apigatewayv2-core/src/storage.rs @@ -9,7 +9,6 @@ use std::collections::HashMap; use chrono::{DateTime, Utc}; use dashmap::DashMap; use rand::Rng; - use ruststack_apigatewayv2_model::types::{ AccessLogSettings, AuthorizationType, AuthorizerType, ConnectionType, ContentHandlingStrategy, Cors, DeploymentStatus, DomainNameConfiguration, IntegrationType, IpAddressType, diff --git a/crates/ruststack-apigatewayv2-http/src/body.rs b/crates/ruststack-apigatewayv2-http/src/body.rs index 82bacfc..313ebac 100644 --- a/crates/ruststack-apigatewayv2-http/src/body.rs +++ b/crates/ruststack-apigatewayv2-http/src/body.rs @@ -1,7 +1,9 @@ //! API Gateway v2 HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-apigatewayv2-http/src/dispatch.rs b/crates/ruststack-apigatewayv2-http/src/dispatch.rs index 23534ad..edc9e04 100644 --- a/crates/ruststack-apigatewayv2-http/src/dispatch.rs +++ b/crates/ruststack-apigatewayv2-http/src/dispatch.rs @@ -3,16 +3,12 @@ //! Uses manual `Pin>` return types because `ApiGatewayV2Handler` //! requires object safety for dynamic dispatch (`Arc`). -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; +use ruststack_apigatewayv2_model::{error::ApiGatewayV2Error, operations::ApiGatewayV2Operation}; -use ruststack_apigatewayv2_model::error::ApiGatewayV2Error; -use ruststack_apigatewayv2_model::operations::ApiGatewayV2Operation; - -use crate::body::ApiGatewayV2ResponseBody; -use crate::router::PathParams; +use crate::{body::ApiGatewayV2ResponseBody, router::PathParams}; /// The boundary between HTTP and business logic for API Gateway v2. 
/// diff --git a/crates/ruststack-apigatewayv2-http/src/response.rs b/crates/ruststack-apigatewayv2-http/src/response.rs index 8bdb6e9..0b9e529 100644 --- a/crates/ruststack-apigatewayv2-http/src/response.rs +++ b/crates/ruststack-apigatewayv2-http/src/response.rs @@ -4,9 +4,8 @@ //! `{"message": "..."}` use bytes::Bytes; -use serde::Serialize; - use ruststack_apigatewayv2_model::error::ApiGatewayV2Error; +use serde::Serialize; /// Content type for API Gateway v2 JSON responses. pub const CONTENT_TYPE: &str = "application/json"; @@ -74,9 +73,10 @@ pub fn empty_response(status: u16) -> Result, ApiGatewayV2 #[cfg(test)] mod tests { - use super::*; use ruststack_apigatewayv2_model::error::ApiGatewayV2ErrorCode; + use super::*; + #[test] fn test_should_format_error_with_lowercase_message() { let err = ApiGatewayV2Error::with_message( diff --git a/crates/ruststack-apigatewayv2-http/src/router.rs b/crates/ruststack-apigatewayv2-http/src/router.rs index eb4b53d..2309edf 100644 --- a/crates/ruststack-apigatewayv2-http/src/router.rs +++ b/crates/ruststack-apigatewayv2-http/src/router.rs @@ -5,8 +5,10 @@ use std::collections::HashMap; -use ruststack_apigatewayv2_model::error::ApiGatewayV2Error; -use ruststack_apigatewayv2_model::operations::{APIGATEWAYV2_ROUTES, ApiGatewayV2Operation}; +use ruststack_apigatewayv2_model::{ + error::ApiGatewayV2Error, + operations::{APIGATEWAYV2_ROUTES, ApiGatewayV2Operation}, +}; /// Extracted path parameters from a matched route. #[derive(Debug, Clone, Default)] diff --git a/crates/ruststack-apigatewayv2-http/src/service.rs b/crates/ruststack-apigatewayv2-http/src/service.rs index cfd06b2..6a8f17d 100644 --- a/crates/ruststack-apigatewayv2-http/src/service.rs +++ b/crates/ruststack-apigatewayv2-http/src/service.rs @@ -1,20 +1,18 @@ //! API Gateway v2 HTTP service implementing the hyper `Service` trait. 
-use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; +use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - -use bytes::Bytes; use ruststack_apigatewayv2_model::error::ApiGatewayV2Error; -use crate::body::ApiGatewayV2ResponseBody; -use crate::dispatch::{ApiGatewayV2Handler, dispatch_operation}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::ApiGatewayV2ResponseBody, + dispatch::{ApiGatewayV2Handler, dispatch_operation}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the API Gateway v2 HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-auth/src/canonical.rs b/crates/ruststack-auth/src/canonical.rs index b6e700a..f350546 100644 --- a/crates/ruststack-auth/src/canonical.rs +++ b/crates/ruststack-auth/src/canonical.rs @@ -68,9 +68,11 @@ pub fn build_canonical_request( let canonical_headers = build_canonical_headers(headers, signed_headers); let signed_headers_str = build_signed_headers_string(signed_headers); - format!( + #[rustfmt::skip] + let result = format!( "{method}\n{canonical_uri}\n{canonical_query}\n{canonical_headers}\n\n{signed_headers_str}\n{payload_hash}" - ) + ); + result } /// Build the canonical URI by URI-encoding each path segment individually. 
@@ -300,6 +302,7 @@ mod tests { &headers.iter().map(|(k, v)| (*k, *v)).collect::>(), &signed, ); + #[rustfmt::skip] let expected = "host:examplebucket.s3.amazonaws.com\n\ range:bytes=0-9\n\ x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n\ @@ -351,6 +354,7 @@ mod tests { "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ); + #[rustfmt::skip] let expected = "GET\n\ /test.txt\n\ \n\ @@ -373,11 +377,9 @@ mod tests { #[test] fn test_should_handle_presigned_url_query_string() { - let query = "X-Amz-Algorithm=AWS4-HMAC-SHA256\ - &X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request\ - &X-Amz-Date=20130524T000000Z\ - &X-Amz-Expires=86400\ - &X-Amz-SignedHeaders=host"; + let query = "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%\ + 2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&\ + X-Amz-Expires=86400&X-Amz-SignedHeaders=host"; let result = build_canonical_query_string(query); // Should be sorted, raw values preserved assert!(result.contains("X-Amz-Algorithm=AWS4-HMAC-SHA256")); diff --git a/crates/ruststack-auth/src/credentials.rs b/crates/ruststack-auth/src/credentials.rs index 23d37db..811f383 100644 --- a/crates/ruststack-auth/src/credentials.rs +++ b/crates/ruststack-auth/src/credentials.rs @@ -44,7 +44,8 @@ pub struct StaticCredentialProvider { } impl StaticCredentialProvider { - /// Create a new `StaticCredentialProvider` from an iterable of (access_key_id, secret_key) pairs. + /// Create a new `StaticCredentialProvider` from an iterable of (access_key_id, secret_key) + /// pairs. 
pub fn new(credentials: impl IntoIterator) -> Self { Self { credentials: credentials.into_iter().collect(), diff --git a/crates/ruststack-auth/src/presigned.rs b/crates/ruststack-auth/src/presigned.rs index f8cc2fe..924ab15 100644 --- a/crates/ruststack-auth/src/presigned.rs +++ b/crates/ruststack-auth/src/presigned.rs @@ -19,13 +19,15 @@ use sha2::{Digest, Sha256}; use subtle::ConstantTimeEq; use tracing::debug; -use crate::canonical::{ - build_canonical_headers, build_canonical_query_string, build_canonical_uri, - build_signed_headers_string, +use crate::{ + canonical::{ + build_canonical_headers, build_canonical_query_string, build_canonical_uri, + build_signed_headers_string, + }, + credentials::CredentialProvider, + error::AuthError, + sigv4::{AuthResult, build_string_to_sign, compute_signature, derive_signing_key}, }; -use crate::credentials::CredentialProvider; -use crate::error::AuthError; -use crate::sigv4::{AuthResult, build_string_to_sign, compute_signature, derive_signing_key}; /// The payload hash value used for all presigned URL requests. const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD"; @@ -166,6 +168,7 @@ pub fn verify_presigned( let signed_headers_str = build_signed_headers_string(&signed_header_refs); // For presigned URLs, the payload hash is always UNSIGNED-PAYLOAD. 
+ #[rustfmt::skip] let canonical_request = format!( "{method}\n{canonical_uri}\n{canonical_query}\n{canonical_headers}\n\n{signed_headers_str}\n{UNSIGNED_PAYLOAD}" ); @@ -292,12 +295,8 @@ mod tests { #[test] fn test_should_parse_presigned_params() { - let query = "X-Amz-Algorithm=AWS4-HMAC-SHA256\ - &X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request\ - &X-Amz-Date=20130524T000000Z\ - &X-Amz-Expires=86400\ - &X-Amz-SignedHeaders=host\ - &X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"; + #[rustfmt::skip] + let query = "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404"; let parsed = parse_presigned_params(query).unwrap(); assert_eq!(parsed.algorithm, "AWS4-HMAC-SHA256"); @@ -316,11 +315,9 @@ mod tests { #[test] fn test_should_reject_missing_algorithm_param() { - let query = "X-Amz-Credential=AKID%2F20130524%2Fus-east-1%2Fs3%2Faws4_request\ - &X-Amz-Date=20130524T000000Z\ - &X-Amz-Expires=86400\ - &X-Amz-SignedHeaders=host\ - &X-Amz-Signature=abc"; + let query = "X-Amz-Credential=AKID%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&\ + X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&\ + X-Amz-Signature=abc"; let result = parse_presigned_params(query); assert!(matches!(result, Err(AuthError::MissingQueryParam(_)))); @@ -343,12 +340,9 @@ mod tests { #[test] fn test_should_build_query_string_without_signature() { - let query = "X-Amz-Algorithm=AWS4-HMAC-SHA256\ - &X-Amz-Credential=AKID%2F20130524%2Fus-east-1%2Fs3%2Faws4_request\ - &X-Amz-Date=20130524T000000Z\ - &X-Amz-Expires=86400\ - &X-Amz-SignedHeaders=host\ - &X-Amz-Signature=abc123"; + let query = "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKID%2F20130524%\ + 
2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&\ + X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Signature=abc123"; let result = build_canonical_query_string_without_signature(query); assert!(!result.contains("X-Amz-Signature")); @@ -367,17 +361,11 @@ mod tests { let signing_key = derive_signing_key(TEST_SECRET_KEY, "20130524", "us-east-1", "s3"); // Build canonical request for the presigned URL test vector. - let canonical_request = "GET\n\ - /test.txt\n\ - X-Amz-Algorithm=AWS4-HMAC-SHA256\ - &X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request\ - &X-Amz-Date=20130524T000000Z\ - &X-Amz-Expires=86400\ - &X-Amz-SignedHeaders=host\n\ - host:examplebucket.s3.amazonaws.com\n\ - \n\ - host\n\ - UNSIGNED-PAYLOAD"; + let canonical_request = "GET\n/test.txt\nX-Amz-Algorithm=AWS4-HMAC-SHA256&\ + X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%\ + 2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&\ + X-Amz-Expires=86400&X-Amz-SignedHeaders=host\nhost:examplebucket.\ + s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD"; let canonical_hash = hex::encode(Sha256::digest(canonical_request.as_bytes())); assert_eq!( @@ -411,16 +399,14 @@ mod tests { // Build the canonical request components. 
let canonical_uri = "/test.txt"; let query_without_sig = format!( - "X-Amz-Algorithm=AWS4-HMAC-SHA256\ - &X-Amz-Credential={}\ - &X-Amz-Date={timestamp}\ - &X-Amz-Expires=86400\ - &X-Amz-SignedHeaders=host", + "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential={}&X-Amz-Date={timestamp}&\ + X-Amz-Expires=86400&X-Amz-SignedHeaders=host", percent_encoding::utf8_percent_encode(&credential, percent_encoding::NON_ALPHANUMERIC) ); let canonical_query = build_canonical_query_string(&query_without_sig); + #[rustfmt::skip] let canonical_request = format!( "GET\n{canonical_uri}\n{canonical_query}\nhost:examplebucket.s3.amazonaws.com\n\nhost\nUNSIGNED-PAYLOAD" ); diff --git a/crates/ruststack-auth/src/sigv2.rs b/crates/ruststack-auth/src/sigv2.rs index 7cd3874..5c3c2e7 100644 --- a/crates/ruststack-auth/src/sigv2.rs +++ b/crates/ruststack-auth/src/sigv2.rs @@ -20,16 +20,13 @@ use std::collections::BTreeMap; -use base64::Engine; -use base64::engine::general_purpose::STANDARD as BASE64; +use base64::{Engine, engine::general_purpose::STANDARD as BASE64}; use hmac::{Hmac, Mac}; use sha1::Sha1; use subtle::ConstantTimeEq; use tracing::debug; -use crate::credentials::CredentialProvider; -use crate::error::AuthError; -use crate::sigv4::AuthResult; +use crate::{credentials::CredentialProvider, error::AuthError, sigv4::AuthResult}; type HmacSha1 = Hmac; diff --git a/crates/ruststack-auth/src/sigv4.rs b/crates/ruststack-auth/src/sigv4.rs index e699d6b..b94d0fd 100644 --- a/crates/ruststack-auth/src/sigv4.rs +++ b/crates/ruststack-auth/src/sigv4.rs @@ -2,13 +2,13 @@ //! //! This module implements the core SigV4 signature verification flow: //! -//! 1. Parse the `Authorization` header to extract the algorithm, credential scope, -//! signed headers, and provided signature. +//! 1. Parse the `Authorization` header to extract the algorithm, credential scope, signed headers, +//! and provided signature. //! 2. Reconstruct the canonical request from the HTTP request parts. //! 3. 
Build the string to sign from the timestamp, credential scope, and canonical request hash. //! 4. Derive the signing key using HMAC-SHA256 from the secret key and credential scope components. -//! 5. Compute the expected signature and compare it to the provided signature using -//! constant-time comparison. +//! 5. Compute the expected signature and compare it to the provided signature using constant-time +//! comparison. //! //! The main entry point is [`verify_sigv4`]. @@ -17,9 +17,9 @@ use sha2::{Digest, Sha256}; use subtle::ConstantTimeEq; use tracing::debug; -use crate::canonical::build_canonical_request; -use crate::credentials::CredentialProvider; -use crate::error::AuthError; +use crate::{ + canonical::build_canonical_request, credentials::CredentialProvider, error::AuthError, +}; /// The only algorithm supported by this implementation. const SUPPORTED_ALGORITHM: &str = "AWS4-HMAC-SHA256"; @@ -72,7 +72,8 @@ pub struct ParsedAuth { /// Returns [`AuthError::InvalidAuthHeader`] if the header format is invalid, /// or [`AuthError::UnsupportedAlgorithm`] if the algorithm is not `AWS4-HMAC-SHA256`. pub fn parse_authorization_header(header: &str) -> Result { - // Split algorithm from the rest: "AWS4-HMAC-SHA256 Credential=...,SignedHeaders=...,Signature=..." + // Split algorithm from the rest: "AWS4-HMAC-SHA256 + // Credential=...,SignedHeaders=...,Signature=..." 
let (algorithm, rest) = header.split_once(' ').ok_or(AuthError::InvalidAuthHeader)?; if algorithm != SUPPORTED_ALGORITHM { @@ -361,8 +362,7 @@ pub fn hash_payload(payload: &[u8]) -> String { #[cfg(test)] mod tests { use super::*; - use crate::canonical::build_signed_headers_string; - use crate::credentials::StaticCredentialProvider; + use crate::{canonical::build_signed_headers_string, credentials::StaticCredentialProvider}; const TEST_ACCESS_KEY: &str = "AKIAIOSFODNN7EXAMPLE"; const TEST_SECRET_KEY: &str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"; @@ -388,9 +388,9 @@ mod tests { #[test] fn test_should_parse_authorization_header() { let header = "AWS4-HMAC-SHA256 \ - Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,\ - SignedHeaders=host;range;x-amz-content-sha256;x-amz-date,\ - Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41"; + Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,\ + SignedHeaders=host;range;x-amz-content-sha256;x-amz-date,\ + Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41"; let parsed = parse_authorization_header(header).unwrap(); assert_eq!(parsed.algorithm, "AWS4-HMAC-SHA256"); @@ -410,16 +410,17 @@ mod tests { #[test] fn test_should_reject_unsupported_algorithm() { - let header = "AWS4-HMAC-SHA512 Credential=AKID/20130524/us-east-1/s3/aws4_request,\ - SignedHeaders=host,Signature=abc"; + let header = "AWS4-HMAC-SHA512 \ + Credential=AKID/20130524/us-east-1/s3/aws4_request,SignedHeaders=host,\ + Signature=abc"; let result = parse_authorization_header(header); assert!(matches!(result, Err(AuthError::UnsupportedAlgorithm(_)))); } #[test] fn test_should_reject_invalid_credential_format() { - let header = "AWS4-HMAC-SHA256 Credential=AKID/20130524/us-east-1,\ - SignedHeaders=host,Signature=abc"; + let header = + "AWS4-HMAC-SHA256 Credential=AKID/20130524/us-east-1,SignedHeaders=host,Signature=abc"; let result = parse_authorization_header(header); 
assert!(matches!(result, Err(AuthError::InvalidCredential))); } @@ -432,6 +433,7 @@ mod tests { "20130524/us-east-1/s3/aws4_request", canonical_hash, ); + #[rustfmt::skip] let expected = "AWS4-HMAC-SHA256\n\ 20130524T000000Z\n\ 20130524/us-east-1/s3/aws4_request\n\ @@ -444,6 +446,7 @@ mod tests { // Full end-to-end test using the AWS GET Object example. let signing_key = derive_signing_key(TEST_SECRET_KEY, TEST_DATE, TEST_REGION, TEST_SERVICE); + #[rustfmt::skip] let string_to_sign = "AWS4-HMAC-SHA256\n\ 20130524T000000Z\n\ 20130524/us-east-1/s3/aws4_request\n\ @@ -472,8 +475,9 @@ mod tests { // Compute the expected signature to build the auth header. let auth_value = format!( - "AWS4-HMAC-SHA256 Credential={TEST_ACCESS_KEY}/20130524/us-east-1/s3/aws4_request,\ - SignedHeaders=host;range;x-amz-content-sha256;x-amz-date,\ + "AWS4-HMAC-SHA256 \ + Credential={TEST_ACCESS_KEY}/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;\ + range;x-amz-content-sha256;x-amz-date,\ Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41" ); builder = builder.header(http::header::AUTHORIZATION, &auth_value); @@ -497,8 +501,9 @@ mod tests { let empty_hash = hash_payload(b""); let auth_value = format!( - "AWS4-HMAC-SHA256 Credential={TEST_ACCESS_KEY}/20130524/us-east-1/s3/aws4_request,\ - SignedHeaders=host;range;x-amz-content-sha256;x-amz-date,\ + "AWS4-HMAC-SHA256 \ + Credential={TEST_ACCESS_KEY}/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;\ + range;x-amz-content-sha256;x-amz-date,\ Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41" ); @@ -540,11 +545,10 @@ mod tests { let provider = StaticCredentialProvider::new(vec![]); let empty_hash = hash_payload(b""); - let auth_value = - "AWS4-HMAC-SHA256 Credential=UNKNOWN_KEY/20130524/us-east-1/s3/aws4_request,\ - SignedHeaders=host;x-amz-date,\ - Signature=abc123" - .to_owned(); + let auth_value = "AWS4-HMAC-SHA256 \ + Credential=UNKNOWN_KEY/20130524/us-east-1/s3/aws4_request,\ + 
SignedHeaders=host;x-amz-date,Signature=abc123" + .to_owned(); let (parts, _body) = http::Request::builder() .method("GET") diff --git a/crates/ruststack-cloudwatch-core/src/alarm_store.rs b/crates/ruststack-cloudwatch-core/src/alarm_store.rs index 1e78566..b62e5f5 100644 --- a/crates/ruststack-cloudwatch-core/src/alarm_store.rs +++ b/crates/ruststack-cloudwatch-core/src/alarm_store.rs @@ -225,10 +225,10 @@ impl Default for AlarmStore { #[cfg(test)] mod tests { - use super::*; - use ruststack_cloudwatch_model::types::StateValue; + use super::*; + fn make_alarm(name: &str) -> MetricAlarm { MetricAlarm { alarm_name: Some(name.to_owned()), diff --git a/crates/ruststack-cloudwatch-core/src/handler.rs b/crates/ruststack-cloudwatch-core/src/handler.rs index 2c1e14d..09ac8ec 100644 --- a/crates/ruststack-cloudwatch-core/src/handler.rs +++ b/crates/ruststack-cloudwatch-core/src/handler.rs @@ -7,42 +7,43 @@ //! Covers all 31 CloudWatch operations: metrics, alarms, dashboards, //! insight rules, anomaly detectors, metric streams, and tagging. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; -use serde::Serialize; - -use ruststack_cloudwatch_http::body::CloudWatchResponseBody; -use ruststack_cloudwatch_http::dispatch::{CloudWatchHandler, Protocol}; -use ruststack_cloudwatch_http::request::{ - get_optional_bool, get_optional_f64, get_optional_i32, get_optional_param, get_required_param, - parse_dimension_filters, parse_dimensions, parse_form_params, parse_string_list, - parse_struct_list, parse_tag_list, +use ruststack_cloudwatch_http::{ + body::CloudWatchResponseBody, + dispatch::{CloudWatchHandler, Protocol}, + request::{ + get_optional_bool, get_optional_f64, get_optional_i32, get_optional_param, + get_required_param, parse_dimension_filters, parse_dimensions, parse_form_params, + parse_string_list, parse_struct_list, parse_tag_list, + }, + response::{XmlWriter, cbor_response, xml_response}, }; -use ruststack_cloudwatch_http::response::{XmlWriter, cbor_response, xml_response}; -use ruststack_cloudwatch_model::error::{CloudWatchError, CloudWatchErrorCode}; -use ruststack_cloudwatch_model::input::{ - DeleteAlarmsInput, DeleteAnomalyDetectorInput, DeleteDashboardsInput, DeleteInsightRulesInput, - DeleteMetricStreamInput, DescribeAlarmHistoryInput, DescribeAlarmsForMetricInput, - DescribeAlarmsInput, DescribeAnomalyDetectorsInput, DescribeInsightRulesInput, - DisableAlarmActionsInput, EnableAlarmActionsInput, GetDashboardInput, GetMetricDataInput, - GetMetricStatisticsInput, GetMetricStreamInput, ListDashboardsInput, ListMetricStreamsInput, - ListMetricsInput, ListTagsForResourceInput, PutAnomalyDetectorInput, PutCompositeAlarmInput, - PutDashboardInput, PutInsightRuleInput, PutManagedInsightRulesInput, PutMetricAlarmInput, - PutMetricDataInput, PutMetricStreamInput, SetAlarmStateInput, TagResourceInput, - UntagResourceInput, -}; -use ruststack_cloudwatch_model::operations::CloudWatchOperation; -use 
ruststack_cloudwatch_model::types::{ - AlarmType, AnomalyDetectorType, ComparisonOperator, CompositeAlarm, Dimension, DimensionFilter, - HistoryItemType, LabelOptions, ManagedRule, MetricAlarm, MetricCharacteristics, - MetricDataQuery, MetricDatum, MetricMathAnomalyDetector, MetricStat, MetricStreamFilter, - MetricStreamOutputFormat, MetricStreamStatisticsConfiguration, MetricStreamStatisticsMetric, - RecentlyActive, ScanBy, SingleMetricAnomalyDetector, StandardUnit, StateValue, Statistic, - StatisticSet, Tag, +use ruststack_cloudwatch_model::{ + error::{CloudWatchError, CloudWatchErrorCode}, + input::{ + DeleteAlarmsInput, DeleteAnomalyDetectorInput, DeleteDashboardsInput, + DeleteInsightRulesInput, DeleteMetricStreamInput, DescribeAlarmHistoryInput, + DescribeAlarmsForMetricInput, DescribeAlarmsInput, DescribeAnomalyDetectorsInput, + DescribeInsightRulesInput, DisableAlarmActionsInput, EnableAlarmActionsInput, + GetDashboardInput, GetMetricDataInput, GetMetricStatisticsInput, GetMetricStreamInput, + ListDashboardsInput, ListMetricStreamsInput, ListMetricsInput, ListTagsForResourceInput, + PutAnomalyDetectorInput, PutCompositeAlarmInput, PutDashboardInput, PutInsightRuleInput, + PutManagedInsightRulesInput, PutMetricAlarmInput, PutMetricDataInput, PutMetricStreamInput, + SetAlarmStateInput, TagResourceInput, UntagResourceInput, + }, + operations::CloudWatchOperation, + types::{ + AlarmType, AnomalyDetectorType, ComparisonOperator, CompositeAlarm, Dimension, + DimensionFilter, HistoryItemType, LabelOptions, ManagedRule, MetricAlarm, + MetricCharacteristics, MetricDataQuery, MetricDatum, MetricMathAnomalyDetector, MetricStat, + MetricStreamFilter, MetricStreamOutputFormat, MetricStreamStatisticsConfiguration, + MetricStreamStatisticsMetric, RecentlyActive, ScanBy, SingleMetricAnomalyDetector, + StandardUnit, StateValue, Statistic, StatisticSet, Tag, + }, }; +use serde::Serialize; use crate::provider::RustStackCloudWatch; diff --git 
a/crates/ruststack-cloudwatch-core/src/provider.rs b/crates/ruststack-cloudwatch-core/src/provider.rs index 6e6f5b6..ed4baf3 100644 --- a/crates/ruststack-cloudwatch-core/src/provider.rs +++ b/crates/ruststack-cloudwatch-core/src/provider.rs @@ -4,51 +4,54 @@ //! alarm management, dashboard CRUD, insight rules, anomaly detectors, //! metric streams, and tagging. -use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use chrono::Utc; -use tracing::instrument; - -use ruststack_cloudwatch_model::error::{CloudWatchError, CloudWatchErrorCode}; -use ruststack_cloudwatch_model::input::{ - DeleteAlarmsInput, DeleteAnomalyDetectorInput, DeleteDashboardsInput, DeleteInsightRulesInput, - DeleteMetricStreamInput, DescribeAlarmHistoryInput, DescribeAlarmsForMetricInput, - DescribeAlarmsInput, DescribeAnomalyDetectorsInput, DescribeInsightRulesInput, - DisableAlarmActionsInput, EnableAlarmActionsInput, GetDashboardInput, GetMetricDataInput, - GetMetricStatisticsInput, GetMetricStreamInput, ListDashboardsInput, ListMetricStreamsInput, - ListMetricsInput, ListTagsForResourceInput, PutAnomalyDetectorInput, PutCompositeAlarmInput, - PutDashboardInput, PutInsightRuleInput, PutManagedInsightRulesInput, PutMetricAlarmInput, - PutMetricDataInput, PutMetricStreamInput, SetAlarmStateInput, TagResourceInput, - UntagResourceInput, -}; -use ruststack_cloudwatch_model::output::{ - DeleteAnomalyDetectorOutput, DeleteDashboardsOutput, DeleteInsightRulesOutput, - DeleteMetricStreamOutput, DescribeAlarmHistoryOutput, DescribeAlarmsForMetricOutput, - DescribeAlarmsOutput, DescribeAnomalyDetectorsOutput, DescribeInsightRulesOutput, - GetDashboardOutput, GetMetricDataOutput, GetMetricStatisticsOutput, GetMetricStreamOutput, - ListDashboardsOutput, ListMetricStreamsOutput, ListMetricsOutput, ListTagsForResourceOutput, - PutAnomalyDetectorOutput, PutDashboardOutput, PutInsightRuleOutput, - PutManagedInsightRulesOutput, PutMetricStreamOutput, 
TagResourceOutput, UntagResourceOutput, -}; -use ruststack_cloudwatch_model::types::{ - AlarmHistoryItem, AlarmType, AnomalyDetector, CompositeAlarm, DashboardEntry, Datapoint, - HistoryItemType, InsightRule, Metric, MetricAlarm, MetricDataResult, MetricStreamEntry, - MetricStreamOutputFormat, StateValue, Statistic, StatusCode, +use ruststack_cloudwatch_model::{ + error::{CloudWatchError, CloudWatchErrorCode}, + input::{ + DeleteAlarmsInput, DeleteAnomalyDetectorInput, DeleteDashboardsInput, + DeleteInsightRulesInput, DeleteMetricStreamInput, DescribeAlarmHistoryInput, + DescribeAlarmsForMetricInput, DescribeAlarmsInput, DescribeAnomalyDetectorsInput, + DescribeInsightRulesInput, DisableAlarmActionsInput, EnableAlarmActionsInput, + GetDashboardInput, GetMetricDataInput, GetMetricStatisticsInput, GetMetricStreamInput, + ListDashboardsInput, ListMetricStreamsInput, ListMetricsInput, ListTagsForResourceInput, + PutAnomalyDetectorInput, PutCompositeAlarmInput, PutDashboardInput, PutInsightRuleInput, + PutManagedInsightRulesInput, PutMetricAlarmInput, PutMetricDataInput, PutMetricStreamInput, + SetAlarmStateInput, TagResourceInput, UntagResourceInput, + }, + output::{ + DeleteAnomalyDetectorOutput, DeleteDashboardsOutput, DeleteInsightRulesOutput, + DeleteMetricStreamOutput, DescribeAlarmHistoryOutput, DescribeAlarmsForMetricOutput, + DescribeAlarmsOutput, DescribeAnomalyDetectorsOutput, DescribeInsightRulesOutput, + GetDashboardOutput, GetMetricDataOutput, GetMetricStatisticsOutput, GetMetricStreamOutput, + ListDashboardsOutput, ListMetricStreamsOutput, ListMetricsOutput, + ListTagsForResourceOutput, PutAnomalyDetectorOutput, PutDashboardOutput, + PutInsightRuleOutput, PutManagedInsightRulesOutput, PutMetricStreamOutput, + TagResourceOutput, UntagResourceOutput, + }, + types::{ + AlarmHistoryItem, AlarmType, AnomalyDetector, CompositeAlarm, DashboardEntry, Datapoint, + HistoryItemType, InsightRule, Metric, MetricAlarm, MetricDataResult, MetricStreamEntry, + 
MetricStreamOutputFormat, StateValue, Statistic, StatusCode, + }, }; +use tracing::instrument; -use crate::aggregation::aggregate_statistics; -use crate::alarm_store::AlarmStore; -use crate::anomaly_store::AnomalyStore; -use crate::config::CloudWatchConfig; -use crate::dashboard_store::{DashboardRecord, DashboardStore}; -use crate::dimensions::{dimensions_match, normalize_dimensions}; -use crate::insight_store::InsightStore; -use crate::metric_store::{DataPoint, MetricKey, MetricStore, StatisticSet}; -use crate::metric_stream_store::{MetricStreamRecord, MetricStreamStore}; -use crate::validation::{ - validate_alarm_name, validate_dashboard_body, validate_dashboard_name, validate_dimensions, - validate_metric_name, validate_namespace, +use crate::{ + aggregation::aggregate_statistics, + alarm_store::AlarmStore, + anomaly_store::AnomalyStore, + config::CloudWatchConfig, + dashboard_store::{DashboardRecord, DashboardStore}, + dimensions::{dimensions_match, normalize_dimensions}, + insight_store::InsightStore, + metric_store::{DataPoint, MetricKey, MetricStore, StatisticSet}, + metric_stream_store::{MetricStreamRecord, MetricStreamStore}, + validation::{ + validate_alarm_name, validate_dashboard_body, validate_dashboard_name, validate_dimensions, + validate_metric_name, validate_namespace, + }, }; /// CloudWatch service provider implementing all 31 operations. 
@@ -1254,12 +1257,15 @@ impl RustStackCloudWatch { #[cfg(test)] mod tests { - use super::*; - use ruststack_cloudwatch_model::input::{ - DeleteAlarmsInput, DescribeAlarmsInput, PutMetricAlarmInput, PutMetricDataInput, - SetAlarmStateInput, + use ruststack_cloudwatch_model::{ + input::{ + DeleteAlarmsInput, DescribeAlarmsInput, PutMetricAlarmInput, PutMetricDataInput, + SetAlarmStateInput, + }, + types::{ComparisonOperator, MetricDatum, StateValue}, }; - use ruststack_cloudwatch_model::types::{ComparisonOperator, MetricDatum, StateValue}; + + use super::*; fn make_provider() -> RustStackCloudWatch { RustStackCloudWatch::new(Arc::new(CloudWatchConfig::default())) diff --git a/crates/ruststack-cloudwatch-core/src/validation.rs b/crates/ruststack-cloudwatch-core/src/validation.rs index 18a5a3f..bca8b3b 100644 --- a/crates/ruststack-cloudwatch-core/src/validation.rs +++ b/crates/ruststack-cloudwatch-core/src/validation.rs @@ -1,7 +1,9 @@ //! Input validation for CloudWatch operations. -use ruststack_cloudwatch_model::error::{CloudWatchError, CloudWatchErrorCode}; -use ruststack_cloudwatch_model::types::Dimension; +use ruststack_cloudwatch_model::{ + error::{CloudWatchError, CloudWatchErrorCode}, + types::Dimension, +}; /// Validate a namespace string. pub fn validate_namespace(namespace: &str) -> Result<(), CloudWatchError> { diff --git a/crates/ruststack-cloudwatch-http/src/body.rs b/crates/ruststack-cloudwatch-http/src/body.rs index ca145f5..79130d5 100644 --- a/crates/ruststack-cloudwatch-http/src/body.rs +++ b/crates/ruststack-cloudwatch-http/src/body.rs @@ -1,7 +1,9 @@ //! CloudWatch HTTP response body type. 
-use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-cloudwatch-http/src/dispatch.rs b/crates/ruststack-cloudwatch-http/src/dispatch.rs index 6145e86..f9e46be 100644 --- a/crates/ruststack-cloudwatch-http/src/dispatch.rs +++ b/crates/ruststack-cloudwatch-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! CloudWatch handler trait and operation dispatch. -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_cloudwatch_model::error::CloudWatchError; -use ruststack_cloudwatch_model::operations::CloudWatchOperation; +use ruststack_cloudwatch_model::{error::CloudWatchError, operations::CloudWatchOperation}; use crate::body::CloudWatchResponseBody; diff --git a/crates/ruststack-cloudwatch-http/src/lib.rs b/crates/ruststack-cloudwatch-http/src/lib.rs index 5342e3c..91a38dd 100644 --- a/crates/ruststack-cloudwatch-http/src/lib.rs +++ b/crates/ruststack-cloudwatch-http/src/lib.rs @@ -1,12 +1,12 @@ //! CloudWatch Metrics HTTP service layer for RustStack. //! //! Supports two wire protocols: -//! - **awsQuery**: form-urlencoded requests, XML responses (legacy SDKs). -//! Requests are `POST /` with `Content-Type: application/x-www-form-urlencoded` -//! and the operation is dispatched via the `Action=` form parameter. -//! - **rpcv2Cbor**: CBOR requests/responses (AWS SDK v1.108+). -//! Requests are `POST /service/GraniteServiceVersion20100801/operation/{Op}` -//! with `Content-Type: application/cbor` and `smithy-protocol: rpc-v2-cbor`. +//! - **awsQuery**: form-urlencoded requests, XML responses (legacy SDKs). Requests are `POST /` +//! with `Content-Type: application/x-www-form-urlencoded` and the operation is dispatched via the +//! `Action=` form parameter. +//! - **rpcv2Cbor**: CBOR requests/responses (AWS SDK v1.108+). Requests are `POST +//! 
/service/GraniteServiceVersion20100801/operation/{Op}` with `Content-Type: application/cbor` +//! and `smithy-protocol: rpc-v2-cbor`. pub mod body; pub mod dispatch; diff --git a/crates/ruststack-cloudwatch-http/src/response.rs b/crates/ruststack-cloudwatch-http/src/response.rs index 5c503d5..c46d7aa 100644 --- a/crates/ruststack-cloudwatch-http/src/response.rs +++ b/crates/ruststack-cloudwatch-http/src/response.rs @@ -50,14 +50,9 @@ pub fn error_to_xml(error: &CloudWatchError, request_id: &str) -> String { "Sender" }; format!( - "\ - \ - {fault}\ - {}\ - {}\ - \ - {}\ - ", + "{fault}{}{}{}", xml_escape(&error.code.to_string()), xml_escape(&error.message), xml_escape(request_id), diff --git a/crates/ruststack-cloudwatch-http/src/router.rs b/crates/ruststack-cloudwatch-http/src/router.rs index a6a49df..09b06cb 100644 --- a/crates/ruststack-cloudwatch-http/src/router.rs +++ b/crates/ruststack-cloudwatch-http/src/router.rs @@ -4,8 +4,7 @@ //! `Content-Type: application/x-www-form-urlencoded`. The operation is //! specified by the `Action=` form parameter. -use ruststack_cloudwatch_model::error::CloudWatchError; -use ruststack_cloudwatch_model::operations::CloudWatchOperation; +use ruststack_cloudwatch_model::{error::CloudWatchError, operations::CloudWatchOperation}; /// Resolve a CloudWatch operation from parsed form parameters. /// diff --git a/crates/ruststack-cloudwatch-http/src/service.rs b/crates/ruststack-cloudwatch-http/src/service.rs index 0814292..a117b8a 100644 --- a/crates/ruststack-cloudwatch-http/src/service.rs +++ b/crates/ruststack-cloudwatch-http/src/service.rs @@ -6,23 +6,20 @@ //! //! The protocol is detected automatically from request headers and URL path. 
-use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; +use ruststack_cloudwatch_model::{error::CloudWatchError, operations::CloudWatchOperation}; -use ruststack_cloudwatch_model::error::CloudWatchError; -use ruststack_cloudwatch_model::operations::CloudWatchOperation; - -use crate::body::CloudWatchResponseBody; -use crate::dispatch::{CloudWatchHandler, Protocol, dispatch_operation}; -use crate::request::parse_form_params; -use crate::response::{CONTENT_TYPE, cbor_error_response, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::CloudWatchResponseBody, + dispatch::{CloudWatchHandler, Protocol, dispatch_operation}, + request::parse_form_params, + response::{CONTENT_TYPE, cbor_error_response, error_to_response}, + router::resolve_operation, +}; /// Configuration for the CloudWatch HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-dynamodb-core/src/expression/ast.rs b/crates/ruststack-dynamodb-core/src/expression/ast.rs index cfdd12b..a8a8fdf 100644 --- a/crates/ruststack-dynamodb-core/src/expression/ast.rs +++ b/crates/ruststack-dynamodb-core/src/expression/ast.rs @@ -8,8 +8,7 @@ //! name and value references from parsed ASTs, used for validating that all //! provided names/values are actually used in expressions. -use std::collections::HashSet; -use std::fmt; +use std::{collections::HashSet, fmt}; /// Expression AST node for condition, filter, and key-condition expressions. 
#[derive(Debug, Clone)] diff --git a/crates/ruststack-dynamodb-core/src/expression/evaluator.rs b/crates/ruststack-dynamodb-core/src/expression/evaluator.rs index a41572e..a1bea5a 100644 --- a/crates/ruststack-dynamodb-core/src/expression/evaluator.rs +++ b/crates/ruststack-dynamodb-core/src/expression/evaluator.rs @@ -8,11 +8,13 @@ use std::collections::HashMap; use ruststack_dynamodb_model::AttributeValue; -use super::ast::{ - AddAction, AttributePath, CompareOp, DeleteAction, Expr, FunctionName, LogicalOp, Operand, - PathElement, SetAction, SetValue, UpdateExpr, +use super::{ + ast::{ + AddAction, AttributePath, CompareOp, DeleteAction, Expr, FunctionName, LogicalOp, Operand, + PathElement, SetAction, SetValue, UpdateExpr, + }, + parser::ExpressionError, }; -use super::parser::ExpressionError; // --------------------------------------------------------------------------- // Evaluation context @@ -105,16 +107,16 @@ impl EvalContext<'_> { if is_query_constant(low) && is_query_constant(high) { if std::mem::discriminant(lo) != std::mem::discriminant(hi) { return Err(ExpressionError::TypeMismatch { - message: "BETWEEN bounds must have the same type when both \ - are expression attribute values" + message: "BETWEEN bounds must have the same type when both are expression \ + attribute values" .to_owned(), }); } // Check ordering: if low > high, it is a ValidationException. if compare_values(lo, hi, CompareOp::Gt)? { return Err(ExpressionError::TypeMismatch { - message: "BETWEEN bounds are in wrong order; \ - low bound must be less than or equal to high bound" + message: "BETWEEN bounds are in wrong order; low bound must be less than or \ + equal to high bound" .to_owned(), }); } @@ -196,8 +198,8 @@ impl EvalContext<'_> { if !is_valid_type_descriptor(&expected_type) { return Err(ExpressionError::TypeMismatch { message: format!( - "Invalid type: {expected_type}. \ - Valid types: S, SS, N, NS, B, BS, BOOL, NULL, L, M" + "Invalid type: {expected_type}. 
Valid types: S, SS, N, NS, B, BS, \ + BOOL, NULL, L, M" ), }); } @@ -224,8 +226,8 @@ impl EvalContext<'_> { return Err(ExpressionError::InvalidOperand { operation: "begins_with".to_owned(), message: format!( - "Incorrect operand type for operator or function; \ - operator or function: begins_with, operand type: {td}", + "Incorrect operand type for operator or function; operator or \ + function: begins_with, operand type: {td}", td = v.type_descriptor() ), }); @@ -504,8 +506,8 @@ impl EvalContext<'_> { return Err(ExpressionError::InvalidOperand { operation: "ADD".to_owned(), message: format!( - "Incorrect operand type for operator or function; \ - operator: ADD, operand type: {operand_type}" + "Incorrect operand type for operator or function; operator: ADD, operand \ + type: {operand_type}" ), }); } @@ -539,8 +541,8 @@ impl EvalContext<'_> { return Err(ExpressionError::InvalidOperand { operation: "DELETE".to_owned(), message: format!( - "Incorrect operand type for operator or function; \ - operator: DELETE, operand type: {operand_type}" + "Incorrect operand type for operator or function; operator: DELETE, operand \ + type: {operand_type}" ), }); } @@ -595,8 +597,8 @@ impl EvalContext<'_> { return Err(ExpressionError::InvalidOperand { operation: "DELETE".to_owned(), message: format!( - "Type mismatch for DELETE; operator type: {del_type}, \ - existing type: {existing_type}" + "Type mismatch for DELETE; operator type: {del_type}, existing type: \ + {existing_type}" ), }); } @@ -681,8 +683,8 @@ fn validate_ordering_operand_type( return Err(ExpressionError::InvalidOperand { operation: "operator".to_owned(), message: format!( - "Incorrect operand type for operator or function; \ - operator: {operator_name}, operand type: {type_desc}", + "Incorrect operand type for operator or function; operator: {operator_name}, \ + operand type: {type_desc}", type_desc = resolved_value.type_descriptor() ), }); @@ -973,7 +975,9 @@ fn precise_arithmetic(a: &str, b: &str, is_add: bool) -> 
Result 38 + 2 { // Precision would be lost in the result. return Err(ExpressionError::Validation { - message: "Number overflow. Attempting to store a number with magnitude larger than supported range".to_owned(), + message: "Number overflow. Attempting to store a number with magnitude larger than \ + supported range" + .to_owned(), }); } @@ -1012,7 +1016,9 @@ fn precise_arithmetic(a: &str, b: &str, is_add: bool) -> Result 38 { return Err(ExpressionError::Validation { - message: "Number overflow. Attempting to store a number with magnitude larger than supported range".to_owned(), + message: "Number overflow. Attempting to store a number with magnitude larger than \ + supported range" + .to_owned(), }); } @@ -1020,12 +1026,16 @@ fn precise_arithmetic(a: &str, b: &str, is_add: bool) -> Result 125 { return Err(ExpressionError::Validation { - message: "Number overflow. Attempting to store a number with magnitude larger than supported range".to_owned(), + message: "Number overflow. Attempting to store a number with magnitude larger than \ + supported range" + .to_owned(), }); } if magnitude < -130 { return Err(ExpressionError::Validation { - message: "Number underflow. Attempting to store a number with magnitude smaller than supported range".to_owned(), + message: "Number underflow. Attempting to store a number with magnitude smaller than \ + supported range" + .to_owned(), }); } @@ -1366,8 +1376,7 @@ fn validate_nested_path_for_set( // because DynamoDB requires existing intermediate containers. 
return Err(ExpressionError::InvalidOperand { operation: "SET".to_owned(), - message: "The document path provided in the update expression \ - is invalid for update" + message: "The document path provided in the update expression is invalid for update" .to_owned(), }); }; @@ -1399,8 +1408,8 @@ fn validate_path_type( } _ => Err(ExpressionError::InvalidOperand { operation: "SET".to_owned(), - message: "The document path provided in the update expression \ - is invalid for update" + message: "The document path provided in the update expression is invalid for \ + update" .to_owned(), }), } @@ -1416,8 +1425,8 @@ fn validate_path_type( } _ => Err(ExpressionError::InvalidOperand { operation: "SET".to_owned(), - message: "The document path provided in the update expression \ - is invalid for update" + message: "The document path provided in the update expression is invalid for \ + update" .to_owned(), }), }, @@ -1514,8 +1523,7 @@ fn apply_remove( // Path root doesn't exist - this is a validation error for nested paths. return Err(ExpressionError::InvalidOperand { operation: "REMOVE".to_owned(), - message: "The document path provided in the update expression \ - is invalid for update" + message: "The document path provided in the update expression is invalid for update" .to_owned(), }); } @@ -1662,8 +1670,8 @@ fn compute_add_result( Err(ExpressionError::InvalidOperand { operation: "ADD".to_owned(), message: format!( - "Type mismatch for ADD; operator type: {add_type}, \ - existing type: {existing_type}" + "Type mismatch for ADD; operator type: {add_type}, existing type: \ + {existing_type}" ), }) } diff --git a/crates/ruststack-dynamodb-core/src/expression/parser.rs b/crates/ruststack-dynamodb-core/src/expression/parser.rs index cc98932..edd49cc 100644 --- a/crates/ruststack-dynamodb-core/src/expression/parser.rs +++ b/crates/ruststack-dynamodb-core/src/expression/parser.rs @@ -4,9 +4,7 @@ //! projection expressions. Keywords and function names are matched //! 
case-insensitively per DynamoDB specification. -use std::fmt; -use std::iter::Peekable; -use std::str::Chars; +use std::{fmt, iter::Peekable, str::Chars}; use super::ast::{ AddAction, AttributePath, CompareOp, DeleteAction, Expr, FunctionName, LogicalOp, Operand, @@ -565,8 +563,8 @@ impl Parser { return Err(ExpressionError::InvalidOperand { operation: name.to_owned(), message: format!( - "The function is not allowed to be used this way in an expression; \ - function: {name}" + "The function is not allowed to be used this way in an expression; function: \ + {name}" ), }); } @@ -662,16 +660,15 @@ impl Parser { expected: "valid function name".to_owned(), found: format!( "'{func_name}' is not a valid function; valid functions are: \ - attribute_exists, attribute_not_exists, attribute_type, begins_with, \ - contains, size" + attribute_exists, attribute_not_exists, attribute_type, begins_with, \ + contains, size" ), }) } _ => Err(ExpressionError::UnexpectedToken { expected: "comparison operator, BETWEEN, or IN after operand".to_owned(), found: format!( - "Syntax error; a standalone value or path is not a valid condition; \ - found: {}", + "Syntax error; a standalone value or path is not a valid condition; found: {}", self.peek() ), }), @@ -726,8 +723,8 @@ impl Parser { return Err(ExpressionError::InvalidOperand { operation: "size".to_owned(), message: format!( - "Incorrect number of operands for operator or function; \ - operator or function: size, number of operands: {count}" + "Incorrect number of operands for operator or function; operator or \ + function: size, number of operands: {count}" ), }); } @@ -830,9 +827,9 @@ impl Parser { if seen_set { return Err(ExpressionError::InvalidOperand { operation: "UpdateExpression".to_owned(), - message: - "The \"SET\" section can only be used once in an update expression" - .to_owned(), + message: "The \"SET\" section can only be used once in an update \ + expression" + .to_owned(), }); } seen_set = true; @@ -843,7 +840,9 @@ 
impl Parser { if seen_remove { return Err(ExpressionError::InvalidOperand { operation: "UpdateExpression".to_owned(), - message: "The \"REMOVE\" section can only be used once in an update expression".to_owned(), + message: "The \"REMOVE\" section can only be used once in an update \ + expression" + .to_owned(), }); } seen_remove = true; @@ -854,9 +853,9 @@ impl Parser { if seen_add { return Err(ExpressionError::InvalidOperand { operation: "UpdateExpression".to_owned(), - message: - "The \"ADD\" section can only be used once in an update expression" - .to_owned(), + message: "The \"ADD\" section can only be used once in an update \ + expression" + .to_owned(), }); } seen_add = true; @@ -867,7 +866,9 @@ impl Parser { if seen_delete { return Err(ExpressionError::InvalidOperand { operation: "UpdateExpression".to_owned(), - message: "The \"DELETE\" section can only be used once in an update expression".to_owned(), + message: "The \"DELETE\" section can only be used once in an update \ + expression" + .to_owned(), }); } seen_delete = true; @@ -1150,8 +1151,8 @@ pub fn parse_projection(input: &str) -> Result, ExpressionErr if path.elements.len() > 32 { return Err(ExpressionError::Validation { message: format!( - "Invalid ProjectionExpression: The document path has too many nesting \ - levels; nesting levels: {}", + "Invalid ProjectionExpression: The document path has too many nesting levels; \ + nesting levels: {}", path.elements.len() ), }); @@ -1166,9 +1167,9 @@ pub fn parse_projection(input: &str) -> Result, ExpressionErr if seen.contains(&repr) { return Err(ExpressionError::Validation { message: format!( - "Invalid ProjectionExpression: Two document paths overlap with \ - each other; must remove or rewrite one of these paths; path one: \ - [{repr}], path two: [{repr}]" + "Invalid ProjectionExpression: Two document paths overlap with each \ + other; must remove or rewrite one of these paths; path one: [{repr}], \ + path two: [{repr}]" ), }); } @@ -1205,9 +1206,8 @@ fn 
path_to_resolved(path: &AttributePath) -> Vec { /// Validate that no two projection paths overlap or conflict. /// /// - **Overlap**: One path is a prefix of another, or two paths are identical. -/// - **Conflict**: At a shared prefix point, one path accesses via dot (map key) -/// and the other via index (list), meaning the same node would need to be both -/// a map and a list simultaneously. +/// - **Conflict**: At a shared prefix point, one path accesses via dot (map key) and the other via +/// index (list), meaning the same node would need to be both a map and a list simultaneously. fn validate_projection_paths(paths: &[AttributePath]) -> Result<(), ExpressionError> { let resolved: Vec> = paths.iter().map(path_to_resolved).collect(); @@ -1238,9 +1238,9 @@ fn validate_projection_paths(paths: &[AttributePath]) -> Result<(), ExpressionEr _ => { return Err(ExpressionError::Validation { message: format!( - "Invalid ProjectionExpression: Two document paths conflict \ - with each other; must remove or rewrite one of these paths; \ - path one: [{}], path two: [{}]", + "Invalid ProjectionExpression: Two document paths conflict with \ + each other; must remove or rewrite one of these paths; path one: \ + [{}], path two: [{}]", paths[i], paths[j] ), }); @@ -1253,9 +1253,9 @@ fn validate_projection_paths(paths: &[AttributePath]) -> Result<(), ExpressionEr if prefix_matches && (a.len() != b.len()) { return Err(ExpressionError::Validation { message: format!( - "Invalid ProjectionExpression: Two document paths overlap with \ - each other; must remove or rewrite one of these paths; \ - path one: [{}], path two: [{}]", + "Invalid ProjectionExpression: Two document paths overlap with each \ + other; must remove or rewrite one of these paths; path one: [{}], path \ + two: [{}]", paths[i], paths[j] ), }); diff --git a/crates/ruststack-dynamodb-core/src/handler.rs b/crates/ruststack-dynamodb-core/src/handler.rs index ff4eb2d..926170f 100644 --- 
a/crates/ruststack-dynamodb-core/src/handler.rs +++ b/crates/ruststack-dynamodb-core/src/handler.rs @@ -1,16 +1,12 @@ //! DynamoDB handler implementation bridging HTTP to business logic. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_dynamodb_http::body::DynamoDBResponseBody; -use ruststack_dynamodb_http::dispatch::DynamoDBHandler; -use ruststack_dynamodb_http::response::json_response; -use ruststack_dynamodb_model::error::DynamoDBError; -use ruststack_dynamodb_model::operations::DynamoDBOperation; +use ruststack_dynamodb_http::{ + body::DynamoDBResponseBody, dispatch::DynamoDBHandler, response::json_response, +}; +use ruststack_dynamodb_model::{error::DynamoDBError, operations::DynamoDBOperation}; use crate::provider::RustStackDynamoDB; @@ -44,6 +40,7 @@ impl DynamoDBHandler for RustStackDynamoDBHandler { } /// Dispatch a DynamoDB operation to the appropriate handler method. +#[allow(clippy::too_many_lines)] fn dispatch( provider: &RustStackDynamoDB, op: DynamoDBOperation, @@ -118,6 +115,51 @@ fn dispatch( let output = provider.handle_batch_write_item(input)?; serialize(&output, &request_id) } + DynamoDBOperation::TagResource => { + let input = deserialize(body)?; + let output = provider.handle_tag_resource(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::UntagResource => { + let input = deserialize(body)?; + let output = provider.handle_untag_resource(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::ListTagsOfResource => { + let input = deserialize(body)?; + let output = provider.handle_list_tags_of_resource(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::DescribeTimeToLive => { + let input = deserialize(body)?; + let output = provider.handle_describe_time_to_live(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::UpdateTimeToLive => { + let input = deserialize(body)?; + let output = 
provider.handle_update_time_to_live(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::TransactGetItems => { + let input = deserialize(body)?; + let output = provider.handle_transact_get_items(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::TransactWriteItems => { + let input = deserialize(body)?; + let output = provider.handle_transact_write_items(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::DescribeLimits => { + let input = deserialize(body)?; + let output = provider.handle_describe_limits(input)?; + serialize(&output, &request_id) + } + DynamoDBOperation::DescribeEndpoints => { + let input = deserialize(body)?; + let output = provider.handle_describe_endpoints(input)?; + serialize(&output, &request_id) + } } } diff --git a/crates/ruststack-dynamodb-core/src/provider.rs b/crates/ruststack-dynamodb-core/src/provider.rs index 2d63b99..f3d3ba9 100644 --- a/crates/ruststack-dynamodb-core/src/provider.rs +++ b/crates/ruststack-dynamodb-core/src/provider.rs @@ -1,38 +1,51 @@ //! DynamoDB provider implementing all MVP operations. 
-use std::collections::{HashMap, HashSet}; -use std::sync::Arc; - -use ruststack_dynamodb_model::AttributeValue; -use ruststack_dynamodb_model::error::DynamoDBError; -use ruststack_dynamodb_model::input::{ - BatchGetItemInput, BatchWriteItemInput, CreateTableInput, DeleteItemInput, DeleteTableInput, - DescribeTableInput, GetItemInput, ListTablesInput, PutItemInput, QueryInput, ScanInput, - UpdateItemInput, UpdateTableInput, -}; -use ruststack_dynamodb_model::output::{ - BatchGetItemOutput, BatchWriteItemOutput, CreateTableOutput, DeleteItemOutput, - DeleteTableOutput, DescribeTableOutput, GetItemOutput, ListTablesOutput, PutItemOutput, - QueryOutput, ScanOutput, UpdateItemOutput, UpdateTableOutput, -}; -use ruststack_dynamodb_model::types::{ - AttributeAction, AttributeDefinition, AttributeValueUpdate, BillingMode, ComparisonOperator, - Condition, ConditionalOperator, ExpectedAttributeValue, KeyType, ReturnValue, - ScalarAttributeType, Select, TableStatus, +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; -use crate::config::DynamoDBConfig; -use crate::error::{expression_error_to_dynamodb, storage_error_to_dynamodb}; -use crate::expression::{ - AttributePath, EvalContext, PathElement, UpdateExpr, collect_names_from_expr, - collect_names_from_projection, collect_names_from_update, collect_paths_from_expr, - collect_values_from_expr, collect_values_from_update, parse_condition, parse_projection, - parse_update, +use ruststack_dynamodb_model::{ + AttributeValue, + error::DynamoDBError, + input::{ + BatchGetItemInput, BatchWriteItemInput, CreateTableInput, DeleteItemInput, + DeleteTableInput, DescribeEndpointsInput, DescribeLimitsInput, DescribeTableInput, + DescribeTimeToLiveInput, GetItemInput, ListTablesInput, ListTagsOfResourceInput, + PutItemInput, QueryInput, ScanInput, TagResourceInput, TransactGetItemsInput, + TransactWriteItemsInput, UntagResourceInput, UpdateItemInput, UpdateTableInput, + UpdateTimeToLiveInput, + }, + output::{ + 
BatchGetItemOutput, BatchWriteItemOutput, CreateTableOutput, DeleteItemOutput, + DeleteTableOutput, DescribeEndpointsOutput, DescribeLimitsOutput, DescribeTableOutput, + DescribeTimeToLiveOutput, Endpoint, GetItemOutput, ListTablesOutput, + ListTagsOfResourceOutput, PutItemOutput, QueryOutput, ScanOutput, TagResourceOutput, + TransactGetItemsOutput, TransactWriteItemsOutput, UntagResourceOutput, UpdateItemOutput, + UpdateTableOutput, UpdateTimeToLiveOutput, + }, + types::{ + AttributeAction, AttributeDefinition, AttributeValueUpdate, BillingMode, + CancellationReason, ComparisonOperator, Condition, ConditionalOperator, + ExpectedAttributeValue, ItemResponse, KeyType, ReturnValue, ScalarAttributeType, Select, + TableStatus, TimeToLiveDescription, + }, }; -use crate::state::{DynamoDBServiceState, DynamoDBTable}; -use crate::storage::{ - KeyAttribute, KeySchema, PrimaryKey, SortKeyCondition, SortableAttributeValue, TableStorage, - calculate_item_size, extract_primary_key, partition_key_segment, + +use crate::{ + config::DynamoDBConfig, + error::{expression_error_to_dynamodb, storage_error_to_dynamodb}, + expression::{ + AttributePath, EvalContext, PathElement, UpdateExpr, collect_names_from_expr, + collect_names_from_projection, collect_names_from_update, collect_paths_from_expr, + collect_values_from_expr, collect_values_from_update, parse_condition, parse_projection, + parse_update, + }, + state::{DynamoDBServiceState, DynamoDBTable}, + storage::{ + KeyAttribute, KeySchema, PrimaryKey, SortKeyCondition, SortableAttributeValue, + TableStorage, calculate_item_size, extract_primary_key, partition_key_segment, + }, }; /// Maximum item size in bytes (400 KB). @@ -98,7 +111,8 @@ fn validate_number_string(s: &str) -> Result<(), DynamoDBError> { if ch == '.' 
{ if has_dot { return Err(DynamoDBError::validation( - "The parameter cannot be converted to a numeric value: numeric value is not valid", + "The parameter cannot be converted to a numeric value: numeric value is not \ + valid", )); } has_dot = true; @@ -136,7 +150,8 @@ fn validate_number_string(s: &str) -> Result<(), DynamoDBError> { } // Compute the actual magnitude of the number. - // The number is: significant_digits * 10^(explicit_exp - frac_digits + trailing_zeros_in_significant) + // The number is: significant_digits * 10^(explicit_exp - frac_digits + + // trailing_zeros_in_significant) let dot_pos = mantissa.find('.'); #[allow(clippy::cast_possible_wrap)] let frac_digits = if let Some(pos) = dot_pos { @@ -153,7 +168,8 @@ fn validate_number_string(s: &str) -> Result<(), DynamoDBError> { if magnitude > 125 { return Err(DynamoDBError::validation( - "Number overflow. Attempting to store a number with magnitude larger than supported range", + "Number overflow. Attempting to store a number with magnitude larger than supported \ + range", )); } // The smallest allowed magnitude is -130. @@ -161,7 +177,8 @@ fn validate_number_string(s: &str) -> Result<(), DynamoDBError> { // A number like 1e-131 has magnitude = -131, which is NOT allowed. if magnitude < -130 { return Err(DynamoDBError::validation( - "Number underflow. Attempting to store a number with magnitude smaller than supported range", + "Number underflow. 
Attempting to store a number with magnitude smaller than supported \ + range", )); } @@ -206,8 +223,8 @@ fn validate_numbers_in_value(val: &AttributeValue) -> Result<(), DynamoDBError> fn validate_table_name(name: &str) -> Result<(), DynamoDBError> { if name.len() < 3 || name.len() > 255 { return Err(DynamoDBError::validation(format!( - "TableName must be at least 3 characters long and at most 255 characters long, \ - but was {} characters", + "TableName must be at least 3 characters long and at most 255 characters long, but \ + was {} characters", name.len() ))); } @@ -233,17 +250,15 @@ fn validate_key_not_empty( match val { AttributeValue::S(s) if s.is_empty() => { return Err(DynamoDBError::validation(format!( - "One or more parameter values are not valid. \ - The AttributeValue for a key attribute cannot contain an \ - empty string value. Key: {}", + "One or more parameter values are not valid. The AttributeValue for a key \ + attribute cannot contain an empty string value. Key: {}", ka.name ))); } AttributeValue::B(b) if b.is_empty() => { return Err(DynamoDBError::validation(format!( - "One or more parameter values are not valid. \ - The AttributeValue for a key attribute cannot contain an \ - empty string value. Key: {}", + "One or more parameter values are not valid. The AttributeValue for a key \ + attribute cannot contain an empty string value. Key: {}", ka.name ))); } @@ -262,9 +277,8 @@ fn validate_key_only_has_key_attrs( for attr_name in key.keys() { if !is_key_attribute(attr_name, key_schema) { return Err(DynamoDBError::validation(format!( - "One or more parameter values are not valid. \ - Number of user supplied keys don't match number of table schema keys. \ - Keys provided: [{}], schema keys: [{}]", + "One or more parameter values are not valid. Number of user supplied keys don't \ + match number of table schema keys. 
Keys provided: [{}], schema keys: [{}]", format_key_names(key), format_schema_key_names(key_schema), ))); @@ -291,8 +305,8 @@ fn validate_key_types( }; if !type_matches { return Err(DynamoDBError::validation(format!( - "The provided key element does not match the schema. \ - Expected type {expected} for key column {name}, got type {actual}", + "The provided key element does not match the schema. Expected type {expected} \ + for key column {name}, got type {actual}", expected = ka.attr_type, name = ka.name, actual = val.type_descriptor(), @@ -309,8 +323,8 @@ fn validate_no_duplicate_attributes_to_get(attrs: &[String]) -> Result<(), Dynam for attr in attrs { if !seen.insert(attr.as_str()) { return Err(DynamoDBError::validation(format!( - "One or more parameter values are not valid. \ - Duplicate value in AttributesToGet: {attr}" + "One or more parameter values are not valid. Duplicate value in AttributesToGet: \ + {attr}" ))); } } @@ -356,7 +370,8 @@ fn validate_select( Select::SpecificAttributes => { if !has_projection && !has_attributes_to_get { return Err(DynamoDBError::validation( - "SPECIFIC_ATTRIBUTES requires either ProjectionExpression or AttributesToGet", + "SPECIFIC_ATTRIBUTES requires either ProjectionExpression or \ + AttributesToGet", )); } } @@ -395,8 +410,8 @@ fn validate_parallel_scan( // TotalSegments must be in [1, MAX_TOTAL_SEGMENTS]. if total > MAX_TOTAL_SEGMENTS { return Err(DynamoDBError::validation(format!( - "1 validation error detected: Value '{total}' at 'totalSegments' failed \ - to satisfy constraint: Member must have value less than or equal to \ + "1 validation error detected: Value '{total}' at 'totalSegments' failed to \ + satisfy constraint: Member must have value less than or equal to \ {MAX_TOTAL_SEGMENTS}. The Segment parameter is required but was not present \ in the request when parameter TotalSegments is present" ))); @@ -404,8 +419,8 @@ fn validate_parallel_scan( // Segment must be in [0, TotalSegments). 
if seg >= total { return Err(DynamoDBError::validation(format!( - "The Segment parameter is zero-indexed and must be less than \ - parameter TotalSegments. Segment: {seg}, TotalSegments: {total}" + "The Segment parameter is zero-indexed and must be less than parameter \ + TotalSegments. Segment: {seg}, TotalSegments: {total}" ))); } // ExclusiveStartKey must map to the same segment. @@ -415,8 +430,8 @@ fn validate_parallel_scan( #[allow(clippy::cast_sign_loss)] if key_segment != seg as u32 { return Err(DynamoDBError::validation( - "The provided Exclusive start key does not map to the provided \ - Segment and TotalSegments values." + "The provided Exclusive start key does not map to the provided Segment \ + and TotalSegments values." .to_owned(), )); } @@ -429,12 +444,12 @@ fn validate_parallel_scan( // missing one, but boto3 rejects this client-side. We still // handle it for raw API callers. (Some(_), None) => Err(DynamoDBError::validation( - "The TotalSegments parameter is required but was not present in the request \ - when parameter Segment is present", + "The TotalSegments parameter is required but was not present in the request when \ + parameter Segment is present", )), (None, Some(_)) => Err(DynamoDBError::validation( - "The Segment parameter is required but was not present in the request \ - when parameter TotalSegments is present", + "The Segment parameter is required but was not present in the request when parameter \ + TotalSegments is present", )), } } @@ -510,9 +525,8 @@ impl RustStackDynamoDB { // Validate attribute definitions are present. if input.attribute_definitions.is_empty() { return Err(DynamoDBError::validation( - "One or more parameter values were invalid: \ - Some AttributeDefinitions are not valid. \ - AttributeDefinitions must be provided for all key attributes", + "One or more parameter values were invalid: Some AttributeDefinitions are not \ + valid. 
AttributeDefinitions must be provided for all key attributes", )); } @@ -563,6 +577,7 @@ impl RustStackDynamoDB { stream_specification: input.stream_specification, sse_specification: input.sse_specification, tags: parking_lot::RwLock::new(input.tags), + ttl: parking_lot::RwLock::new(None), arn, table_id: uuid::Uuid::new_v4().to_string(), created_at: chrono::Utc::now(), @@ -728,7 +743,8 @@ impl RustStackDynamoDB { if !input.expected.is_empty() && input.condition_expression.is_some() { return Err(DynamoDBError::validation( "Can not use both expression and non-expression parameters in the same request: \ - Non-expression parameters: {Expected} Expression parameters: {ConditionExpression}", + Non-expression parameters: {Expected} Expression parameters: \ + {ConditionExpression}", )); } @@ -951,7 +967,8 @@ impl RustStackDynamoDB { if !input.expected.is_empty() && input.condition_expression.is_some() { return Err(DynamoDBError::validation( "Can not use both expression and non-expression parameters in the same request: \ - Non-expression parameters: {Expected} Expression parameters: {ConditionExpression}", + Non-expression parameters: {Expected} Expression parameters: \ + {ConditionExpression}", )); } @@ -1077,13 +1094,15 @@ impl RustStackDynamoDB { if !input.attribute_updates.is_empty() && input.update_expression.is_some() { return Err(DynamoDBError::validation( "Can not use both expression and non-expression parameters in the same request: \ - Non-expression parameters: {AttributeUpdates} Expression parameters: {UpdateExpression}", + Non-expression parameters: {AttributeUpdates} Expression parameters: \ + {UpdateExpression}", )); } if !input.attribute_updates.is_empty() && input.condition_expression.is_some() { return Err(DynamoDBError::validation( "Can not use both expression and non-expression parameters in the same request: \ - Non-expression parameters: {AttributeUpdates} Expression parameters: {ConditionExpression}", + Non-expression parameters: {AttributeUpdates} 
Expression parameters: \ + {ConditionExpression}", )); } if !input.expected.is_empty() && input.update_expression.is_some() { @@ -1095,7 +1114,8 @@ impl RustStackDynamoDB { if !input.expected.is_empty() && input.condition_expression.is_some() { return Err(DynamoDBError::validation( "Can not use both expression and non-expression parameters in the same request: \ - Non-expression parameters: {Expected} Expression parameters: {ConditionExpression}", + Non-expression parameters: {Expected} Expression parameters: \ + {ConditionExpression}", )); } @@ -1150,8 +1170,7 @@ impl RustStackDynamoDB { } Some(existing_val) => { return Err(DynamoDBError::validation(format!( - "Type mismatch for ADD; operator type: L, \ - existing type: {}", + "Type mismatch for ADD; operator type: L, existing type: {}", existing_val.type_descriptor(), ))); } @@ -1891,8 +1910,8 @@ impl RustStackDynamoDB { let total_writes: usize = input.request_items.values().map(Vec::len).sum(); if total_writes > 25 { return Err(DynamoDBError::validation(format!( - "Too many items in the BatchWriteItem request; \ - the request length {total_writes} exceeds the limit of 25" + "Too many items in the BatchWriteItem request; the request length {total_writes} \ + exceeds the limit of 25" ))); } @@ -1995,6 +2014,681 @@ impl RustStackDynamoDB { } } +// --------------------------------------------------------------------------- +// Tagging operations +// --------------------------------------------------------------------------- + +/// Maximum number of tags per resource. +const MAX_TAGS_PER_RESOURCE: usize = 50; + +/// Maximum tag key length (characters). +const MAX_TAG_KEY_LENGTH: usize = 128; + +/// Maximum tag value length (characters). +const MAX_TAG_VALUE_LENGTH: usize = 256; + +impl RustStackDynamoDB { + /// Resolve a DynamoDB resource ARN to a table name. 
+ fn resolve_table_from_arn(arn: &str) -> Result<&str, DynamoDBError> { + arn.rsplit_once('/') + .map(|(_, name)| name) + .filter(|name| !name.is_empty()) + .ok_or_else(|| DynamoDBError::validation("Invalid resource ARN")) + } + + /// Handle `TagResource`. + #[allow(clippy::needless_pass_by_value)] + pub fn handle_tag_resource( + &self, + input: TagResourceInput, + ) -> Result { + let table_name = Self::resolve_table_from_arn(&input.resource_arn)?; + let table = self.state.require_table(table_name)?; + + // Validate each tag. + for tag in &input.tags { + if tag.key.is_empty() || tag.key.len() > MAX_TAG_KEY_LENGTH { + return Err(DynamoDBError::validation(format!( + "Tag key must be between 1 and {MAX_TAG_KEY_LENGTH} characters long" + ))); + } + if tag.value.len() > MAX_TAG_VALUE_LENGTH { + return Err(DynamoDBError::validation(format!( + "Tag value must be no more than {MAX_TAG_VALUE_LENGTH} characters long" + ))); + } + if tag.key.starts_with("aws:") { + return Err(DynamoDBError::validation( + "Tag keys starting with 'aws:' are reserved for system use", + )); + } + } + + let mut tags = table.tags.write(); + + // Clone, merge, validate count, then commit — avoids TOCTOU where + // over-limit tags persist if validation fails after mutation. + let mut merged = tags.clone(); + for new_tag in &input.tags { + if let Some(existing) = merged.iter_mut().find(|t| t.key == new_tag.key) { + existing.value.clone_from(&new_tag.value); + } else { + merged.push(new_tag.clone()); + } + } + + if merged.len() > MAX_TAGS_PER_RESOURCE { + return Err(DynamoDBError::validation(format!( + "The number of tags exceeds the limit of {MAX_TAGS_PER_RESOURCE}" + ))); + } + + *tags = merged; + + Ok(TagResourceOutput {}) + } + + /// Handle `UntagResource`. 
+ #[allow(clippy::needless_pass_by_value)] + pub fn handle_untag_resource( + &self, + input: UntagResourceInput, + ) -> Result { + let table_name = Self::resolve_table_from_arn(&input.resource_arn)?; + let table = self.state.require_table(table_name)?; + + let keys_to_remove: HashSet<&str> = input.tag_keys.iter().map(String::as_str).collect(); + let mut tags = table.tags.write(); + tags.retain(|t| !keys_to_remove.contains(t.key.as_str())); + + Ok(UntagResourceOutput {}) + } + + /// Handle `ListTagsOfResource`. + #[allow(clippy::needless_pass_by_value)] + pub fn handle_list_tags_of_resource( + &self, + input: ListTagsOfResourceInput, + ) -> Result { + let table_name = Self::resolve_table_from_arn(&input.resource_arn)?; + let table = self.state.require_table(table_name)?; + + let tags = table.tags.read().clone(); + Ok(ListTagsOfResourceOutput { + tags: Some(tags), + next_token: None, + }) + } +} + +// --------------------------------------------------------------------------- +// Time to Live operations +// --------------------------------------------------------------------------- + +impl RustStackDynamoDB { + /// Handle `UpdateTimeToLive`. + #[allow(clippy::needless_pass_by_value)] + pub fn handle_update_time_to_live( + &self, + input: UpdateTimeToLiveInput, + ) -> Result { + validate_table_name(&input.table_name)?; + let table = self.state.require_table(&input.table_name)?; + + if input.time_to_live_specification.attribute_name.is_empty() { + return Err(DynamoDBError::validation( + "TimeToLiveSpecification AttributeName must not be empty", + )); + } + + let spec = input.time_to_live_specification; + *table.ttl.write() = Some(spec.clone()); + + Ok(UpdateTimeToLiveOutput { + time_to_live_specification: Some(spec), + }) + } + + /// Handle `DescribeTimeToLive`. 
+ #[allow(clippy::needless_pass_by_value)] + pub fn handle_describe_time_to_live( + &self, + input: DescribeTimeToLiveInput, + ) -> Result { + validate_table_name(&input.table_name)?; + let table = self.state.require_table(&input.table_name)?; + + let ttl_guard = table.ttl.read(); + let description = match ttl_guard.as_ref() { + Some(spec) => TimeToLiveDescription { + attribute_name: Some(spec.attribute_name.clone()), + time_to_live_status: Some(if spec.enabled { + "ENABLED".to_owned() + } else { + "DISABLED".to_owned() + }), + }, + None => TimeToLiveDescription { + attribute_name: None, + time_to_live_status: Some("DISABLED".to_owned()), + }, + }; + + Ok(DescribeTimeToLiveOutput { + time_to_live_description: Some(description), + }) + } +} + +// --------------------------------------------------------------------------- +// Describe operations +// --------------------------------------------------------------------------- + +impl RustStackDynamoDB { + /// Handle `DescribeLimits`. + /// + /// Returns hardcoded account and table capacity limits matching the default + /// AWS DynamoDB provisioned-mode limits. + pub fn handle_describe_limits( + &self, + _input: DescribeLimitsInput, + ) -> Result { + Ok(DescribeLimitsOutput { + account_max_read_capacity_units: Some(80_000), + account_max_write_capacity_units: Some(80_000), + table_max_read_capacity_units: Some(40_000), + table_max_write_capacity_units: Some(40_000), + }) + } + + /// Handle `DescribeEndpoints`. + /// + /// Returns a single endpoint for the configured region with a 1440-minute + /// (24 hour) cache period, matching the real DynamoDB behaviour. 
+ pub fn handle_describe_endpoints( + &self, + _input: DescribeEndpointsInput, + ) -> Result { + let address = format!("dynamodb.{}.amazonaws.com", self.config.default_region); + Ok(DescribeEndpointsOutput { + endpoints: vec![Endpoint { + address, + cache_period_in_minutes: 1440, + }], + }) + } +} + +// --------------------------------------------------------------------------- +// Transaction operations +// --------------------------------------------------------------------------- + +/// Maximum number of items in a transaction. +const MAX_TRANSACT_ITEMS: usize = 100; + +impl RustStackDynamoDB { + /// Handle `TransactGetItems`. + #[allow(clippy::needless_pass_by_value)] + pub fn handle_transact_get_items( + &self, + input: TransactGetItemsInput, + ) -> Result { + if input.transact_items.is_empty() { + return Err(DynamoDBError::validation( + "1 validation error detected: Value null at 'transactItems' failed to satisfy \ + constraint: Member must not be null", + )); + } + if input.transact_items.len() > MAX_TRANSACT_ITEMS { + return Err(DynamoDBError::validation(format!( + "1 validation error detected: Value '[TransactGetItem]' at 'transactItems' failed \ + to satisfy constraint: Member must have length less than or equal to \ + {MAX_TRANSACT_ITEMS}" + ))); + } + + let mut responses = Vec::with_capacity(input.transact_items.len()); + + for transact_item in &input.transact_items { + let get = &transact_item.get; + let table = self.state.require_table(&get.table_name)?; + let pk = extract_primary_key(&table.key_schema, &get.key) + .map_err(storage_error_to_dynamodb)?; + + let item = table.storage.get_item(&pk); + + // Apply projection if specified. 
+ let result_item = match (item, &get.projection_expression) { + (Some(found_item), Some(projection)) => { + let names = get.expression_attribute_names.as_ref(); + let empty_names = HashMap::new(); + let names_ref = names.unwrap_or(&empty_names); + let paths = + parse_projection(projection).map_err(projection_error_to_dynamodb)?; + let empty_values = HashMap::new(); + let ctx = EvalContext { + item: &found_item, + names: names_ref, + values: &empty_values, + }; + let projected = ctx.apply_projection(&paths); + if projected.is_empty() { + None + } else { + Some(projected) + } + } + (Some(found_item), None) => Some(found_item), + (None, _) => None, + }; + + responses.push(ItemResponse { item: result_item }); + } + + Ok(TransactGetItemsOutput { + consumed_capacity: Vec::new(), + responses: Some(responses), + }) + } + + /// Handle `TransactWriteItems`. + #[allow(clippy::too_many_lines, clippy::needless_pass_by_value)] + pub fn handle_transact_write_items( + &self, + input: TransactWriteItemsInput, + ) -> Result { + if input.transact_items.is_empty() { + return Err(DynamoDBError::validation( + "1 validation error detected: Value null at 'transactItems' failed to satisfy \ + constraint: Member must not be null", + )); + } + if input.transact_items.len() > MAX_TRANSACT_ITEMS { + return Err(DynamoDBError::validation(format!( + "1 validation error detected: Value '[TransactWriteItem]' at 'transactItems' \ + failed to satisfy constraint: Member must have length less than or equal to \ + {MAX_TRANSACT_ITEMS}" + ))); + } + + // Phase 1: Validate each item has exactly one action and collect + // (table_name, primary_key) pairs for duplicate detection. 
+ let mut seen_keys: HashSet<(String, PrimaryKey)> = HashSet::new(); + + for (idx, item) in input.transact_items.iter().enumerate() { + let action_count = [ + item.condition_check.is_some(), + item.put.is_some(), + item.delete.is_some(), + item.update.is_some(), + ] + .iter() + .filter(|&&b| b) + .count(); + + if action_count != 1 { + return Err(DynamoDBError::validation(format!( + "TransactItems[{idx}] must specify exactly one of ConditionCheck, Put, \ + Delete, or Update" + ))); + } + + // Extract (table_name, key) for duplicate detection. + let (table_name, key_map) = if let Some(ref cc) = item.condition_check { + (cc.table_name.as_str(), &cc.key) + } else if let Some(ref put) = item.put { + let table = self.state.require_table(&put.table_name)?; + let pk = extract_primary_key(&table.key_schema, &put.item) + .map_err(storage_error_to_dynamodb)?; + if !seen_keys.insert((put.table_name.clone(), pk)) { + return Err(DynamoDBError::validation( + "Transaction request cannot include multiple operations on one item", + )); + } + continue; + } else if let Some(ref del) = item.delete { + (del.table_name.as_str(), &del.key) + } else if let Some(ref upd) = item.update { + (upd.table_name.as_str(), &upd.key) + } else { + continue; + }; + + let table = self.state.require_table(table_name)?; + let pk = extract_primary_key(&table.key_schema, key_map) + .map_err(storage_error_to_dynamodb)?; + if !seen_keys.insert((table_name.to_owned(), pk)) { + return Err(DynamoDBError::validation( + "Transaction request cannot include multiple operations on one item", + )); + } + } + + // Phase 2: Evaluate all condition expressions. 
+ let mut cancellation_reasons: Vec = vec![ + CancellationReason { + code: Some("None".to_owned()), + message: Some("None".to_owned()), + item: None, + }; + input.transact_items.len() + ]; + let mut any_cancelled = false; + + for (idx, item) in input.transact_items.iter().enumerate() { + let condition_result = self.evaluate_transact_write_condition(item); + match condition_result { + Ok(()) => {} + Err(reason) => { + cancellation_reasons[idx] = reason; + any_cancelled = true; + } + } + } + + if any_cancelled { + return Err(DynamoDBError::transaction_cancelled(cancellation_reasons)); + } + + // Phase 3: Apply all writes. + for item in &input.transact_items { + if let Some(ref put) = item.put { + let table = self.state.require_table(&put.table_name)?; + let old = table + .storage + .put_item(put.item.clone()) + .map_err(storage_error_to_dynamodb)?; + + if table + .stream_specification + .as_ref() + .is_some_and(|s| s.stream_enabled) + { + let event_name = if old.is_some() { + crate::stream::ChangeEventName::Modify + } else { + crate::stream::ChangeEventName::Insert + }; + let keys = extract_key_attributes(&put.item, &table.key_schema_elements); + let size = calculate_item_size(&put.item); + self.emitter.emit(crate::stream::ChangeEvent { + table_name: table.name.clone(), + event_name, + keys, + old_image: old, + new_image: Some(put.item.clone()), + size_bytes: size, + }); + } + } else if let Some(ref del) = item.delete { + let table = self.state.require_table(&del.table_name)?; + let pk = extract_primary_key(&table.key_schema, &del.key) + .map_err(storage_error_to_dynamodb)?; + let old = table.storage.delete_item(&pk); + + if table + .stream_specification + .as_ref() + .is_some_and(|s| s.stream_enabled) + { + if let Some(ref old_item) = old { + let keys = extract_key_attributes(old_item, &table.key_schema_elements); + let size = calculate_item_size(old_item); + self.emitter.emit(crate::stream::ChangeEvent { + table_name: table.name.clone(), + event_name: 
crate::stream::ChangeEventName::Remove, + keys, + old_image: Some(old_item.clone()), + new_image: None, + size_bytes: size, + }); + } + } + } else if let Some(ref upd) = item.update { + let table = self.state.require_table(&upd.table_name)?; + let pk = extract_primary_key(&table.key_schema, &upd.key) + .map_err(storage_error_to_dynamodb)?; + let existing = table.storage.get_item(&pk); + let current = existing.clone().unwrap_or_else(|| upd.key.clone()); + + let names = upd.expression_attribute_names.as_ref(); + let values = upd.expression_attribute_values.as_ref(); + let empty_names = HashMap::new(); + let empty_values = HashMap::new(); + let names_ref = names.unwrap_or(&empty_names); + let values_ref = values.unwrap_or(&empty_values); + + let parsed = + parse_update(&upd.update_expression).map_err(expression_error_to_dynamodb)?; + let ctx = EvalContext { + item: ¤t, + names: names_ref, + values: values_ref, + }; + let updated = ctx + .apply_update(&parsed) + .map_err(expression_error_to_dynamodb)?; + + let old = table + .storage + .put_item(updated.clone()) + .map_err(storage_error_to_dynamodb)?; + + if table + .stream_specification + .as_ref() + .is_some_and(|s| s.stream_enabled) + { + let event_name = if existing.is_some() { + crate::stream::ChangeEventName::Modify + } else { + crate::stream::ChangeEventName::Insert + }; + let keys = extract_key_attributes(&updated, &table.key_schema_elements); + let size = calculate_item_size(&updated); + self.emitter.emit(crate::stream::ChangeEvent { + table_name: table.name.clone(), + event_name, + keys, + old_image: old, + new_image: Some(updated), + size_bytes: size, + }); + } + } + // ConditionCheck: no mutation needed. + } + + Ok(TransactWriteItemsOutput { + consumed_capacity: Vec::new(), + item_collection_metrics: HashMap::new(), + }) + } + + /// Evaluate a condition expression for a single transaction write item. 
+ /// + /// Returns `Ok(())` if the condition passes (or no condition exists), + /// or a `CancellationReason` if the condition fails. + fn evaluate_transact_write_condition( + &self, + item: &ruststack_dynamodb_model::types::TransactWriteItem, + ) -> Result<(), CancellationReason> { + if let Some(ref cc) = item.condition_check { + self.evaluate_condition_for_key( + &cc.table_name, + &cc.key, + Some(&cc.condition_expression), + cc.expression_attribute_names.as_ref(), + cc.expression_attribute_values.as_ref(), + cc.return_values_on_condition_check_failure.as_deref(), + ) + } else if let Some(ref put) = item.put { + if let Some(ref condition) = put.condition_expression { + self.evaluate_transact_put_condition(put, condition) + } else { + Ok(()) + } + } else if let Some(ref del) = item.delete { + self.evaluate_condition_for_key( + &del.table_name, + &del.key, + del.condition_expression.as_deref(), + del.expression_attribute_names.as_ref(), + del.expression_attribute_values.as_ref(), + del.return_values_on_condition_check_failure.as_deref(), + ) + } else if let Some(ref upd) = item.update { + self.evaluate_condition_for_key( + &upd.table_name, + &upd.key, + upd.condition_expression.as_deref(), + upd.expression_attribute_names.as_ref(), + upd.expression_attribute_values.as_ref(), + upd.return_values_on_condition_check_failure.as_deref(), + ) + } else { + Ok(()) + } + } + + /// Evaluate a condition expression for a `TransactPut` action. + /// + /// Put actions derive their primary key from the item rather than a + /// separate `Key` field, requiring special handling. 
+ fn evaluate_transact_put_condition( + &self, + put: &ruststack_dynamodb_model::types::TransactPut, + condition: &str, + ) -> Result<(), CancellationReason> { + let table = self + .state + .require_table(&put.table_name) + .map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.message.clone()), + item: None, + })?; + let pk = + extract_primary_key(&table.key_schema, &put.item).map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.to_string()), + item: None, + })?; + let key_map: HashMap = + extract_key_attributes(&put.item, &table.key_schema_elements); + let existing = table.storage.get_item(&pk); + let empty = HashMap::new(); + let item_ref = existing.as_ref().unwrap_or(&empty); + let empty_names = HashMap::new(); + let empty_values = HashMap::new(); + let names = put + .expression_attribute_names + .as_ref() + .unwrap_or(&empty_names); + let values = put + .expression_attribute_values + .as_ref() + .unwrap_or(&empty_values); + + let expr = parse_condition(condition).map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.to_string()), + item: None, + })?; + let ctx = EvalContext { + item: item_ref, + names, + values, + }; + let result = ctx.evaluate(&expr).map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.to_string()), + item: None, + })?; + if !result { + let return_item = + if put.return_values_on_condition_check_failure.as_deref() == Some("ALL_OLD") { + existing + } else { + None + }; + return Err(CancellationReason { + code: Some("ConditionalCheckFailed".to_owned()), + message: Some("The conditional request failed".to_owned()), + item: return_item.or(Some(key_map)), + }); + } + Ok(()) + } + + /// Evaluate a condition expression for a given table and key. 
+ fn evaluate_condition_for_key( + &self, + table_name: &str, + key: &HashMap, + condition_expression: Option<&str>, + expression_names: Option<&HashMap>, + expression_values: Option<&HashMap>, + return_values_on_failure: Option<&str>, + ) -> Result<(), CancellationReason> { + let Some(condition_str) = condition_expression else { + return Ok(()); + }; + + let table = self + .state + .require_table(table_name) + .map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.message.clone()), + item: None, + })?; + let pk = extract_primary_key(&table.key_schema, key).map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.to_string()), + item: None, + })?; + let existing = table.storage.get_item(&pk); + let empty = HashMap::new(); + let item_ref = existing.as_ref().unwrap_or(&empty); + let empty_names = HashMap::new(); + let empty_values = HashMap::new(); + let names = expression_names.unwrap_or(&empty_names); + let values = expression_values.unwrap_or(&empty_values); + + let expr = parse_condition(condition_str).map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.to_string()), + item: None, + })?; + let ctx = EvalContext { + item: item_ref, + names, + values, + }; + let result = ctx.evaluate(&expr).map_err(|e| CancellationReason { + code: Some("ValidationException".to_owned()), + message: Some(e.to_string()), + item: None, + })?; + + if !result { + let return_item = if return_values_on_failure == Some("ALL_OLD") { + existing + } else { + None + }; + return Err(CancellationReason { + code: Some("ConditionalCheckFailed".to_owned()), + message: Some("The conditional request failed".to_owned()), + item: return_item, + }); + } + + Ok(()) + } +} + // --------------------------------------------------------------------------- // Legacy API conversion functions // --------------------------------------------------------------------------- @@ -2287,7 
+2981,8 @@ fn build_condition_fragment( values.insert(val_ph.clone(), v.clone()); } format!( - "(attribute_exists({name_placeholder}) AND NOT contains({name_placeholder}, {val_ph}))" + "(attribute_exists({name_placeholder}) AND NOT contains({name_placeholder}, \ + {val_ph}))" ) } ComparisonOperator::BeginsWith => { @@ -2380,8 +3075,8 @@ fn validate_return_values_on_condition_check_failure( if v != "NONE" && v != "ALL_OLD" { return Err(DynamoDBError::validation(format!( "1 validation error detected: Value '{v}' at \ - 'returnValuesOnConditionCheckFailure' failed to satisfy constraint: \ - Member must satisfy enum value set: [NONE, ALL_OLD]" + 'returnValuesOnConditionCheckFailure' failed to satisfy constraint: Member must \ + satisfy enum value set: [NONE, ALL_OLD]" ))); } } @@ -2422,27 +3117,24 @@ fn validate_comparison_operator( | ComparisonOperator::BeginsWith => { if count != 1 { return Err(DynamoDBError::validation(format!( - "One or more parameter values were invalid: \ - Invalid number of argument(s) for the {comp_op} \ - ComparisonOperator" + "One or more parameter values were invalid: Invalid number of argument(s) for \ + the {comp_op} ComparisonOperator" ))); } } ComparisonOperator::Contains | ComparisonOperator::NotContains => { if count != 1 { return Err(DynamoDBError::validation(format!( - "One or more parameter values were invalid: \ - Invalid number of argument(s) for the {comp_op} \ - ComparisonOperator" + "One or more parameter values were invalid: Invalid number of argument(s) for \ + the {comp_op} ComparisonOperator" ))); } // CONTAINS/NOT_CONTAINS only accept scalar types (S, N, B). 
if let Some(val) = value_list.first() { if !is_scalar_attribute_value(val) { return Err(DynamoDBError::validation(format!( - "One or more parameter values were invalid: \ - ComparisonOperator {comp_op} is not valid for {val_type} \ - AttributeValue type", + "One or more parameter values were invalid: ComparisonOperator {comp_op} \ + is not valid for {val_type} AttributeValue type", val_type = val.type_descriptor(), ))); } @@ -2451,27 +3143,24 @@ fn validate_comparison_operator( ComparisonOperator::Between => { if count != 2 { return Err(DynamoDBError::validation( - "One or more parameter values were invalid: \ - Invalid number of argument(s) for the BETWEEN \ - ComparisonOperator", + "One or more parameter values were invalid: Invalid number of argument(s) for \ + the BETWEEN ComparisonOperator", )); } } ComparisonOperator::In => { if count == 0 { return Err(DynamoDBError::validation( - "One or more parameter values were invalid: \ - Invalid number of argument(s) for the IN \ - ComparisonOperator", + "One or more parameter values were invalid: Invalid number of argument(s) for \ + the IN ComparisonOperator", )); } // IN requires all values to be scalar and of the same type. for val in value_list { if !is_scalar_attribute_value(val) { return Err(DynamoDBError::validation( - "One or more parameter values were invalid: \ - ComparisonOperator IN is not valid for non-scalar \ - AttributeValue type", + "One or more parameter values were invalid: ComparisonOperator IN is not \ + valid for non-scalar AttributeValue type", )); } } @@ -2481,9 +3170,8 @@ fn validate_comparison_operator( for val in &value_list[1..] 
{ if val.type_descriptor() != first_type { return Err(DynamoDBError::validation( - "One or more parameter values were invalid: \ - AttributeValues inside AttributeValueList must all \ - be of the same type", + "One or more parameter values were invalid: AttributeValues inside \ + AttributeValueList must all be of the same type", )); } } @@ -2492,9 +3180,8 @@ fn validate_comparison_operator( ComparisonOperator::Null | ComparisonOperator::NotNull => { if count != 0 { return Err(DynamoDBError::validation(format!( - "One or more parameter values were invalid: \ - Invalid number of argument(s) for the {comp_op} \ - ComparisonOperator" + "One or more parameter values were invalid: Invalid number of argument(s) for \ + the {comp_op} ComparisonOperator" ))); } } @@ -2525,15 +3212,14 @@ fn validate_expected( } else if exp.exists == Some(true) && exp.value.is_none() { // Exists:True without Value is a validation error. return Err(DynamoDBError::validation(format!( - "One or more parameter values were invalid: \ - Exists is set to TRUE for attribute ({attr_name}), \ - Value must also be set" + "One or more parameter values were invalid: Exists is set to TRUE for attribute \ + ({attr_name}), Value must also be set" ))); } else if exp.exists == Some(false) && exp.value.is_some() { // Exists:False with Value is a validation error. return Err(DynamoDBError::validation(format!( - "One or more parameter values were invalid: \ - Value cannot be used when Exists is set to FALSE for attribute ({attr_name})" + "One or more parameter values were invalid: Value cannot be used when Exists is \ + set to FALSE for attribute ({attr_name})" ))); } } @@ -2552,8 +3238,8 @@ fn validate_no_empty_sets(values: &HashMap) -> Result<() for (key, val) in values { if is_empty_set(val) { return Err(DynamoDBError::validation(format!( - "One or more parameter values are not valid. 
The AttributeValue for a member \ - of the ExpressionAttributeValues ({key}) contains an empty set" + "One or more parameter values are not valid. The AttributeValue for a member of \ + the ExpressionAttributeValues ({key}) contains an empty set" ))); } } @@ -2619,8 +3305,8 @@ fn validate_item_no_empty_sets( for val in item.values() { if contains_empty_set(val) { return Err(DynamoDBError::validation( - "One or more parameter values were invalid: An number of elements of the \ - input set is empty", + "One or more parameter values were invalid: An number of elements of the input \ + set is empty", )); } } @@ -2698,8 +3384,8 @@ fn validate_no_unresolved_names( for name in used_names { if name.starts_with('#') && !provided_names.contains_key(name.as_str()) { return Err(DynamoDBError::validation(format!( - "Value provided in ExpressionAttributeNames unused in expressions: \ - unresolved attribute name reference: {name}" + "Value provided in ExpressionAttributeNames unused in expressions: unresolved \ + attribute name reference: {name}" ))); } } @@ -2845,14 +3531,14 @@ fn validate_update_paths( for j in (i + 1)..resolved.len() { if paths_overlap(&resolved[i], &resolved[j]) { return Err(DynamoDBError::validation( - "Invalid UpdateExpression: Two document paths overlap with each other; \ - must remove or rewrite one of these paths", + "Invalid UpdateExpression: Two document paths overlap with each other; must \ + remove or rewrite one of these paths", )); } if paths_conflict(&resolved[i], &resolved[j]) { return Err(DynamoDBError::validation( - "Invalid UpdateExpression: Two document paths conflict with each other; \ - must remove or rewrite one of these paths", + "Invalid UpdateExpression: Two document paths conflict with each other; must \ + remove or rewrite one of these paths", )); } } @@ -3047,12 +3733,12 @@ fn merge_attribute_values(target: &mut AttributeValue, source: AttributeValue) { /// - `NONE` / `None`: empty map /// - `ALL_OLD`: all attributes of the old item 
(or empty if no old item) /// - `ALL_NEW`: all attributes of the new item -/// - `UPDATED_OLD`: for each path targeted by the update expression, return -/// the old value if it existed (before the update). Only returns the -/// specific nested sub-path, not the entire top-level attribute. -/// - `UPDATED_NEW`: for each path targeted by the update expression, return -/// the new value if it exists (after the update). For REMOVE'd attributes, -/// the path no longer exists so it is not returned. +/// - `UPDATED_OLD`: for each path targeted by the update expression, return the old value if it +/// existed (before the update). Only returns the specific nested sub-path, not the entire +/// top-level attribute. +/// - `UPDATED_NEW`: for each path targeted by the update expression, return the new value if it +/// exists (after the update). For REMOVE'd attributes, the path no longer exists so it is not +/// returned. fn compute_update_return_values( return_values: Option<&ReturnValue>, old_item: Option<&HashMap>, @@ -4045,14 +4731,17 @@ fn gsi_build_last_key( #[cfg(test)] mod tests { - use super::*; - use ruststack_dynamodb_model::error::DynamoDBErrorCode; - use ruststack_dynamodb_model::input::{BatchWriteItemInput, CreateTableInput, UpdateItemInput}; - use ruststack_dynamodb_model::types::{ - AttributeDefinition, KeySchemaElement, KeyType, PutRequest, ScalarAttributeType, - WriteRequest, + use ruststack_dynamodb_model::{ + error::DynamoDBErrorCode, + input::{BatchWriteItemInput, CreateTableInput, UpdateItemInput}, + types::{ + AttributeDefinition, KeySchemaElement, KeyType, PutRequest, ScalarAttributeType, + WriteRequest, + }, }; + use super::*; + /// Create a provider with a pre-configured test table named "TestTable". 
fn setup_provider_with_table() -> RustStackDynamoDB { let provider = RustStackDynamoDB::new(DynamoDBConfig::default()); diff --git a/crates/ruststack-dynamodb-core/src/state.rs b/crates/ruststack-dynamodb-core/src/state.rs index e51a9c7..ff997c8 100644 --- a/crates/ruststack-dynamodb-core/src/state.rs +++ b/crates/ruststack-dynamodb-core/src/state.rs @@ -3,14 +3,15 @@ use std::sync::Arc; use dashmap::DashMap; - -use ruststack_dynamodb_model::error::DynamoDBError; -use ruststack_dynamodb_model::types::{ - AttributeDefinition, BillingMode, BillingModeSummary, GlobalSecondaryIndex, - GlobalSecondaryIndexDescription, IndexStatus, KeySchemaElement, LocalSecondaryIndex, - LocalSecondaryIndexDescription, ProvisionedThroughput, ProvisionedThroughputDescription, - SSEDescription, SSESpecification, SseStatus, SseType, StreamSpecification, TableDescription, - TableStatus, Tag, +use ruststack_dynamodb_model::{ + error::DynamoDBError, + types::{ + AttributeDefinition, BillingMode, BillingModeSummary, GlobalSecondaryIndex, + GlobalSecondaryIndexDescription, IndexStatus, KeySchemaElement, LocalSecondaryIndex, + LocalSecondaryIndexDescription, ProvisionedThroughput, ProvisionedThroughputDescription, + SSEDescription, SSESpecification, SseStatus, SseType, StreamSpecification, + TableDescription, TableStatus, Tag, TimeToLiveSpecification, + }, }; use crate::storage::{KeySchema, TableStorage}; @@ -119,6 +120,8 @@ pub struct DynamoDBTable { pub sse_specification: Option, /// Tags. pub tags: parking_lot::RwLock>, + /// Time-to-Live specification. + pub ttl: parking_lot::RwLock>, /// Table ARN. pub arn: String, /// Stable table ID (UUID v4), assigned at creation time. diff --git a/crates/ruststack-dynamodb-core/src/storage.rs b/crates/ruststack-dynamodb-core/src/storage.rs index dba032d..b5e3f09 100644 --- a/crates/ruststack-dynamodb-core/src/storage.rs +++ b/crates/ruststack-dynamodb-core/src/storage.rs @@ -10,27 +10,26 @@ //! DashMap> //! ``` //! -//! 
- Partition-level concurrency: different partitions can be read/written -//! concurrently without contention. -//! - Sort key ordering: within each partition, items are stored in a `BTreeMap` -//! keyed by [`SortableAttributeValue`], which implements [`Ord`] following -//! DynamoDB comparison rules. -//! - For tables without a sort key, a sentinel value is used as the single -//! BTreeMap key per partition. - -use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap}; -use std::hash::{DefaultHasher, Hash, Hasher}; -use std::ops::Bound; -use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +//! - Partition-level concurrency: different partitions can be read/written concurrently without +//! contention. +//! - Sort key ordering: within each partition, items are stored in a `BTreeMap` keyed by +//! [`SortableAttributeValue`], which implements [`Ord`] following DynamoDB comparison rules. +//! - For tables without a sort key, a sentinel value is used as the single BTreeMap key per +//! partition. + +use std::{ + cmp::Ordering, + collections::{BTreeMap, HashMap}, + hash::{DefaultHasher, Hash, Hasher}, + ops::Bound, + sync::atomic::{AtomicU64, Ordering as AtomicOrdering}, +}; use dashmap::DashMap; +use ruststack_dynamodb_model::{AttributeValue, types::ScalarAttributeType}; use thiserror::Error; use tracing::debug; -use ruststack_dynamodb_model::AttributeValue; -use ruststack_dynamodb_model::types::ScalarAttributeType; - // --------------------------------------------------------------------------- // Errors // --------------------------------------------------------------------------- @@ -97,12 +96,11 @@ pub struct PrimaryKey { /// /// Ordering rules follow DynamoDB semantics: /// - **Strings (S)**: UTF-8 byte ordering. -/// - **Numbers (N)**: Numeric ordering (parsed as `f64`). Full 38-digit -/// precision would require `bigdecimal`, but `f64` is sufficient for -/// local development use cases. 
+/// - **Numbers (N)**: Numeric ordering (parsed as `f64`). Full 38-digit precision would require +/// `bigdecimal`, but `f64` is sufficient for local development use cases. /// - **Binary (B)**: Byte-by-byte unsigned ordering. -/// - **Sentinel**: A special value used when the table has no sort key. -/// It always compares equal to itself. +/// - **Sentinel**: A special value used when the table has no sort key. It always compares equal to +/// itself. #[derive(Debug, Clone)] pub enum SortableAttributeValue { /// String sort key. diff --git a/crates/ruststack-dynamodb-http/src/body.rs b/crates/ruststack-dynamodb-http/src/body.rs index 881aaa0..fb8e065 100644 --- a/crates/ruststack-dynamodb-http/src/body.rs +++ b/crates/ruststack-dynamodb-http/src/body.rs @@ -1,7 +1,9 @@ //! DynamoDB HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-dynamodb-http/src/dispatch.rs b/crates/ruststack-dynamodb-http/src/dispatch.rs index 423d57a..81a0af0 100644 --- a/crates/ruststack-dynamodb-http/src/dispatch.rs +++ b/crates/ruststack-dynamodb-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! DynamoDB handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_dynamodb_model::error::DynamoDBError; -use ruststack_dynamodb_model::operations::DynamoDBOperation; +use ruststack_dynamodb_model::{error::DynamoDBError, operations::DynamoDBOperation}; use crate::body::DynamoDBResponseBody; diff --git a/crates/ruststack-dynamodb-http/src/response.rs b/crates/ruststack-dynamodb-http/src/response.rs index 48f5b94..386b185 100644 --- a/crates/ruststack-dynamodb-http/src/response.rs +++ b/crates/ruststack-dynamodb-http/src/response.rs @@ -26,6 +26,10 @@ pub fn error_to_json(error: &DynamoDBError) -> Vec { if let Some(ref item) = error.item { obj["Item"] = serde_json::to_value(item).expect("Item serialization cannot fail"); } + if !error.cancellation_reasons.is_empty() { + obj["CancellationReasons"] = serde_json::to_value(&error.cancellation_reasons) + .expect("CancellationReasons serialization cannot fail"); + } serde_json::to_vec(&obj).expect("JSON serialization of error cannot fail") } @@ -75,9 +79,10 @@ pub fn json_response(json: Vec, request_id: &str) -> http::Response>, + /// Cancellation reasons for `TransactionCanceledException`. + pub cancellation_reasons: Vec, } impl fmt::Display for DynamoDBError { @@ -176,6 +177,7 @@ impl DynamoDBError { code, source: None, item: None, + cancellation_reasons: Vec::new(), } } @@ -188,6 +190,7 @@ impl DynamoDBError { code, source: None, item: None, + cancellation_reasons: Vec::new(), } } @@ -206,6 +209,13 @@ impl DynamoDBError { self } + /// Attach cancellation reasons to a `TransactionCanceledException`. + #[must_use] + pub fn with_cancellation_reasons(mut self, reasons: Vec) -> Self { + self.cancellation_reasons = reasons; + self + } + /// Returns the `__type` string for the JSON error response. #[must_use] pub fn error_type(&self) -> &'static str { @@ -259,6 +269,17 @@ impl DynamoDBError { ) } + /// Transaction cancelled with cancellation reasons. 
+ #[must_use] + pub fn transaction_cancelled(reasons: Vec) -> Self { + Self::with_message( + DynamoDBErrorCode::TransactionCanceledException, + "Transaction cancelled, please refer cancellation reasons for specific reasons [See \ + the CancellationReasons field]", + ) + .with_cancellation_reasons(reasons) + } + /// Unknown operation. #[must_use] pub fn unknown_operation(target: &str) -> Self { diff --git a/crates/ruststack-dynamodb-model/src/input.rs b/crates/ruststack-dynamodb-model/src/input.rs index 896f290..43c4c88 100644 --- a/crates/ruststack-dynamodb-model/src/input.rs +++ b/crates/ruststack-dynamodb-model/src/input.rs @@ -8,13 +8,15 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::attribute_value::AttributeValue; -use crate::types::{ - AttributeDefinition, AttributeValueUpdate, BillingMode, Condition, ConditionalOperator, - ExpectedAttributeValue, GlobalSecondaryIndex, KeySchemaElement, KeysAndAttributes, - LocalSecondaryIndex, ProvisionedThroughput, ReturnConsumedCapacity, - ReturnItemCollectionMetrics, ReturnValue, SSESpecification, Select, StreamSpecification, Tag, - WriteRequest, +use crate::{ + attribute_value::AttributeValue, + types::{ + AttributeDefinition, AttributeValueUpdate, BillingMode, Condition, ConditionalOperator, + ExpectedAttributeValue, GlobalSecondaryIndex, KeySchemaElement, KeysAndAttributes, + LocalSecondaryIndex, ProvisionedThroughput, ReturnConsumedCapacity, + ReturnItemCollectionMetrics, ReturnValue, SSESpecification, Select, StreamSpecification, + Tag, TimeToLiveSpecification, TransactGetItem, TransactWriteItem, WriteRequest, + }, }; // --------------------------------------------------------------------------- @@ -487,3 +489,106 @@ pub struct BatchWriteItemInput { #[serde(skip_serializing_if = "Option::is_none")] pub return_item_collection_metrics: Option, } + +// --------------------------------------------------------------------------- +// Tagging +// 
--------------------------------------------------------------------------- + +/// Input for the `TagResource` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TagResourceInput { + /// The ARN of the resource to tag. + pub resource_arn: String, + /// The tags to add to the resource. + pub tags: Vec, +} + +/// Input for the `UntagResource` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct UntagResourceInput { + /// The ARN of the resource to untag. + pub resource_arn: String, + /// The tag keys to remove. + pub tag_keys: Vec, +} + +/// Input for the `ListTagsOfResource` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ListTagsOfResourceInput { + /// The ARN of the resource. + pub resource_arn: String, + /// An optional pagination token. + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +// --------------------------------------------------------------------------- +// Time to Live +// --------------------------------------------------------------------------- + +/// Input for the `UpdateTimeToLive` operation. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct UpdateTimeToLiveInput { + /// The name of the table. + pub table_name: String, + /// The TTL specification to apply. + pub time_to_live_specification: TimeToLiveSpecification, +} + +/// Input for the `DescribeTimeToLive` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DescribeTimeToLiveInput { + /// The name of the table. 
+ pub table_name: String, +} + +// --------------------------------------------------------------------------- +// Transactions +// --------------------------------------------------------------------------- + +/// Input for the `TransactWriteItems` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactWriteItemsInput { + /// The list of write actions to perform atomically. + pub transact_items: Vec, + /// Determines the level of detail about provisioned throughput consumption. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_consumed_capacity: Option, + /// Determines whether item collection metrics are returned. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_item_collection_metrics: Option, + /// An idempotency token for the transaction. + #[serde(skip_serializing_if = "Option::is_none")] + pub client_request_token: Option, +} + +/// Input for the `TransactGetItems` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactGetItemsInput { + /// The list of get actions to perform. + pub transact_items: Vec, + /// Determines the level of detail about provisioned throughput consumption. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_consumed_capacity: Option, +} + +// --------------------------------------------------------------------------- +// Describe operations +// --------------------------------------------------------------------------- + +/// Input for the `DescribeLimits` operation (empty). +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DescribeLimitsInput {} + +/// Input for the `DescribeEndpoints` operation (empty). 
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DescribeEndpointsInput {} diff --git a/crates/ruststack-dynamodb-model/src/operations.rs b/crates/ruststack-dynamodb-model/src/operations.rs index 5c6dae5..124c7ad 100644 --- a/crates/ruststack-dynamodb-model/src/operations.rs +++ b/crates/ruststack-dynamodb-model/src/operations.rs @@ -38,6 +38,32 @@ pub enum DynamoDBOperation { BatchGetItem, /// Batch write (put/delete) items to multiple tables. BatchWriteItem, + + // Tagging + /// Add tags to a resource. + TagResource, + /// Remove tags from a resource. + UntagResource, + /// List tags for a resource. + ListTagsOfResource, + + // Time to Live + /// Describe the TTL settings for a table. + DescribeTimeToLive, + /// Update the TTL settings for a table. + UpdateTimeToLive, + + // Transactions + /// Get items atomically across tables. + TransactGetItems, + /// Write items atomically across tables. + TransactWriteItems, + + // Describe + /// Describe account limits for DynamoDB. + DescribeLimits, + /// Describe regional endpoints for DynamoDB. 
+ DescribeEndpoints, } impl DynamoDBOperation { @@ -58,6 +84,15 @@ impl DynamoDBOperation { Self::Scan => "Scan", Self::BatchGetItem => "BatchGetItem", Self::BatchWriteItem => "BatchWriteItem", + Self::TagResource => "TagResource", + Self::UntagResource => "UntagResource", + Self::ListTagsOfResource => "ListTagsOfResource", + Self::DescribeTimeToLive => "DescribeTimeToLive", + Self::UpdateTimeToLive => "UpdateTimeToLive", + Self::TransactGetItems => "TransactGetItems", + Self::TransactWriteItems => "TransactWriteItems", + Self::DescribeLimits => "DescribeLimits", + Self::DescribeEndpoints => "DescribeEndpoints", } } @@ -78,6 +113,15 @@ impl DynamoDBOperation { "Scan" => Some(Self::Scan), "BatchGetItem" => Some(Self::BatchGetItem), "BatchWriteItem" => Some(Self::BatchWriteItem), + "TagResource" => Some(Self::TagResource), + "UntagResource" => Some(Self::UntagResource), + "ListTagsOfResource" => Some(Self::ListTagsOfResource), + "DescribeTimeToLive" => Some(Self::DescribeTimeToLive), + "UpdateTimeToLive" => Some(Self::UpdateTimeToLive), + "TransactGetItems" => Some(Self::TransactGetItems), + "TransactWriteItems" => Some(Self::TransactWriteItems), + "DescribeLimits" => Some(Self::DescribeLimits), + "DescribeEndpoints" => Some(Self::DescribeEndpoints), _ => None, } } diff --git a/crates/ruststack-dynamodb-model/src/output.rs b/crates/ruststack-dynamodb-model/src/output.rs index 1e4a752..d712317 100644 --- a/crates/ruststack-dynamodb-model/src/output.rs +++ b/crates/ruststack-dynamodb-model/src/output.rs @@ -8,9 +8,12 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::attribute_value::AttributeValue; -use crate::types::{ - ConsumedCapacity, ItemCollectionMetrics, KeysAndAttributes, TableDescription, WriteRequest, +use crate::{ + attribute_value::AttributeValue, + types::{ + ConsumedCapacity, ItemCollectionMetrics, ItemResponse, KeysAndAttributes, TableDescription, + Tag, TimeToLiveDescription, TimeToLiveSpecification, WriteRequest, + }, 
}; // --------------------------------------------------------------------------- @@ -236,3 +239,119 @@ pub struct BatchWriteItemOutput { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub consumed_capacity: Vec, } + +// --------------------------------------------------------------------------- +// Tagging +// --------------------------------------------------------------------------- + +/// Output for the `TagResource` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TagResourceOutput {} + +/// Output for the `UntagResource` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct UntagResourceOutput {} + +/// Output for the `ListTagsOfResource` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ListTagsOfResourceOutput { + /// The tags associated with the resource. + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, + /// A pagination token for subsequent requests. + #[serde(skip_serializing_if = "Option::is_none")] + pub next_token: Option, +} + +// --------------------------------------------------------------------------- +// Time to Live +// --------------------------------------------------------------------------- + +/// Output for the `UpdateTimeToLive` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct UpdateTimeToLiveOutput { + /// The TTL specification that was applied. + #[serde(skip_serializing_if = "Option::is_none")] + pub time_to_live_specification: Option, +} + +/// Output for the `DescribeTimeToLive` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DescribeTimeToLiveOutput { + /// The current TTL description for the table. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub time_to_live_description: Option, +} + +// --------------------------------------------------------------------------- +// Transactions +// --------------------------------------------------------------------------- + +/// Output for the `TransactWriteItems` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactWriteItemsOutput { + /// The capacity units consumed by the operation for each table. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub consumed_capacity: Vec, + /// Item collection metrics for the affected tables. + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub item_collection_metrics: HashMap>, +} + +/// Output for the `TransactGetItems` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactGetItemsOutput { + /// The capacity units consumed by the operation for each table. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub consumed_capacity: Vec, + /// The items retrieved, in the same order as the input. + #[serde(skip_serializing_if = "Option::is_none")] + pub responses: Option>, +} + +// --------------------------------------------------------------------------- +// Describe operations +// --------------------------------------------------------------------------- + +/// Output for the `DescribeLimits` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DescribeLimitsOutput { + /// Maximum read capacity units per account. + #[serde(skip_serializing_if = "Option::is_none")] + pub account_max_read_capacity_units: Option, + /// Maximum write capacity units per account. + #[serde(skip_serializing_if = "Option::is_none")] + pub account_max_write_capacity_units: Option, + /// Maximum read capacity units per table. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub table_max_read_capacity_units: Option, + /// Maximum write capacity units per table. + #[serde(skip_serializing_if = "Option::is_none")] + pub table_max_write_capacity_units: Option, +} + +/// Output for the `DescribeEndpoints` operation. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DescribeEndpointsOutput { + /// The list of endpoints. + pub endpoints: Vec, +} + +/// A DynamoDB endpoint descriptor. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct Endpoint { + /// The endpoint address. + pub address: String, + /// The cache period in minutes. + pub cache_period_in_minutes: i64, +} diff --git a/crates/ruststack-dynamodb-model/src/types.rs b/crates/ruststack-dynamodb-model/src/types.rs index 96e0646..2306af1 100644 --- a/crates/ruststack-dynamodb-model/src/types.rs +++ b/crates/ruststack-dynamodb-model/src/types.rs @@ -898,6 +898,193 @@ pub struct SSEDescription { pub inaccessible_encryption_date_time: Option, } +// --------------------------------------------------------------------------- +// Structs - Time to Live +// --------------------------------------------------------------------------- + +/// Time-to-Live specification for enabling or disabling TTL on a table. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TimeToLiveSpecification { + /// The name of the TTL attribute used to store the expiration time. + pub attribute_name: String, + /// Whether TTL is enabled (`true`) or disabled (`false`). + pub enabled: bool, +} + +/// Time-to-Live description with status information. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TimeToLiveDescription { + /// The name of the TTL attribute. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub attribute_name: Option, + /// The TTL status: `ENABLED`, `DISABLED`, `ENABLING`, or `DISABLING`. + #[serde(skip_serializing_if = "Option::is_none")] + pub time_to_live_status: Option, +} + +// --------------------------------------------------------------------------- +// Structs - Transaction Types +// --------------------------------------------------------------------------- + +/// A single write action within a `TransactWriteItems` request. +/// +/// Exactly one of the four fields must be set. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactWriteItem { + /// A condition check against an existing item (no mutation). + #[serde(skip_serializing_if = "Option::is_none")] + pub condition_check: Option, + /// A put (insert or replace) action. + #[serde(skip_serializing_if = "Option::is_none")] + pub put: Option, + /// A delete action. + #[serde(skip_serializing_if = "Option::is_none")] + pub delete: Option, + /// An update action. + #[serde(skip_serializing_if = "Option::is_none")] + pub update: Option, +} + +/// A condition check within a transaction (no mutation). +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ConditionCheck { + /// The table containing the item. + pub table_name: String, + /// The primary key of the item to check. + pub key: HashMap, + /// The condition expression that must evaluate to true. + pub condition_expression: String, + /// Substitution tokens for attribute names. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_names: Option>, + /// Substitution tokens for attribute values. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_values: Option>, + /// Determines whether to return item attributes on condition check failure. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub return_values_on_condition_check_failure: Option, +} + +/// A put action within a transaction. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactPut { + /// The table to put the item into. + pub table_name: String, + /// The item to put. + pub item: HashMap, + /// An optional condition expression. + #[serde(skip_serializing_if = "Option::is_none")] + pub condition_expression: Option, + /// Substitution tokens for attribute names. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_names: Option>, + /// Substitution tokens for attribute values. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_values: Option>, + /// Determines whether to return item attributes on condition check failure. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_values_on_condition_check_failure: Option, +} + +/// A delete action within a transaction. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactDelete { + /// The table containing the item to delete. + pub table_name: String, + /// The primary key of the item to delete. + pub key: HashMap, + /// An optional condition expression. + #[serde(skip_serializing_if = "Option::is_none")] + pub condition_expression: Option, + /// Substitution tokens for attribute names. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_names: Option>, + /// Substitution tokens for attribute values. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_values: Option>, + /// Determines whether to return item attributes on condition check failure. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_values_on_condition_check_failure: Option, +} + +/// An update action within a transaction. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactUpdate { + /// The table containing the item to update. + pub table_name: String, + /// The primary key of the item to update. + pub key: HashMap, + /// The update expression defining the mutations. + pub update_expression: String, + /// An optional condition expression. + #[serde(skip_serializing_if = "Option::is_none")] + pub condition_expression: Option, + /// Substitution tokens for attribute names. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_names: Option>, + /// Substitution tokens for attribute values. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_values: Option>, + /// Determines whether to return item attributes on condition check failure. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_values_on_condition_check_failure: Option, +} + +/// A single get action within a `TransactGetItems` request. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TransactGetItem { + /// The get operation to perform. + pub get: Get, +} + +/// A get operation targeting a single item by primary key. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct Get { + /// The table containing the item. + pub table_name: String, + /// The primary key of the item to retrieve. + pub key: HashMap, + /// A projection expression to limit returned attributes. + #[serde(skip_serializing_if = "Option::is_none")] + pub projection_expression: Option, + /// Substitution tokens for attribute names. + #[serde(skip_serializing_if = "Option::is_none")] + pub expression_attribute_names: Option>, +} + +/// A reason why a transaction item was cancelled. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct CancellationReason { + /// The cancellation reason code. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub code: Option, + /// A human-readable cancellation reason message. + #[serde(skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The item that failed the condition check, if applicable. + #[serde(skip_serializing_if = "Option::is_none")] + pub item: Option>, +} + +/// A response item from a `TransactGetItems` operation. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ItemResponse { + /// The retrieved item, or `None` if not found. + #[serde(skip_serializing_if = "Option::is_none")] + pub item: Option>, +} + // --------------------------------------------------------------------------- // Structs - Tags // --------------------------------------------------------------------------- diff --git a/crates/ruststack-dynamodbstreams-core/src/handler.rs b/crates/ruststack-dynamodbstreams-core/src/handler.rs index b23ef5e..7cf1808 100644 --- a/crates/ruststack-dynamodbstreams-core/src/handler.rs +++ b/crates/ruststack-dynamodbstreams-core/src/handler.rs @@ -1,16 +1,14 @@ //! DynamoDB Streams handler implementation bridging HTTP to business logic. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_dynamodbstreams_http::body::DynamoDBStreamsResponseBody; -use ruststack_dynamodbstreams_http::dispatch::DynamoDBStreamsHandler; -use ruststack_dynamodbstreams_http::response::json_response; -use ruststack_dynamodbstreams_model::error::DynamoDBStreamsError; -use ruststack_dynamodbstreams_model::operations::DynamoDBStreamsOperation; +use ruststack_dynamodbstreams_http::{ + body::DynamoDBStreamsResponseBody, dispatch::DynamoDBStreamsHandler, response::json_response, +}; +use ruststack_dynamodbstreams_model::{ + error::DynamoDBStreamsError, operations::DynamoDBStreamsOperation, +}; use crate::provider::RustStackDynamoDBStreams; diff --git a/crates/ruststack-dynamodbstreams-core/src/provider.rs b/crates/ruststack-dynamodbstreams-core/src/provider.rs index a4621da..820cd86 100644 --- a/crates/ruststack-dynamodbstreams-core/src/provider.rs +++ b/crates/ruststack-dynamodbstreams-core/src/provider.rs @@ -1,24 +1,23 @@ //! DynamoDB Streams provider implementing all 4 operations. 
-use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use ruststack_dynamodb_model::AttributeValue; -use ruststack_dynamodbstreams_model::error::DynamoDBStreamsError; -use ruststack_dynamodbstreams_model::input::{ - DescribeStreamInput, GetRecordsInput, GetShardIteratorInput, ListStreamsInput, -}; -use ruststack_dynamodbstreams_model::output::{ - DescribeStreamOutput, GetRecordsOutput, GetShardIteratorOutput, ListStreamsOutput, -}; -use ruststack_dynamodbstreams_model::types::{ - self as streams_types, OperationType, Record, SequenceNumberRange, Shard, Stream, - StreamDescription, StreamRecord, StreamStatus, +use ruststack_dynamodbstreams_model::{ + error::DynamoDBStreamsError, + input::{DescribeStreamInput, GetRecordsInput, GetShardIteratorInput, ListStreamsInput}, + output::{DescribeStreamOutput, GetRecordsOutput, GetShardIteratorOutput, ListStreamsOutput}, + types::{ + self as streams_types, OperationType, Record, SequenceNumberRange, Shard, Stream, + StreamDescription, StreamRecord, StreamStatus, + }, }; -use crate::config::DynamoDBStreamsConfig; -use crate::iterator::{decode_iterator, encode_iterator}; -use crate::storage::{StreamChangeRecord, StreamStore}; +use crate::{ + config::DynamoDBStreamsConfig, + iterator::{decode_iterator, encode_iterator}, + storage::{StreamChangeRecord, StreamStore}, +}; /// Main DynamoDB Streams provider implementing all 4 operations. 
#[derive(Debug)] diff --git a/crates/ruststack-dynamodbstreams-core/src/storage.rs b/crates/ruststack-dynamodbstreams-core/src/storage.rs index c3c2a14..5c7b928 100644 --- a/crates/ruststack-dynamodbstreams-core/src/storage.rs +++ b/crates/ruststack-dynamodbstreams-core/src/storage.rs @@ -6,7 +6,6 @@ use std::collections::{HashMap, VecDeque}; use dashmap::DashMap; use parking_lot::RwLock; - use ruststack_dynamodb_core::stream::ChangeEvent; use ruststack_dynamodb_model::AttributeValue; use ruststack_dynamodbstreams_model::types::{ @@ -363,9 +362,10 @@ fn convert_stream_view_type( #[cfg(test)] mod tests { - use super::*; use ruststack_dynamodb_core::stream::ChangeEventName; + use super::*; + #[test] fn test_should_create_and_list_streams() { let store = StreamStore::new(); diff --git a/crates/ruststack-dynamodbstreams-http/src/body.rs b/crates/ruststack-dynamodbstreams-http/src/body.rs index c2b9504..078a888 100644 --- a/crates/ruststack-dynamodbstreams-http/src/body.rs +++ b/crates/ruststack-dynamodbstreams-http/src/body.rs @@ -1,7 +1,9 @@ //! DynamoDB Streams HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-dynamodbstreams-http/src/dispatch.rs b/crates/ruststack-dynamodbstreams-http/src/dispatch.rs index 30ff9f3..d39fdb5 100644 --- a/crates/ruststack-dynamodbstreams-http/src/dispatch.rs +++ b/crates/ruststack-dynamodbstreams-http/src/dispatch.rs @@ -1,12 +1,11 @@ //! DynamoDB Streams handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_dynamodbstreams_model::error::DynamoDBStreamsError; -use ruststack_dynamodbstreams_model::operations::DynamoDBStreamsOperation; +use ruststack_dynamodbstreams_model::{ + error::DynamoDBStreamsError, operations::DynamoDBStreamsOperation, +}; use crate::body::DynamoDBStreamsResponseBody; diff --git a/crates/ruststack-dynamodbstreams-http/src/response.rs b/crates/ruststack-dynamodbstreams-http/src/response.rs index 9bb3fd7..e72a518 100644 --- a/crates/ruststack-dynamodbstreams-http/src/response.rs +++ b/crates/ruststack-dynamodbstreams-http/src/response.rs @@ -77,9 +77,10 @@ pub fn json_response( #[cfg(test)] mod tests { - use super::*; use ruststack_dynamodbstreams_model::error::DynamoDBStreamsErrorCode; + use super::*; + #[test] fn test_should_format_error_json() { let err = DynamoDBStreamsError::with_message( diff --git a/crates/ruststack-dynamodbstreams-http/src/router.rs b/crates/ruststack-dynamodbstreams-http/src/router.rs index 6719482..cb40a49 100644 --- a/crates/ruststack-dynamodbstreams-http/src/router.rs +++ b/crates/ruststack-dynamodbstreams-http/src/router.rs @@ -7,8 +7,9 @@ //! X-Amz-Target: DynamoDBStreams_20120810.DescribeStream //! ``` -use ruststack_dynamodbstreams_model::error::DynamoDBStreamsError; -use ruststack_dynamodbstreams_model::operations::DynamoDBStreamsOperation; +use ruststack_dynamodbstreams_model::{ + error::DynamoDBStreamsError, operations::DynamoDBStreamsOperation, +}; /// The expected prefix for the `X-Amz-Target` header value. const TARGET_PREFIX: &str = "DynamoDBStreams_20120810."; diff --git a/crates/ruststack-dynamodbstreams-http/src/service.rs b/crates/ruststack-dynamodbstreams-http/src/service.rs index 7140984..52a2780 100644 --- a/crates/ruststack-dynamodbstreams-http/src/service.rs +++ b/crates/ruststack-dynamodbstreams-http/src/service.rs @@ -1,20 +1,18 @@ //! 
DynamoDB Streams HTTP service implementing the hyper `Service` trait. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_dynamodbstreams_model::error::DynamoDBStreamsError; -use crate::body::DynamoDBStreamsResponseBody; -use crate::dispatch::{DynamoDBStreamsHandler, dispatch_operation}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::DynamoDBStreamsResponseBody, + dispatch::{DynamoDBStreamsHandler, dispatch_operation}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the DynamoDB Streams HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-events-core/src/handler.rs b/crates/ruststack-events-core/src/handler.rs index 753569c..d9ad2af 100644 --- a/crates/ruststack-events-core/src/handler.rs +++ b/crates/ruststack-events-core/src/handler.rs @@ -1,16 +1,12 @@ //! EventBridge handler implementation bridging HTTP to business logic. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_events_http::body::EventsResponseBody; -use ruststack_events_http::dispatch::EventsHandler; -use ruststack_events_http::response::json_response; -use ruststack_events_model::error::EventsError; -use ruststack_events_model::operations::EventsOperation; +use ruststack_events_http::{ + body::EventsResponseBody, dispatch::EventsHandler, response::json_response, +}; +use ruststack_events_model::{error::EventsError, operations::EventsOperation}; use crate::provider::RustStackEvents; @@ -180,33 +176,143 @@ fn dispatch( serialize(&output, &request_id) } - // Phase 3: Stubs - return empty JSON object - EventsOperation::CreateArchive - | EventsOperation::DeleteArchive - | EventsOperation::DescribeArchive - | EventsOperation::ListArchives - | EventsOperation::UpdateArchive - | EventsOperation::StartReplay - | EventsOperation::CancelReplay - | EventsOperation::DescribeReplay - | EventsOperation::ListReplays - | EventsOperation::CreateApiDestination - | EventsOperation::DeleteApiDestination - | EventsOperation::DescribeApiDestination - | EventsOperation::ListApiDestinations - | EventsOperation::UpdateApiDestination - | EventsOperation::CreateConnection - | EventsOperation::DeleteConnection - | EventsOperation::DescribeConnection - | EventsOperation::ListConnections - | EventsOperation::UpdateConnection - | EventsOperation::DeauthorizeConnection - | EventsOperation::CreateEndpoint - | EventsOperation::DeleteEndpoint - | EventsOperation::DescribeEndpoint - | EventsOperation::ListEndpoints - | EventsOperation::UpdateEndpoint - | EventsOperation::ActivateEventSource + // Phase 3: Archives + EventsOperation::CreateArchive => { + let input = deserialize(body)?; + let output = provider.handle_create_archive(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DeleteArchive => { + let input = deserialize(body)?; + let 
output = provider.handle_delete_archive(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DescribeArchive => { + let input = deserialize(body)?; + let output = provider.handle_describe_archive(&input)?; + serialize(&output, &request_id) + } + EventsOperation::ListArchives => { + let input = deserialize(body)?; + let output = provider.handle_list_archives(&input)?; + serialize(&output, &request_id) + } + EventsOperation::UpdateArchive => { + let input = deserialize(body)?; + let output = provider.handle_update_archive(&input)?; + serialize(&output, &request_id) + } + + // Phase 3: Replays + EventsOperation::StartReplay => { + let input = deserialize(body)?; + let output = provider.handle_start_replay(&input)?; + serialize(&output, &request_id) + } + EventsOperation::CancelReplay => { + let input = deserialize(body)?; + let output = provider.handle_cancel_replay(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DescribeReplay => { + let input = deserialize(body)?; + let output = provider.handle_describe_replay(&input)?; + serialize(&output, &request_id) + } + EventsOperation::ListReplays => { + let input = deserialize(body)?; + let output = provider.handle_list_replays(&input)?; + serialize(&output, &request_id) + } + + // Phase 3: API Destinations + EventsOperation::CreateApiDestination => { + let input = deserialize(body)?; + let output = provider.handle_create_api_destination(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DeleteApiDestination => { + let input = deserialize(body)?; + let output = provider.handle_delete_api_destination(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DescribeApiDestination => { + let input = deserialize(body)?; + let output = provider.handle_describe_api_destination(&input)?; + serialize(&output, &request_id) + } + EventsOperation::ListApiDestinations => { + let input = deserialize(body)?; + let output = provider.handle_list_api_destinations(&input)?; + 
serialize(&output, &request_id) + } + EventsOperation::UpdateApiDestination => { + let input = deserialize(body)?; + let output = provider.handle_update_api_destination(&input)?; + serialize(&output, &request_id) + } + + // Phase 3: Connections + EventsOperation::CreateConnection => { + let input = deserialize(body)?; + let output = provider.handle_create_connection(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DeleteConnection => { + let input = deserialize(body)?; + let output = provider.handle_delete_connection(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DescribeConnection => { + let input = deserialize(body)?; + let output = provider.handle_describe_connection(&input)?; + serialize(&output, &request_id) + } + EventsOperation::ListConnections => { + let input = deserialize(body)?; + let output = provider.handle_list_connections(&input)?; + serialize(&output, &request_id) + } + EventsOperation::UpdateConnection => { + let input = deserialize(body)?; + let output = provider.handle_update_connection(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DeauthorizeConnection => { + let input = deserialize(body)?; + let output = provider.handle_deauthorize_connection(&input)?; + serialize(&output, &request_id) + } + + // Phase 3: Endpoints + EventsOperation::CreateEndpoint => { + let input = deserialize(body)?; + let output = provider.handle_create_endpoint(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DeleteEndpoint => { + let input = deserialize(body)?; + let output = provider.handle_delete_endpoint(&input)?; + serialize(&output, &request_id) + } + EventsOperation::DescribeEndpoint => { + let input = deserialize(body)?; + let output = provider.handle_describe_endpoint(&input)?; + serialize(&output, &request_id) + } + EventsOperation::ListEndpoints => { + let input = deserialize(body)?; + let output = provider.handle_list_endpoints(&input)?; + serialize(&output, &request_id) + } + 
EventsOperation::UpdateEndpoint => { + let input = deserialize(body)?; + let output = provider.handle_update_endpoint(&input)?; + serialize(&output, &request_id) + } + + // Phase 3: Partner event sources stubs - return empty JSON + EventsOperation::ActivateEventSource | EventsOperation::CreatePartnerEventSource | EventsOperation::DeactivateEventSource | EventsOperation::DeletePartnerEventSource diff --git a/crates/ruststack-events-core/src/pattern/engine.rs b/crates/ruststack-events-core/src/pattern/engine.rs index bdfd829..3c82cc4 100644 --- a/crates/ruststack-events-core/src/pattern/engine.rs +++ b/crates/ruststack-events-core/src/pattern/engine.rs @@ -7,8 +7,10 @@ use serde_json::Value; -use super::operators::match_single_value; -use super::value::{EventPattern, FieldMatcher, MatchCondition, PatternNode}; +use super::{ + operators::match_single_value, + value::{EventPattern, FieldMatcher, MatchCondition, PatternNode}, +}; /// Match an event pattern against a JSON event. /// diff --git a/crates/ruststack-events-core/src/provider.rs b/crates/ruststack-events-core/src/provider.rs index 089cb08..1363291 100644 --- a/crates/ruststack-events-core/src/provider.rs +++ b/crates/ruststack-events-core/src/provider.rs @@ -4,36 +4,31 @@ //! the design simple without an actor model. Pattern matching is synchronous; //! only delivery to targets is asynchronous. 
-use std::collections::HashMap; -use std::sync::Arc; - -use dashmap::DashMap; -use dashmap::mapref::entry::Entry; - -use ruststack_events_model::error::EventsError; -use ruststack_events_model::input::{ - CreateEventBusInput, DeleteEventBusInput, DeleteRuleInput, DescribeEventBusInput, - DescribeRuleInput, DisableRuleInput, EnableRuleInput, ListEventBusesInput, - ListRuleNamesByTargetInput, ListRulesInput, ListTagsForResourceInput, ListTargetsByRuleInput, - PutEventsInput, PutPermissionInput, PutRuleInput, PutTargetsInput, RemovePermissionInput, - RemoveTargetsInput, TagResourceInput, TestEventPatternInput, UntagResourceInput, - UpdateEventBusInput, -}; -use ruststack_events_model::output::{ - CreateEventBusOutput, DeleteEventBusOutput, DeleteRuleOutput, DescribeEventBusOutput, - DescribeRuleOutput, DisableRuleOutput, EnableRuleOutput, ListEventBusesOutput, - ListRuleNamesByTargetOutput, ListRulesOutput, ListTagsForResourceOutput, - ListTargetsByRuleOutput, PutEventsOutput, PutPermissionOutput, PutRuleOutput, PutTargetsOutput, - RemovePermissionOutput, RemoveTargetsOutput, TagResourceOutput, TestEventPatternOutput, - UntagResourceOutput, UpdateEventBusOutput, -}; -use ruststack_events_model::types::{ - EventBus, InputTransformer, PutEventsResultEntry, Rule, Tag, Target, +use std::{collections::HashMap, sync::Arc}; + +use dashmap::{DashMap, mapref::entry::Entry}; +use ruststack_events_model::{ + error::EventsError, + input::{ + CreateEventBusInput, DeleteEventBusInput, DeleteRuleInput, DescribeEventBusInput, + DescribeRuleInput, DisableRuleInput, EnableRuleInput, GenericInput, ListEventBusesInput, + ListRuleNamesByTargetInput, ListRulesInput, ListTagsForResourceInput, + ListTargetsByRuleInput, PutEventsInput, PutPermissionInput, PutRuleInput, PutTargetsInput, + RemovePermissionInput, RemoveTargetsInput, TagResourceInput, TestEventPatternInput, + UntagResourceInput, UpdateEventBusInput, + }, + output::{ + CreateEventBusOutput, DeleteEventBusOutput, DeleteRuleOutput, 
DescribeEventBusOutput, + DescribeRuleOutput, DisableRuleOutput, EnableRuleOutput, GenericOutput, + ListEventBusesOutput, ListRuleNamesByTargetOutput, ListRulesOutput, + ListTagsForResourceOutput, ListTargetsByRuleOutput, PutEventsOutput, PutPermissionOutput, + PutRuleOutput, PutTargetsOutput, RemovePermissionOutput, RemoveTargetsOutput, + TagResourceOutput, TestEventPatternOutput, UntagResourceOutput, UpdateEventBusOutput, + }, + types::{EventBus, InputTransformer, PutEventsResultEntry, Rule, Tag, Target}, }; -use crate::config::EventsConfig; -use crate::delivery::TargetDelivery; -use crate::pattern::EventPattern; +use crate::{config::EventsConfig, delivery::TargetDelivery, pattern::EventPattern}; /// Maximum number of entries per `PutEvents` call. const MAX_PUT_EVENTS_ENTRIES: usize = 10; @@ -108,6 +103,16 @@ pub struct RustStackEvents { config: EventsConfig, buses: DashMap, delivery: Arc, + /// Phase 3: Archive metadata storage (key = archive name). + archives: DashMap, + /// Phase 3: Connection metadata storage (key = connection name). + connections: DashMap, + /// Phase 3: API Destination metadata storage (key = destination name). + api_destinations: DashMap, + /// Phase 3: Endpoint metadata storage (key = endpoint name). + endpoints: DashMap, + /// Phase 3: Replay metadata storage (key = replay name). 
+ replays: DashMap, } impl std::fmt::Debug for RustStackEvents { @@ -115,6 +120,11 @@ impl std::fmt::Debug for RustStackEvents { f.debug_struct("RustStackEvents") .field("config", &self.config) .field("bus_count", &self.buses.len()) + .field("archive_count", &self.archives.len()) + .field("connection_count", &self.connections.len()) + .field("api_destination_count", &self.api_destinations.len()) + .field("endpoint_count", &self.endpoints.len()) + .field("replay_count", &self.replays.len()) .finish_non_exhaustive() } } @@ -128,6 +138,11 @@ impl RustStackEvents { config, buses: DashMap::new(), delivery, + archives: DashMap::new(), + connections: DashMap::new(), + api_destinations: DashMap::new(), + endpoints: DashMap::new(), + replays: DashMap::new(), }; provider.create_default_bus(); provider @@ -1175,6 +1190,679 @@ impl RustStackEvents { ..Default::default() }) } + + // ----------------------------------------------------------------------- + // Phase 3: Generic resource helpers + // ----------------------------------------------------------------------- + + fn build_resource_arn(&self, resource_type: &str, name: &str) -> String { + format!( + "arn:aws:events:{}:{}:{}/{}", + self.config.default_region, self.config.account_id, resource_type, name, + ) + } + + fn now_timestamp() -> serde_json::Value { + // EventBridge returns timestamps as epoch seconds in JSON. + serde_json::Value::Number(serde_json::Number::from(chrono::Utc::now().timestamp())) + } + + /// Extract a required string field from JSON input, returning a validation error if missing. + fn require_name(input: &serde_json::Value, field: &str) -> Result { + input + .get(field) + .and_then(serde_json::Value::as_str) + .map(ToOwned::to_owned) + .ok_or_else(|| { + EventsError::validation(format!( + "1 validation error detected: Value at '{field}' failed to satisfy \ + constraint: Member must not be null" + )) + }) + } + + /// Generic create for a `DashMap` resource collection. 
+ fn generic_create( + store: &DashMap, + name: &str, + mut record: serde_json::Value, + arn: &str, + initial_state: &str, + resource_label: &str, + ) -> Result { + let now = Self::now_timestamp(); + if let Some(obj) = record.as_object_mut() { + obj.insert("Arn".to_owned(), serde_json::Value::String(arn.to_owned())); + obj.insert( + "State".to_owned(), + serde_json::Value::String(initial_state.to_owned()), + ); + obj.insert("CreationTime".to_owned(), now.clone()); + obj.insert("LastModifiedTime".to_owned(), now); + } + + match store.entry(name.to_owned()) { + Entry::Occupied(_) => Err(EventsError::resource_already_exists(format!( + "{resource_label} {name} already exists." + ))), + Entry::Vacant(v) => { + let response = record.clone(); + v.insert(record); + Ok(response) + } + } + } + + /// Generic describe: look up a resource by name. + fn generic_describe( + store: &DashMap, + name: &str, + resource_label: &str, + ) -> Result { + store.get(name).map(|r| r.value().clone()).ok_or_else(|| { + EventsError::resource_not_found(format!("{resource_label} {name} does not exist.")) + }) + } + + /// Generic delete: remove a resource by name. + fn generic_delete( + store: &DashMap, + name: &str, + resource_label: &str, + ) -> Result { + store + .remove(name) + .map(|_| serde_json::json!({})) + .ok_or_else(|| { + EventsError::resource_not_found(format!("{resource_label} {name} does not exist.")) + }) + } + + /// Generic update: merge fields from input JSON into stored record. + fn generic_update( + store: &DashMap, + name: &str, + updates: &serde_json::Value, + resource_label: &str, + ) -> Result { + let mut entry = store.get_mut(name).ok_or_else(|| { + EventsError::resource_not_found(format!("{resource_label} {name} does not exist.")) + })?; + + let now = Self::now_timestamp(); + if let (Some(stored), Some(input_obj)) = (entry.as_object_mut(), updates.as_object()) { + for (k, v) in input_obj { + // Skip the name/key field itself, don't overwrite ARN or creation time. 
+ if k == "Name" + || k == "ReplayName" + || k == "ConnectionName" + || k == "ArchiveName" + || k == "ApiDestinationName" + || k == "EndpointName" + || k == "Arn" + || k == "CreationTime" + { + continue; + } + stored.insert(k.clone(), v.clone()); + } + stored.insert("LastModifiedTime".to_owned(), now); + } + + Ok(entry.clone()) + } + + /// Generic list: iterate all items in a collection, optionally filter by name prefix. + fn generic_list( + store: &DashMap, + name_prefix: Option<&str>, + list_key: &str, + ) -> serde_json::Value { + let items: Vec = store + .iter() + .filter(|entry| name_prefix.is_none_or(|prefix| entry.key().starts_with(prefix))) + .map(|entry| entry.value().clone()) + .collect(); + + serde_json::json!({ list_key: items }) + } + + // ----------------------------------------------------------------------- + // Phase 3: Archives + // ----------------------------------------------------------------------- + + /// Handle `CreateArchive`. + pub fn handle_create_archive( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "ArchiveName")?; + let arn = self.build_resource_arn("archive", &name); + let result = Self::generic_create( + &self.archives, + &name, + input.value.clone(), + &arn, + "ENABLED", + "Archive", + )?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ArchiveArn": result.get("Arn"), + "ArchiveName": name, + "State": "ENABLED", + "CreationTime": result.get("CreationTime"), + }), + }) + } + + /// Handle `DeleteArchive`. + pub fn handle_delete_archive( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "ArchiveName")?; + let result = Self::generic_delete(&self.archives, &name, "Archive")?; + Ok(GenericOutput { value: result }) + } + + /// Handle `DescribeArchive`. 
+ pub fn handle_describe_archive( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "ArchiveName")?; + let stored = Self::generic_describe(&self.archives, &name, "Archive")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ArchiveArn": stored.get("Arn"), + "ArchiveName": name, + "Description": stored.get("Description"), + "EventSourceArn": stored.get("EventSourceArn"), + "EventPattern": stored.get("EventPattern"), + "State": stored.get("State"), + "RetentionDays": stored.get("RetentionDays"), + "SizeBytes": 0, + "EventCount": 0, + "CreationTime": stored.get("CreationTime"), + }), + }) + } + + /// Handle `ListArchives`. + pub fn handle_list_archives(&self, input: &GenericInput) -> Result { + let prefix = input + .value + .get("NamePrefix") + .and_then(serde_json::Value::as_str); + let result = Self::generic_list(&self.archives, prefix, "Archives"); + Ok(GenericOutput { value: result }) + } + + /// Handle `UpdateArchive`. + pub fn handle_update_archive( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "ArchiveName")?; + let updated = Self::generic_update(&self.archives, &name, &input.value, "Archive")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ArchiveArn": updated.get("Arn"), + "ArchiveName": name, + "State": updated.get("State"), + "CreationTime": updated.get("CreationTime"), + }), + }) + } + + // ----------------------------------------------------------------------- + // Phase 3: Replays + // ----------------------------------------------------------------------- + + /// Handle `StartReplay`. 
+ pub fn handle_start_replay(&self, input: &GenericInput) -> Result { + let name = Self::require_name(&input.value, "ReplayName")?; + let arn = self.build_resource_arn("replay", &name); + let result = Self::generic_create( + &self.replays, + &name, + input.value.clone(), + &arn, + "STARTING", + "Replay", + )?; + + // Transition immediately to RUNNING for local dev simulation. + if let Some(mut entry) = self.replays.get_mut(&name) { + if let Some(obj) = entry.as_object_mut() { + obj.insert( + "State".to_owned(), + serde_json::Value::String("RUNNING".to_owned()), + ); + } + } + + Ok(GenericOutput { + value: serde_json::json!({ + "ReplayArn": result.get("Arn"), + "ReplayName": name, + "State": "STARTING", + "StateReason": "Replay starting", + "ReplayStartTime": result.get("CreationTime"), + }), + }) + } + + /// Handle `CancelReplay`. + pub fn handle_cancel_replay(&self, input: &GenericInput) -> Result { + let name = Self::require_name(&input.value, "ReplayName")?; + + let mut entry = self.replays.get_mut(&name).ok_or_else(|| { + EventsError::resource_not_found(format!("Replay {name} does not exist.")) + })?; + + if let Some(obj) = entry.as_object_mut() { + obj.insert( + "State".to_owned(), + serde_json::Value::String("CANCELLED".to_owned()), + ); + obj.insert("LastModifiedTime".to_owned(), Self::now_timestamp()); + } + + Ok(GenericOutput { + value: serde_json::json!({ + "ReplayArn": entry.get("Arn"), + "ReplayName": name, + "State": "CANCELLING", + "StateReason": "Replay is being cancelled", + }), + }) + } + + /// Handle `DescribeReplay`. 
+ pub fn handle_describe_replay( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "ReplayName")?; + let stored = Self::generic_describe(&self.replays, &name, "Replay")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ReplayArn": stored.get("Arn"), + "ReplayName": name, + "Description": stored.get("Description"), + "State": stored.get("State"), + "StateReason": stored.get("StateReason"), + "EventSourceArn": stored.get("EventSourceArn"), + "Destination": stored.get("Destination"), + "EventStartTime": stored.get("EventStartTime"), + "EventEndTime": stored.get("EventEndTime"), + "EventLastReplayedTime": stored.get("EventLastReplayedTime"), + "ReplayStartTime": stored.get("CreationTime"), + "ReplayEndTime": stored.get("ReplayEndTime"), + }), + }) + } + + /// Handle `ListReplays`. + pub fn handle_list_replays(&self, input: &GenericInput) -> Result { + let prefix = input + .value + .get("NamePrefix") + .and_then(serde_json::Value::as_str); + let result = Self::generic_list(&self.replays, prefix, "Replays"); + Ok(GenericOutput { value: result }) + } + + // ----------------------------------------------------------------------- + // Phase 3: API Destinations + // ----------------------------------------------------------------------- + + /// Handle `CreateApiDestination`. + pub fn handle_create_api_destination( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let arn = self.build_resource_arn("api-destination", &name); + let result = Self::generic_create( + &self.api_destinations, + &name, + input.value.clone(), + &arn, + "ACTIVE", + "ApiDestination", + )?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ApiDestinationArn": result.get("Arn"), + "ApiDestinationState": "ACTIVE", + "CreationTime": result.get("CreationTime"), + "LastModifiedTime": result.get("LastModifiedTime"), + }), + }) + } + + /// Handle `DeleteApiDestination`. 
+ pub fn handle_delete_api_destination( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let result = Self::generic_delete(&self.api_destinations, &name, "ApiDestination")?; + Ok(GenericOutput { value: result }) + } + + /// Handle `DescribeApiDestination`. + pub fn handle_describe_api_destination( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let stored = Self::generic_describe(&self.api_destinations, &name, "ApiDestination")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ApiDestinationArn": stored.get("Arn"), + "Name": name, + "Description": stored.get("Description"), + "ApiDestinationState": stored.get("State"), + "ConnectionArn": stored.get("ConnectionArn"), + "InvocationEndpoint": stored.get("InvocationEndpoint"), + "HttpMethod": stored.get("HttpMethod"), + "InvocationRateLimitPerSecond": stored.get("InvocationRateLimitPerSecond"), + "CreationTime": stored.get("CreationTime"), + "LastModifiedTime": stored.get("LastModifiedTime"), + }), + }) + } + + /// Handle `ListApiDestinations`. + pub fn handle_list_api_destinations( + &self, + input: &GenericInput, + ) -> Result { + let prefix = input + .value + .get("NamePrefix") + .and_then(serde_json::Value::as_str); + let result = Self::generic_list(&self.api_destinations, prefix, "ApiDestinations"); + Ok(GenericOutput { value: result }) + } + + /// Handle `UpdateApiDestination`. 
+ pub fn handle_update_api_destination( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let updated = Self::generic_update( + &self.api_destinations, + &name, + &input.value, + "ApiDestination", + )?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ApiDestinationArn": updated.get("Arn"), + "ApiDestinationState": updated.get("State"), + "CreationTime": updated.get("CreationTime"), + "LastModifiedTime": updated.get("LastModifiedTime"), + }), + }) + } + + // ----------------------------------------------------------------------- + // Phase 3: Connections + // ----------------------------------------------------------------------- + + /// Handle `CreateConnection`. + pub fn handle_create_connection( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let arn = self.build_resource_arn("connection", &name); + let result = Self::generic_create( + &self.connections, + &name, + input.value.clone(), + &arn, + "AUTHORIZED", + "Connection", + )?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ConnectionArn": result.get("Arn"), + "ConnectionState": "AUTHORIZED", + "CreationTime": result.get("CreationTime"), + "LastModifiedTime": result.get("LastModifiedTime"), + }), + }) + } + + /// Handle `DeleteConnection`. + pub fn handle_delete_connection( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let stored = self.connections.remove(&name).ok_or_else(|| { + EventsError::resource_not_found(format!("Connection {name} does not exist.")) + })?; + let (_, val) = stored; + + Ok(GenericOutput { + value: serde_json::json!({ + "ConnectionArn": val.get("Arn"), + "ConnectionState": "DELETING", + "CreationTime": val.get("CreationTime"), + "LastModifiedTime": val.get("LastModifiedTime"), + "LastAuthorizedTime": val.get("LastAuthorizedTime"), + }), + }) + } + + /// Handle `DescribeConnection`. 
+ pub fn handle_describe_connection( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let stored = Self::generic_describe(&self.connections, &name, "Connection")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ConnectionArn": stored.get("Arn"), + "Name": name, + "Description": stored.get("Description"), + "ConnectionState": stored.get("State"), + "AuthorizationType": stored.get("AuthorizationType"), + "AuthParameters": stored.get("AuthParameters"), + "SecretArn": stored.get("SecretArn"), + "CreationTime": stored.get("CreationTime"), + "LastModifiedTime": stored.get("LastModifiedTime"), + "LastAuthorizedTime": stored.get("LastAuthorizedTime"), + }), + }) + } + + /// Handle `ListConnections`. + pub fn handle_list_connections( + &self, + input: &GenericInput, + ) -> Result { + let prefix = input + .value + .get("NamePrefix") + .and_then(serde_json::Value::as_str); + let result = Self::generic_list(&self.connections, prefix, "Connections"); + Ok(GenericOutput { value: result }) + } + + /// Handle `UpdateConnection`. + pub fn handle_update_connection( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let updated = Self::generic_update(&self.connections, &name, &input.value, "Connection")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "ConnectionArn": updated.get("Arn"), + "ConnectionState": updated.get("State"), + "CreationTime": updated.get("CreationTime"), + "LastModifiedTime": updated.get("LastModifiedTime"), + "LastAuthorizedTime": updated.get("LastAuthorizedTime"), + }), + }) + } + + /// Handle `DeauthorizeConnection`. 
+ pub fn handle_deauthorize_connection( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + + let mut entry = self.connections.get_mut(&name).ok_or_else(|| { + EventsError::resource_not_found(format!("Connection {name} does not exist.")) + })?; + + let now = Self::now_timestamp(); + if let Some(obj) = entry.as_object_mut() { + obj.insert( + "State".to_owned(), + serde_json::Value::String("DEAUTHORIZING".to_owned()), + ); + obj.insert("LastModifiedTime".to_owned(), now); + } + + Ok(GenericOutput { + value: serde_json::json!({ + "ConnectionArn": entry.get("Arn"), + "ConnectionState": "DEAUTHORIZING", + "CreationTime": entry.get("CreationTime"), + "LastModifiedTime": entry.get("LastModifiedTime"), + "LastAuthorizedTime": entry.get("LastAuthorizedTime"), + }), + }) + } + + // ----------------------------------------------------------------------- + // Phase 3: Endpoints + // ----------------------------------------------------------------------- + + /// Handle `CreateEndpoint`. + pub fn handle_create_endpoint( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let arn = self.build_resource_arn("endpoint", &name); + let result = Self::generic_create( + &self.endpoints, + &name, + input.value.clone(), + &arn, + "ACTIVE", + "Endpoint", + )?; + + Ok(GenericOutput { + value: serde_json::json!({ + "Arn": result.get("Arn"), + "Name": name, + "State": "CREATING", + "EventBuses": result.get("EventBuses"), + "RoutingConfig": result.get("RoutingConfig"), + "ReplicationConfig": result.get("ReplicationConfig"), + "RoleArn": result.get("RoleArn"), + }), + }) + } + + /// Handle `DeleteEndpoint`. 
+ pub fn handle_delete_endpoint( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let result = Self::generic_delete(&self.endpoints, &name, "Endpoint")?; + Ok(GenericOutput { value: result }) + } + + /// Handle `DescribeEndpoint`. + pub fn handle_describe_endpoint( + &self, + input: &GenericInput, + ) -> Result { + let name = Self::require_name(&input.value, "Name")?; + let stored = Self::generic_describe(&self.endpoints, &name, "Endpoint")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "Arn": stored.get("Arn"), + "Name": name, + "Description": stored.get("Description"), + "State": stored.get("State"), + "StateReason": stored.get("StateReason"), + "EventBuses": stored.get("EventBuses"), + "RoutingConfig": stored.get("RoutingConfig"), + "ReplicationConfig": stored.get("ReplicationConfig"), + "RoleArn": stored.get("RoleArn"), + "EndpointId": stored.get("EndpointId"), + "EndpointUrl": stored.get("EndpointUrl"), + "CreationTime": stored.get("CreationTime"), + "LastModifiedTime": stored.get("LastModifiedTime"), + }), + }) + } + + /// Handle `ListEndpoints`. + pub fn handle_list_endpoints( + &self, + input: &GenericInput, + ) -> Result { + let prefix = input + .value + .get("NamePrefix") + .and_then(serde_json::Value::as_str); + let result = Self::generic_list(&self.endpoints, prefix, "Endpoints"); + Ok(GenericOutput { value: result }) + } + + /// Handle `UpdateEndpoint`. 
+ pub fn handle_update_endpoint( + &self, + input: &GenericInput, + ) -> Result<GenericOutput, EventsError> { + let name = Self::require_name(&input.value, "Name")?; + let updated = Self::generic_update(&self.endpoints, &name, &input.value, "Endpoint")?; + + Ok(GenericOutput { + value: serde_json::json!({ + "Arn": updated.get("Arn"), + "Name": name, + "State": "UPDATING", + "EventBuses": updated.get("EventBuses"), + "RoutingConfig": updated.get("RoutingConfig"), + "ReplicationConfig": updated.get("ReplicationConfig"), + "RoleArn": updated.get("RoleArn"), + "EndpointId": updated.get("EndpointId"), + "EndpointUrl": updated.get("EndpointUrl"), + }), + }) + } } // --------------------------------------------------------------------------- diff --git a/crates/ruststack-events-http/src/body.rs b/crates/ruststack-events-http/src/body.rs index 2e61ae4..e858131 100644 --- a/crates/ruststack-events-http/src/body.rs +++ b/crates/ruststack-events-http/src/body.rs @@ -1,7 +1,9 @@ //! EventBridge HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-events-http/src/dispatch.rs b/crates/ruststack-events-http/src/dispatch.rs index 6a81e2e..bf20a55 100644 --- a/crates/ruststack-events-http/src/dispatch.rs +++ b/crates/ruststack-events-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! EventBridge handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_events_model::error::EventsError; -use ruststack_events_model::operations::EventsOperation; +use ruststack_events_model::{error::EventsError, operations::EventsOperation}; use crate::body::EventsResponseBody; diff --git a/crates/ruststack-events-http/src/response.rs b/crates/ruststack-events-http/src/response.rs index bff60bc..31db8d6 100644 --- a/crates/ruststack-events-http/src/response.rs +++ b/crates/ruststack-events-http/src/response.rs @@ -74,9 +74,10 @@ pub fn json_response(json: Vec, request_id: &str) -> http::Response bool { matches!( self, + // Phase 0 Self::CreateEventBus | Self::DeleteEventBus | Self::DescribeEventBus @@ -292,13 +293,45 @@ impl EventsOperation { | Self::ListTargetsByRule | Self::PutEvents | Self::TestEventPattern + // Phase 1 | Self::TagResource | Self::UntagResource | Self::ListTagsForResource | Self::PutPermission | Self::RemovePermission | Self::ListRuleNamesByTarget + // Phase 2 | Self::UpdateEventBus + // Phase 3: Archives + | Self::CreateArchive + | Self::DeleteArchive + | Self::DescribeArchive + | Self::ListArchives + | Self::UpdateArchive + // Phase 3: Replays + | Self::StartReplay + | Self::CancelReplay + | Self::DescribeReplay + | Self::ListReplays + // Phase 3: API Destinations + | Self::CreateApiDestination + | Self::DeleteApiDestination + | Self::DescribeApiDestination + | Self::ListApiDestinations + | Self::UpdateApiDestination + // Phase 3: Connections + | Self::CreateConnection + | Self::DeleteConnection + | Self::DescribeConnection + | Self::ListConnections + | Self::UpdateConnection + | Self::DeauthorizeConnection + // Phase 3: Endpoints + | Self::CreateEndpoint + | Self::DeleteEndpoint + | Self::DescribeEndpoint + | Self::ListEndpoints + | Self::UpdateEndpoint ) } } diff --git a/crates/ruststack-iam-core/src/handler.rs b/crates/ruststack-iam-core/src/handler.rs index d8b49c6..b361431 100644 --- 
a/crates/ruststack-iam-core/src/handler.rs +++ b/crates/ruststack-iam-core/src/handler.rs @@ -6,18 +6,13 @@ //! Covers all four phases: users/roles/policies, groups/instance profiles, //! policy versions/inline policies, and tagging/service-linked roles. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_iam_http::body::IamResponseBody; -use ruststack_iam_http::dispatch::IamHandler; -use ruststack_iam_http::request::parse_form_params; -use ruststack_iam_http::response::xml_response; -use ruststack_iam_model::error::IamError; -use ruststack_iam_model::operations::IamOperation; +use ruststack_iam_http::{ + body::IamResponseBody, dispatch::IamHandler, request::parse_form_params, response::xml_response, +}; +use ruststack_iam_model::{error::IamError, operations::IamOperation}; use crate::provider::RustStackIam; @@ -180,6 +175,28 @@ fn dispatch( IamOperation::GetAccountAuthorizationDetails => { provider.get_account_authorization_details(&params)? } + + // Phase 4: OIDC Providers + IamOperation::CreateOpenIDConnectProvider => { + provider.create_open_id_connect_provider(&params)? + } + IamOperation::GetOpenIDConnectProvider => provider.get_open_id_connect_provider(&params)?, + IamOperation::DeleteOpenIDConnectProvider => { + provider.delete_open_id_connect_provider(&params)? + } + IamOperation::ListOpenIDConnectProviders => { + provider.list_open_id_connect_providers(&params)? 
+ } + + // Phase 4: Policy Tags + IamOperation::TagPolicy => provider.tag_policy(&params)?, + IamOperation::UntagPolicy => provider.untag_policy(&params)?, + IamOperation::ListPolicyTags => provider.list_policy_tags(&params)?, + + // Phase 4: Instance Profile Tags + IamOperation::TagInstanceProfile => provider.tag_instance_profile(&params)?, + IamOperation::UntagInstanceProfile => provider.untag_instance_profile(&params)?, + IamOperation::ListInstanceProfileTags => provider.list_instance_profile_tags(&params)?, }; Ok(xml_response(xml, &request_id)) diff --git a/crates/ruststack-iam-core/src/provider.rs b/crates/ruststack-iam-core/src/provider.rs index b4d2e7d..3cbb86d 100644 --- a/crates/ruststack-iam-core/src/provider.rs +++ b/crates/ruststack-iam-core/src/provider.rs @@ -11,30 +11,36 @@ //! - **Phase 3**: Tagging, service-linked roles, simulation stubs, authorization details #![allow(clippy::too_many_lines)] -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use dashmap::mapref::entry::Entry; use percent_encoding::{AsciiSet, NON_ALPHANUMERIC, utf8_percent_encode}; -use tracing::debug; - -use ruststack_iam_http::request::{ - get_optional_bool, get_optional_i32, get_optional_param, get_required_param, parse_string_list, - parse_tag_list, +use ruststack_iam_http::{ + request::{ + get_optional_bool, get_optional_i32, get_optional_param, get_required_param, + parse_string_list, parse_tag_list, + }, + response::XmlWriter, }; -use ruststack_iam_http::response::XmlWriter; use ruststack_iam_model::error::IamError; +use tracing::debug; -use crate::arn::iam_arn; -use crate::config::IamConfig; -use crate::id_gen::{generate_access_key_id, generate_iam_id, generate_secret_access_key}; -use crate::store::IamStore; -use crate::types::{ - AccessKeyRecord, GroupRecord, InstanceProfileRecord, ManagedPolicyRecord, PolicyVersionRecord, - RoleRecord, UserRecord, -}; -use crate::validation::{ - validate_entity_name, 
validate_max_session_duration, validate_path, validate_policy_document, +use crate::{ + arn::iam_arn, + config::IamConfig, + id_gen::{generate_access_key_id, generate_iam_id, generate_secret_access_key}, + store::IamStore, + types::{ + AccessKeyRecord, GroupRecord, InstanceProfileRecord, ManagedPolicyRecord, + OidcProviderRecord, PolicyVersionRecord, RoleRecord, UserRecord, + }, + validation::{ + validate_entity_name, validate_max_session_duration, validate_path, + validate_policy_document, + }, }; /// Characters that must be percent-encoded in policy document output. @@ -3491,3 +3497,343 @@ fn capitalize_service_name(name: &str) -> String { None => String::new(), } } + +// ============================================================================ +// Phase 4 operations +// ============================================================================ + +impl RustStackIam { + // ---- OIDC Providers ---- + + /// Normalize an OIDC provider URL by stripping the `https://` scheme. + fn normalize_oidc_url(url: &str) -> String { + url.strip_prefix("https://") + .or_else(|| url.strip_prefix("http://")) + .unwrap_or(url) + .trim_end_matches('/') + .to_owned() + } + + /// Create an OIDC identity provider. 
+ pub fn create_open_id_connect_provider( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let url = get_required_param(params, "Url")?; + let client_id_list = parse_string_list(params, "ClientIDList"); + let thumbprint_list = parse_string_list(params, "ThumbprintList"); + let tags = parse_tag_list(params); + let request_id = uuid::Uuid::new_v4().to_string(); + + if url.is_empty() { + return Err(IamError::invalid_input("Url must not be empty")); + } + + if thumbprint_list.is_empty() { + return Err(IamError::invalid_input( + "ThumbprintList must contain at least one thumbprint", + )); + } + + if tags.len() > MAX_TAGS_PER_ENTITY { + return Err(IamError::limit_exceeded(format!( + "Cannot exceed {MAX_TAGS_PER_ENTITY} tags per entity" + ))); + } + + let normalized = Self::normalize_oidc_url(url); + let arn = format!( + "arn:aws:iam::{}:oidc-provider/{}", + self.config.account_id, normalized + ); + + if self.store.oidc_providers.contains_key(&arn) { + return Err(IamError::entity_already_exists(format!( + "Provider with url {url} already exists." + ))); + } + + let record = OidcProviderRecord { + arn: arn.clone(), + url: url.to_owned(), + client_id_list, + thumbprint_list, + tags, + create_date: now_iso8601(), + }; + + self.store.oidc_providers.insert(arn.clone(), record); + + debug!(url, arn, "created OIDC provider"); + + let mut w = XmlWriter::new(); + w.start_response("CreateOpenIDConnectProvider"); + w.start_result("CreateOpenIDConnectProvider"); + w.write_element("OpenIDConnectProviderArn", &arn); + w.end_element("CreateOpenIDConnectProviderResult"); + w.write_response_metadata(&request_id); + w.end_element("CreateOpenIDConnectProviderResponse"); + + Ok((w.into_string(), request_id)) + } + + /// Get an OIDC identity provider. 
+ pub fn get_open_id_connect_provider( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let arn = get_required_param(params, "OpenIDConnectProviderArn")?; + let request_id = uuid::Uuid::new_v4().to_string(); + + let provider = self.store.oidc_providers.get(arn).ok_or_else(|| { + IamError::no_such_entity(format!("OpenIDConnect provider not found for ARN: {arn}")) + })?; + + let mut w = XmlWriter::new(); + w.start_response("GetOpenIDConnectProvider"); + w.start_result("GetOpenIDConnectProvider"); + w.write_element("Url", &provider.url); + w.start_element("ClientIDList"); + for cid in &provider.client_id_list { + w.write_element("member", cid); + } + w.end_element("ClientIDList"); + w.start_element("ThumbprintList"); + for tp in &provider.thumbprint_list { + w.write_element("member", tp); + } + w.end_element("ThumbprintList"); + w.write_element("CreateDate", &provider.create_date); + if !provider.tags.is_empty() { + write_tags_xml(&mut w, &provider.tags); + } + w.end_element("GetOpenIDConnectProviderResult"); + w.write_response_metadata(&request_id); + w.end_element("GetOpenIDConnectProviderResponse"); + + Ok((w.into_string(), request_id)) + } + + /// Delete an OIDC identity provider. + pub fn delete_open_id_connect_provider( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let arn = get_required_param(params, "OpenIDConnectProviderArn")?; + let request_id = uuid::Uuid::new_v4().to_string(); + + if self.store.oidc_providers.remove(arn).is_none() { + return Err(IamError::no_such_entity(format!( + "OpenIDConnect provider not found for ARN: {arn}" + ))); + } + + debug!(arn, "deleted OIDC provider"); + Ok(( + empty_response("DeleteOpenIDConnectProvider", &request_id), + request_id, + )) + } + + /// List all OIDC identity providers. 
+ pub fn list_open_id_connect_providers( + &self, + _params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let request_id = uuid::Uuid::new_v4().to_string(); + + let mut w = XmlWriter::new(); + w.start_response("ListOpenIDConnectProviders"); + w.start_result("ListOpenIDConnectProviders"); + w.start_element("OpenIDConnectProviderList"); + for entry in &self.store.oidc_providers { + w.start_element("member"); + w.write_element("Arn", &entry.value().arn); + w.end_element("member"); + } + w.end_element("OpenIDConnectProviderList"); + w.end_element("ListOpenIDConnectProvidersResult"); + w.write_response_metadata(&request_id); + w.end_element("ListOpenIDConnectProvidersResponse"); + + Ok((w.into_string(), request_id)) + } + + // ---- Policy Tags ---- + + /// Tag a managed policy. + pub fn tag_policy(&self, params: &[(String, String)]) -> Result<(String, String), IamError> { + let policy_arn = get_required_param(params, "PolicyArn")?; + let new_tags = parse_tag_list(params); + let request_id = uuid::Uuid::new_v4().to_string(); + + let mut policy = self.store.policies.get_mut(policy_arn).ok_or_else(|| { + IamError::no_such_entity(format!("Policy {policy_arn} does not exist.")) + })?; + + let mut merged_tags = policy.tags.clone(); + for (key, value) in &new_tags { + if let Some(existing) = merged_tags.iter_mut().find(|(k, _)| k == key) { + existing.1.clone_from(value); + } else { + merged_tags.push((key.clone(), value.clone())); + } + } + + if merged_tags.len() > MAX_TAGS_PER_ENTITY { + return Err(IamError::limit_exceeded(format!( + "Cannot exceed {MAX_TAGS_PER_ENTITY} tags per entity" + ))); + } + + policy.tags = merged_tags; + + debug!(policy_arn, count = new_tags.len(), "tagged policy"); + Ok((empty_response("TagPolicy", &request_id), request_id)) + } + + /// Remove tags from a managed policy. 
+ pub fn untag_policy(&self, params: &[(String, String)]) -> Result<(String, String), IamError> { + let policy_arn = get_required_param(params, "PolicyArn")?; + let tag_keys = parse_string_list(params, "TagKeys"); + let request_id = uuid::Uuid::new_v4().to_string(); + + let mut policy = self.store.policies.get_mut(policy_arn).ok_or_else(|| { + IamError::no_such_entity(format!("Policy {policy_arn} does not exist.")) + })?; + + policy.tags.retain(|(k, _)| !tag_keys.contains(k)); + + debug!(policy_arn, count = tag_keys.len(), "untagged policy"); + Ok((empty_response("UntagPolicy", &request_id), request_id)) + } + + /// List tags for a managed policy. + pub fn list_policy_tags( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let policy_arn = get_required_param(params, "PolicyArn")?; + let (marker, max_items) = parse_pagination(params); + let request_id = uuid::Uuid::new_v4().to_string(); + + let policy = self.store.policies.get(policy_arn).ok_or_else(|| { + IamError::no_such_entity(format!("Policy {policy_arn} does not exist.")) + })?; + + let (page, is_truncated, next_marker) = paginate(&policy.tags, marker, max_items); + + let mut w = XmlWriter::new(); + w.start_response("ListPolicyTags"); + w.start_result("ListPolicyTags"); + write_tags_xml(&mut w, page); + w.write_bool_element("IsTruncated", is_truncated); + if let Some(ref m) = next_marker { + w.write_element("Marker", m); + } + w.end_element("ListPolicyTagsResult"); + w.write_response_metadata(&request_id); + w.end_element("ListPolicyTagsResponse"); + + Ok((w.into_string(), request_id)) + } + + // ---- Instance Profile Tags ---- + + /// Tag an instance profile. 
+ pub fn tag_instance_profile( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let ip_name = get_required_param(params, "InstanceProfileName")?; + let new_tags = parse_tag_list(params); + let request_id = uuid::Uuid::new_v4().to_string(); + + let mut ip = self + .store + .instance_profiles + .get_mut(ip_name) + .ok_or_else(|| { + IamError::no_such_entity(format!("Instance profile {ip_name} does not exist.")) + })?; + + let mut merged_tags = ip.tags.clone(); + for (key, value) in &new_tags { + if let Some(existing) = merged_tags.iter_mut().find(|(k, _)| k == key) { + existing.1.clone_from(value); + } else { + merged_tags.push((key.clone(), value.clone())); + } + } + + if merged_tags.len() > MAX_TAGS_PER_ENTITY { + return Err(IamError::limit_exceeded(format!( + "Cannot exceed {MAX_TAGS_PER_ENTITY} tags per entity" + ))); + } + + ip.tags = merged_tags; + + debug!(ip_name, count = new_tags.len(), "tagged instance profile"); + Ok(( + empty_response("TagInstanceProfile", &request_id), + request_id, + )) + } + + /// Remove tags from an instance profile. + pub fn untag_instance_profile( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let ip_name = get_required_param(params, "InstanceProfileName")?; + let tag_keys = parse_string_list(params, "TagKeys"); + let request_id = uuid::Uuid::new_v4().to_string(); + + let mut ip = self + .store + .instance_profiles + .get_mut(ip_name) + .ok_or_else(|| { + IamError::no_such_entity(format!("Instance profile {ip_name} does not exist.")) + })?; + + ip.tags.retain(|(k, _)| !tag_keys.contains(k)); + + debug!(ip_name, count = tag_keys.len(), "untagged instance profile"); + Ok(( + empty_response("UntagInstanceProfile", &request_id), + request_id, + )) + } + + /// List tags for an instance profile. 
+ pub fn list_instance_profile_tags( + &self, + params: &[(String, String)], + ) -> Result<(String, String), IamError> { + let ip_name = get_required_param(params, "InstanceProfileName")?; + let (marker, max_items) = parse_pagination(params); + let request_id = uuid::Uuid::new_v4().to_string(); + + let ip = self.store.instance_profiles.get(ip_name).ok_or_else(|| { + IamError::no_such_entity(format!("Instance profile {ip_name} does not exist.")) + })?; + + let (page, is_truncated, next_marker) = paginate(&ip.tags, marker, max_items); + + let mut w = XmlWriter::new(); + w.start_response("ListInstanceProfileTags"); + w.start_result("ListInstanceProfileTags"); + write_tags_xml(&mut w, page); + w.write_bool_element("IsTruncated", is_truncated); + if let Some(ref m) = next_marker { + w.write_element("Marker", m); + } + w.end_element("ListInstanceProfileTagsResult"); + w.write_response_metadata(&request_id); + w.end_element("ListInstanceProfileTagsResponse"); + + Ok((w.into_string(), request_id)) + } +} diff --git a/crates/ruststack-iam-core/src/store.rs b/crates/ruststack-iam-core/src/store.rs index dee9b57..bb1e46d 100644 --- a/crates/ruststack-iam-core/src/store.rs +++ b/crates/ruststack-iam-core/src/store.rs @@ -5,8 +5,8 @@ use dashmap::DashMap; use crate::types::{ - AccessKeyRecord, GroupRecord, InstanceProfileRecord, ManagedPolicyRecord, RoleRecord, - UserRecord, + AccessKeyRecord, GroupRecord, InstanceProfileRecord, ManagedPolicyRecord, OidcProviderRecord, + RoleRecord, UserRecord, }; /// Concurrent in-memory store holding all IAM entity collections. @@ -24,6 +24,8 @@ pub struct IamStore { pub instance_profiles: DashMap, /// Access keys keyed by access key ID. pub access_keys: DashMap, + /// OIDC providers keyed by provider ARN. 
+ pub oidc_providers: DashMap, } impl IamStore { @@ -37,6 +39,7 @@ impl IamStore { policies: DashMap::new(), instance_profiles: DashMap::new(), access_keys: DashMap::new(), + oidc_providers: DashMap::new(), } } } diff --git a/crates/ruststack-iam-core/src/types.rs b/crates/ruststack-iam-core/src/types.rs index ed97365..791be66 100644 --- a/crates/ruststack-iam-core/src/types.rs +++ b/crates/ruststack-iam-core/src/types.rs @@ -146,6 +146,23 @@ pub struct InstanceProfileRecord { pub roles: Vec, } +/// Internal record for an OIDC identity provider. +#[derive(Debug, Clone)] +pub struct OidcProviderRecord { + /// The Amazon Resource Name (ARN) that identifies the provider. + pub arn: String, + /// The URL of the identity provider (e.g., `https://accounts.google.com`). + pub url: String, + /// A list of client IDs (audiences) registered with the provider. + pub client_id_list: Vec, + /// A list of server certificate thumbprints for the provider. + pub thumbprint_list: Vec, + /// Tags attached to this provider as `(key, value)` pairs. + pub tags: Vec<(String, String)>, + /// ISO 8601 date-time when the provider was created. + pub create_date: String, +} + /// Internal record for an IAM access key. #[derive(Debug, Clone)] pub struct AccessKeyRecord { diff --git a/crates/ruststack-iam-core/src/validation.rs b/crates/ruststack-iam-core/src/validation.rs index e6f9922..7956da7 100644 --- a/crates/ruststack-iam-core/src/validation.rs +++ b/crates/ruststack-iam-core/src/validation.rs @@ -28,8 +28,8 @@ pub fn validate_entity_name(name: &str, max_len: usize) -> Result<(), IamError> .all(|c| c.is_ascii_alphanumeric() || "+=,.@_-".contains(c)) { return Err(IamError::invalid_input(format!( - "Entity name '{name}' contains invalid characters. \ - Only alphanumeric characters and +=,.@_- are allowed." + "Entity name '{name}' contains invalid characters. Only alphanumeric characters and \ + +=,.@_- are allowed." 
))); } Ok(()) diff --git a/crates/ruststack-iam-http/src/body.rs b/crates/ruststack-iam-http/src/body.rs index e89e577..0075bd7 100644 --- a/crates/ruststack-iam-http/src/body.rs +++ b/crates/ruststack-iam-http/src/body.rs @@ -1,7 +1,9 @@ //! IAM HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-iam-http/src/dispatch.rs b/crates/ruststack-iam-http/src/dispatch.rs index b36d9c0..911c24e 100644 --- a/crates/ruststack-iam-http/src/dispatch.rs +++ b/crates/ruststack-iam-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! IAM handler trait and operation dispatch. -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_iam_model::error::IamError; -use ruststack_iam_model::operations::IamOperation; +use ruststack_iam_model::{error::IamError, operations::IamOperation}; use crate::body::IamResponseBody; diff --git a/crates/ruststack-iam-http/src/response.rs b/crates/ruststack-iam-http/src/response.rs index 3f76e50..8cc6ec9 100644 --- a/crates/ruststack-iam-http/src/response.rs +++ b/crates/ruststack-iam-http/src/response.rs @@ -59,14 +59,9 @@ pub fn xml_response(xml: String, request_id: &str) -> http::Response String { format!( - "\ - \ - {}\ - {}\ - {}\ - \ - {}\ - ", + "{}{}{}{}", error.code.fault(), error.code.code(), xml_escape(&error.message), diff --git a/crates/ruststack-iam-http/src/router.rs b/crates/ruststack-iam-http/src/router.rs index e37cd20..ee021cc 100644 --- a/crates/ruststack-iam-http/src/router.rs +++ b/crates/ruststack-iam-http/src/router.rs @@ -4,8 +4,7 @@ //! `Content-Type: application/x-www-form-urlencoded`. The operation is //! specified by the `Action=` form parameter. 
-use ruststack_iam_model::error::IamError; -use ruststack_iam_model::operations::IamOperation; +use ruststack_iam_model::{error::IamError, operations::IamOperation}; /// Resolve an IAM operation from parsed form parameters. /// diff --git a/crates/ruststack-iam-http/src/service.rs b/crates/ruststack-iam-http/src/service.rs index a87dffe..5142906 100644 --- a/crates/ruststack-iam-http/src/service.rs +++ b/crates/ruststack-iam-http/src/service.rs @@ -4,22 +4,20 @@ //! `application/x-www-form-urlencoded` and the response is `text/xml`. //! Like SNS, IAM uses the `Action=` form parameter for operation routing. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_iam_model::error::IamError; -use crate::body::IamResponseBody; -use crate::dispatch::{IamHandler, dispatch_operation}; -use crate::request::parse_form_params; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::IamResponseBody, + dispatch::{IamHandler, dispatch_operation}, + request::parse_form_params, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the IAM HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-iam-model/src/operations.rs b/crates/ruststack-iam-model/src/operations.rs index 4be519f..d2d5e22 100644 --- a/crates/ruststack-iam-model/src/operations.rs +++ b/crates/ruststack-iam-model/src/operations.rs @@ -151,6 +151,26 @@ pub enum IamOperation { ListEntitiesForPolicy, /// The GetAccountAuthorizationDetails operation. GetAccountAuthorizationDetails, + /// The CreateOpenIDConnectProvider operation. + CreateOpenIDConnectProvider, + /// The GetOpenIDConnectProvider operation. + GetOpenIDConnectProvider, + /// The DeleteOpenIDConnectProvider operation. 
+ DeleteOpenIDConnectProvider, + /// The ListOpenIDConnectProviders operation. + ListOpenIDConnectProviders, + /// The TagPolicy operation. + TagPolicy, + /// The UntagPolicy operation. + UntagPolicy, + /// The ListPolicyTags operation. + ListPolicyTags, + /// The TagInstanceProfile operation. + TagInstanceProfile, + /// The UntagInstanceProfile operation. + UntagInstanceProfile, + /// The ListInstanceProfileTags operation. + ListInstanceProfileTags, } impl IamOperation { @@ -232,6 +252,16 @@ impl IamOperation { Self::SimulateCustomPolicy => "SimulateCustomPolicy", Self::ListEntitiesForPolicy => "ListEntitiesForPolicy", Self::GetAccountAuthorizationDetails => "GetAccountAuthorizationDetails", + Self::CreateOpenIDConnectProvider => "CreateOpenIDConnectProvider", + Self::GetOpenIDConnectProvider => "GetOpenIDConnectProvider", + Self::DeleteOpenIDConnectProvider => "DeleteOpenIDConnectProvider", + Self::ListOpenIDConnectProviders => "ListOpenIDConnectProviders", + Self::TagPolicy => "TagPolicy", + Self::UntagPolicy => "UntagPolicy", + Self::ListPolicyTags => "ListPolicyTags", + Self::TagInstanceProfile => "TagInstanceProfile", + Self::UntagInstanceProfile => "UntagInstanceProfile", + Self::ListInstanceProfileTags => "ListInstanceProfileTags", } } @@ -313,6 +343,16 @@ impl IamOperation { "SimulateCustomPolicy" => Some(Self::SimulateCustomPolicy), "ListEntitiesForPolicy" => Some(Self::ListEntitiesForPolicy), "GetAccountAuthorizationDetails" => Some(Self::GetAccountAuthorizationDetails), + "CreateOpenIDConnectProvider" => Some(Self::CreateOpenIDConnectProvider), + "GetOpenIDConnectProvider" => Some(Self::GetOpenIDConnectProvider), + "DeleteOpenIDConnectProvider" => Some(Self::DeleteOpenIDConnectProvider), + "ListOpenIDConnectProviders" => Some(Self::ListOpenIDConnectProviders), + "TagPolicy" => Some(Self::TagPolicy), + "UntagPolicy" => Some(Self::UntagPolicy), + "ListPolicyTags" => Some(Self::ListPolicyTags), + "TagInstanceProfile" => Some(Self::TagInstanceProfile), + 
"UntagInstanceProfile" => Some(Self::UntagInstanceProfile), + "ListInstanceProfileTags" => Some(Self::ListInstanceProfileTags), _ => None, } } diff --git a/crates/ruststack-kinesis-core/src/handler.rs b/crates/ruststack-kinesis-core/src/handler.rs index 386f10b..4f7c064 100644 --- a/crates/ruststack-kinesis-core/src/handler.rs +++ b/crates/ruststack-kinesis-core/src/handler.rs @@ -1,16 +1,15 @@ //! Kinesis handler implementation bridging HTTP to business logic. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_kinesis_http::body::KinesisResponseBody; -use ruststack_kinesis_http::dispatch::KinesisHandler; -use ruststack_kinesis_http::response::json_response; -use ruststack_kinesis_model::error::{KinesisError, KinesisErrorCode}; -use ruststack_kinesis_model::operations::KinesisOperation; +use ruststack_kinesis_http::{ + body::KinesisResponseBody, dispatch::KinesisHandler, response::json_response, +}; +use ruststack_kinesis_model::{ + error::{KinesisError, KinesisErrorCode}, + operations::KinesisOperation, +}; use crate::provider::RustStackKinesis; diff --git a/crates/ruststack-kinesis-core/src/provider.rs b/crates/ruststack-kinesis-core/src/provider.rs index a009656..bd62fba 100644 --- a/crates/ruststack-kinesis-core/src/provider.rs +++ b/crates/ruststack-kinesis-core/src/provider.rs @@ -1,43 +1,47 @@ //! Kinesis service provider implementing all operations. 
-use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashMap, sync::Arc, time::Duration}; use chrono::{DateTime, Utc}; use dashmap::DashMap; +use ruststack_kinesis_model::{ + error::{KinesisError, KinesisErrorCode}, + input::{ + AddTagsToStreamInput, CreateStreamInput, DecreaseStreamRetentionPeriodInput, + DeleteResourcePolicyInput, DeleteStreamInput, DeregisterStreamConsumerInput, + DescribeLimitsInput, DescribeStreamConsumerInput, DescribeStreamInput, + DescribeStreamSummaryInput, GetRecordsInput, GetResourcePolicyInput, GetShardIteratorInput, + IncreaseStreamRetentionPeriodInput, ListShardsInput, ListStreamConsumersInput, + ListStreamsInput, ListTagsForStreamInput, MergeShardsInput, PutRecordInput, + PutRecordsInput, PutResourcePolicyInput, RegisterStreamConsumerInput, + RemoveTagsFromStreamInput, SplitShardInput, StartStreamEncryptionInput, + StopStreamEncryptionInput, SubscribeToShardInput, UpdateShardCountInput, + }, + output::{ + DescribeLimitsOutput, DescribeStreamConsumerOutput, DescribeStreamOutput, + DescribeStreamSummaryOutput, GetRecordsOutput, GetResourcePolicyOutput, + GetShardIteratorOutput, ListShardsOutput, ListStreamConsumersOutput, ListStreamsOutput, + ListTagsForStreamOutput, PutRecordOutput, PutRecordsOutput, RegisterStreamConsumerOutput, + UpdateShardCountOutput, + }, + types::{ + Consumer, ConsumerDescription, ConsumerStatus, EncryptionType, EnhancedMetrics, + PutRecordsResultEntry, Record, SequenceNumberRange, Shard, ShardFilterType, + ShardIteratorType, StreamDescription, StreamDescriptionSummary, StreamMode, + StreamModeDetails, StreamStatus, StreamSummary, Tag, + }, +}; use tokio::sync::{mpsc, oneshot}; use uuid::Uuid; -use ruststack_kinesis_model::error::{KinesisError, KinesisErrorCode}; -use ruststack_kinesis_model::input::{ - AddTagsToStreamInput, CreateStreamInput, DecreaseStreamRetentionPeriodInput, - DeleteResourcePolicyInput, DeleteStreamInput, DeregisterStreamConsumerInput, - 
DescribeLimitsInput, DescribeStreamConsumerInput, DescribeStreamInput, - DescribeStreamSummaryInput, GetRecordsInput, GetResourcePolicyInput, GetShardIteratorInput, - IncreaseStreamRetentionPeriodInput, ListShardsInput, ListStreamConsumersInput, - ListStreamsInput, ListTagsForStreamInput, MergeShardsInput, PutRecordInput, PutRecordsInput, - PutResourcePolicyInput, RegisterStreamConsumerInput, RemoveTagsFromStreamInput, - SplitShardInput, StartStreamEncryptionInput, StopStreamEncryptionInput, SubscribeToShardInput, - UpdateShardCountInput, -}; -use ruststack_kinesis_model::output::{ - DescribeLimitsOutput, DescribeStreamConsumerOutput, DescribeStreamOutput, - DescribeStreamSummaryOutput, GetRecordsOutput, GetResourcePolicyOutput, GetShardIteratorOutput, - ListShardsOutput, ListStreamConsumersOutput, ListStreamsOutput, ListTagsForStreamOutput, - PutRecordOutput, PutRecordsOutput, RegisterStreamConsumerOutput, UpdateShardCountOutput, +use crate::{ + config::KinesisConfig, + shard::{ + actor::{IteratorRequest, ShardCommand, ShardHandle, ShardInfo}, + hash::{HashKey, HashKeyRange}, + iterator::ShardIteratorToken, + }, }; -use ruststack_kinesis_model::types::{ - Consumer, ConsumerDescription, ConsumerStatus, EncryptionType, EnhancedMetrics, - PutRecordsResultEntry, Record, SequenceNumberRange, Shard, ShardFilterType, ShardIteratorType, - StreamDescription, StreamDescriptionSummary, StreamMode, StreamModeDetails, StreamStatus, - StreamSummary, Tag, -}; - -use crate::config::KinesisConfig; -use crate::shard::actor::{IteratorRequest, ShardCommand, ShardHandle, ShardInfo}; -use crate::shard::hash::{HashKey, HashKeyRange}; -use crate::shard::iterator::ShardIteratorToken; /// Internal consumer metadata. 
#[derive(Debug, Clone)] diff --git a/crates/ruststack-kinesis-core/src/shard/actor.rs b/crates/ruststack-kinesis-core/src/shard/actor.rs index 5a85433..b04aa31 100644 --- a/crates/ruststack-kinesis-core/src/shard/actor.rs +++ b/crates/ruststack-kinesis-core/src/shard/actor.rs @@ -1,17 +1,17 @@ //! Shard actor implementing the actor-per-shard pattern. -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; use bytes::Bytes; use chrono::{DateTime, Utc}; -use tokio::sync::{mpsc, oneshot}; - use ruststack_kinesis_model::types::HashKeyRange as ModelHashKeyRange; +use tokio::sync::{mpsc, oneshot}; -use super::hash::HashKeyRange; -use super::sequence::{SequenceNumber, SequenceNumberGenerator}; -use super::storage::{ShardRecordLog, StoredRecord}; +use super::{ + hash::HashKeyRange, + sequence::{SequenceNumber, SequenceNumberGenerator}, + storage::{ShardRecordLog, StoredRecord}, +}; /// Commands sent to a shard actor. #[derive(Debug)] diff --git a/crates/ruststack-kinesis-core/src/shard/iterator.rs b/crates/ruststack-kinesis-core/src/shard/iterator.rs index fd9c274..0147119 100644 --- a/crates/ruststack-kinesis-core/src/shard/iterator.rs +++ b/crates/ruststack-kinesis-core/src/shard/iterator.rs @@ -1,7 +1,6 @@ //! Shard iterator token encoding and decoding. -use base64::Engine; -use base64::engine::general_purpose::STANDARD; +use base64::{Engine, engine::general_purpose::STANDARD}; /// A shard iterator token encoding the stream name, shard ID, position, and nonce. #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/crates/ruststack-kinesis-core/src/shard/sequence.rs b/crates/ruststack-kinesis-core/src/shard/sequence.rs index 4519d08..9609a97 100644 --- a/crates/ruststack-kinesis-core/src/shard/sequence.rs +++ b/crates/ruststack-kinesis-core/src/shard/sequence.rs @@ -1,7 +1,9 @@ //! Sequence number generation for Kinesis records. 
-use std::str::FromStr; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::{ + str::FromStr, + sync::atomic::{AtomicU64, Ordering}, +}; /// A 128-bit sequence number for a Kinesis record. /// diff --git a/crates/ruststack-kinesis-http/src/body.rs b/crates/ruststack-kinesis-http/src/body.rs index 52565ab..8d946b7 100644 --- a/crates/ruststack-kinesis-http/src/body.rs +++ b/crates/ruststack-kinesis-http/src/body.rs @@ -1,7 +1,9 @@ //! Kinesis HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-kinesis-http/src/dispatch.rs b/crates/ruststack-kinesis-http/src/dispatch.rs index c27cfa9..aa161bc 100644 --- a/crates/ruststack-kinesis-http/src/dispatch.rs +++ b/crates/ruststack-kinesis-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! Kinesis handler trait and operation dispatch. -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_kinesis_model::error::KinesisError; -use ruststack_kinesis_model::operations::KinesisOperation; +use ruststack_kinesis_model::{error::KinesisError, operations::KinesisOperation}; use crate::body::KinesisResponseBody; diff --git a/crates/ruststack-kinesis-http/src/router.rs b/crates/ruststack-kinesis-http/src/router.rs index 0fc1d57..0c9b17e 100644 --- a/crates/ruststack-kinesis-http/src/router.rs +++ b/crates/ruststack-kinesis-http/src/router.rs @@ -7,8 +7,7 @@ //! X-Amz-Target: Kinesis_20131202.PutRecord //! ``` -use ruststack_kinesis_model::error::KinesisError; -use ruststack_kinesis_model::operations::KinesisOperation; +use ruststack_kinesis_model::{error::KinesisError, operations::KinesisOperation}; /// The expected prefix for the `X-Amz-Target` header value. 
const TARGET_PREFIX: &str = "Kinesis_20131202."; diff --git a/crates/ruststack-kinesis-http/src/service.rs b/crates/ruststack-kinesis-http/src/service.rs index 9e5309e..6c56aee 100644 --- a/crates/ruststack-kinesis-http/src/service.rs +++ b/crates/ruststack-kinesis-http/src/service.rs @@ -1,20 +1,18 @@ //! Kinesis HTTP service implementing the hyper `Service` trait. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_kinesis_model::error::KinesisError; -use crate::body::KinesisResponseBody; -use crate::dispatch::{KinesisHandler, dispatch_operation}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::KinesisResponseBody, + dispatch::{KinesisHandler, dispatch_operation}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the Kinesis HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-kinesis-model/src/blob.rs b/crates/ruststack-kinesis-model/src/blob.rs index 19c4e2d..96155e1 100644 --- a/crates/ruststack-kinesis-model/src/blob.rs +++ b/crates/ruststack-kinesis-model/src/blob.rs @@ -4,8 +4,7 @@ //! in JSON. This module provides custom serde serializers/deserializers for //! `bytes::Bytes` that handle the base64 encoding. -use base64::Engine; -use base64::engine::general_purpose::STANDARD; +use base64::{Engine, engine::general_purpose::STANDARD}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Serialize `bytes::Bytes` as a base64 string. 
diff --git a/crates/ruststack-kinesis-model/src/epoch_seconds.rs b/crates/ruststack-kinesis-model/src/epoch_seconds.rs index aa2b756..7b29829 100644 --- a/crates/ruststack-kinesis-model/src/epoch_seconds.rs +++ b/crates/ruststack-kinesis-model/src/epoch_seconds.rs @@ -56,9 +56,10 @@ pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D:: /// Serde helpers for `Option>` as epoch seconds. pub mod option { - use super::{DateTime, Deserializer, Serializer, TimeZone, Utc, epoch_to_datetime}; use serde::de; + use super::{DateTime, Deserializer, Serializer, TimeZone, Utc, epoch_to_datetime}; + /// Serialize `Option>` as epoch seconds (f64) or null. pub fn serialize(dt: &Option>, s: S) -> Result { match dt { diff --git a/crates/ruststack-kms-core/src/crypto.rs b/crates/ruststack-kms-core/src/crypto.rs index 1e6f714..97ab993 100644 --- a/crates/ruststack-kms-core/src/crypto.rs +++ b/crates/ruststack-kms-core/src/crypto.rs @@ -5,19 +5,21 @@ use std::collections::HashMap; -use aws_lc_rs::aead::{self, Aad, BoundKey, NONCE_LEN, Nonce, NonceSequence, SealingKey}; -use aws_lc_rs::encoding::AsDer; -use aws_lc_rs::rand::{SecureRandom, SystemRandom}; -use aws_lc_rs::signature::{self, EcdsaKeyPair, KeyPair as _, RsaKeyPair}; - -use ruststack_kms_model::error::{KmsError, KmsErrorCode}; -use ruststack_kms_model::types::{ - DataKeyPairSpec, DataKeySpec, EncryptionAlgorithmSpec, KeySpec, MacAlgorithmSpec, - SigningAlgorithmSpec, +use aws_lc_rs::{ + aead::{self, Aad, BoundKey, NONCE_LEN, Nonce, NonceSequence, SealingKey}, + encoding::AsDer, + rand::{SecureRandom, SystemRandom}, + signature::{self, EcdsaKeyPair, KeyPair as _, RsaKeyPair}, +}; +use ruststack_kms_model::{ + error::{KmsError, KmsErrorCode}, + types::{ + DataKeyPairSpec, DataKeySpec, EncryptionAlgorithmSpec, KeySpec, MacAlgorithmSpec, + SigningAlgorithmSpec, + }, }; -use crate::ciphertext; -use crate::key::KeyMaterial; +use crate::{ciphertext, key::KeyMaterial}; /// Thread-safe random number generator. 
fn rng() -> &'static SystemRandom { diff --git a/crates/ruststack-kms-core/src/handler.rs b/crates/ruststack-kms-core/src/handler.rs index 5b93683..dc39340 100644 --- a/crates/ruststack-kms-core/src/handler.rs +++ b/crates/ruststack-kms-core/src/handler.rs @@ -1,16 +1,10 @@ //! KMS handler implementation bridging HTTP to business logic. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_kms_http::body::KmsResponseBody; -use ruststack_kms_http::dispatch::KmsHandler; -use ruststack_kms_http::response::json_response; -use ruststack_kms_model::error::KmsError; -use ruststack_kms_model::operations::KmsOperation; +use ruststack_kms_http::{body::KmsResponseBody, dispatch::KmsHandler, response::json_response}; +use ruststack_kms_model::{error::KmsError, operations::KmsOperation}; use crate::provider::RustStackKms; diff --git a/crates/ruststack-kms-core/src/key.rs b/crates/ruststack-kms-core/src/key.rs index 9d90312..751cd93 100644 --- a/crates/ruststack-kms-core/src/key.rs +++ b/crates/ruststack-kms-core/src/key.rs @@ -3,7 +3,6 @@ use std::collections::HashMap; use chrono::{DateTime, Utc}; - use ruststack_kms_model::types::{ EncryptionAlgorithmSpec, KeySpec, KeyState, KeyUsageType, MacAlgorithmSpec, OriginType, SigningAlgorithmSpec, diff --git a/crates/ruststack-kms-core/src/provider.rs b/crates/ruststack-kms-core/src/provider.rs index dc736c6..0567ce0 100644 --- a/crates/ruststack-kms-core/src/provider.rs +++ b/crates/ruststack-kms-core/src/provider.rs @@ -1,44 +1,48 @@ //! KMS provider implementing all 39 operations. 
-use std::collections::HashMap; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use chrono::Utc; - -use ruststack_kms_model::error::{KmsError, KmsErrorCode}; -use ruststack_kms_model::input::{ - CancelKeyDeletionInput, CreateAliasInput, CreateGrantInput, CreateKeyInput, DecryptInput, - DeleteAliasInput, DescribeKeyInput, DisableKeyInput, DisableKeyRotationInput, EnableKeyInput, - EnableKeyRotationInput, EncryptInput, GenerateDataKeyInput, GenerateDataKeyPairInput, - GenerateDataKeyPairWithoutPlaintextInput, GenerateDataKeyWithoutPlaintextInput, - GenerateMacInput, GenerateRandomInput, GetKeyPolicyInput, GetKeyRotationStatusInput, - GetPublicKeyInput, ListAliasesInput, ListGrantsInput, ListKeyPoliciesInput, ListKeysInput, - ListResourceTagsInput, ListRetirableGrantsInput, PutKeyPolicyInput, ReEncryptInput, - RetireGrantInput, RevokeGrantInput, ScheduleKeyDeletionInput, SignInput, TagResourceInput, - UntagResourceInput, UpdateAliasInput, UpdateKeyDescriptionInput, VerifyInput, VerifyMacInput, -}; -use ruststack_kms_model::output::{ - CancelKeyDeletionResponse, CreateGrantResponse, CreateKeyResponse, DecryptResponse, - DescribeKeyResponse, EncryptResponse, GenerateDataKeyPairResponse, - GenerateDataKeyPairWithoutPlaintextResponse, GenerateDataKeyResponse, - GenerateDataKeyWithoutPlaintextResponse, GenerateMacResponse, GenerateRandomResponse, - GetKeyPolicyResponse, GetKeyRotationStatusResponse, GetPublicKeyResponse, ListAliasesResponse, - ListGrantsResponse, ListKeyPoliciesResponse, ListKeysResponse, ListResourceTagsResponse, - ReEncryptResponse, ScheduleKeyDeletionResponse, SignResponse, VerifyMacResponse, - VerifyResponse, -}; -use ruststack_kms_model::types::{ - AliasListEntry, DataKeySpec, EncryptionAlgorithmSpec, GrantListEntry, KeyListEntry, - KeyManagerType, KeyMetadata, KeySpec, KeyState, KeyUsageType, OriginType, Tag, +use ruststack_kms_model::{ + error::{KmsError, KmsErrorCode}, + input::{ + CancelKeyDeletionInput, CreateAliasInput, 
CreateGrantInput, CreateKeyInput, DecryptInput, + DeleteAliasInput, DescribeKeyInput, DisableKeyInput, DisableKeyRotationInput, + EnableKeyInput, EnableKeyRotationInput, EncryptInput, GenerateDataKeyInput, + GenerateDataKeyPairInput, GenerateDataKeyPairWithoutPlaintextInput, + GenerateDataKeyWithoutPlaintextInput, GenerateMacInput, GenerateRandomInput, + GetKeyPolicyInput, GetKeyRotationStatusInput, GetPublicKeyInput, ListAliasesInput, + ListGrantsInput, ListKeyPoliciesInput, ListKeysInput, ListResourceTagsInput, + ListRetirableGrantsInput, PutKeyPolicyInput, ReEncryptInput, RetireGrantInput, + RevokeGrantInput, ScheduleKeyDeletionInput, SignInput, TagResourceInput, + UntagResourceInput, UpdateAliasInput, UpdateKeyDescriptionInput, VerifyInput, + VerifyMacInput, + }, + output::{ + CancelKeyDeletionResponse, CreateGrantResponse, CreateKeyResponse, DecryptResponse, + DescribeKeyResponse, EncryptResponse, GenerateDataKeyPairResponse, + GenerateDataKeyPairWithoutPlaintextResponse, GenerateDataKeyResponse, + GenerateDataKeyWithoutPlaintextResponse, GenerateMacResponse, GenerateRandomResponse, + GetKeyPolicyResponse, GetKeyRotationStatusResponse, GetPublicKeyResponse, + ListAliasesResponse, ListGrantsResponse, ListKeyPoliciesResponse, ListKeysResponse, + ListResourceTagsResponse, ReEncryptResponse, ScheduleKeyDeletionResponse, SignResponse, + VerifyMacResponse, VerifyResponse, + }, + types::{ + AliasListEntry, DataKeySpec, EncryptionAlgorithmSpec, GrantListEntry, KeyListEntry, + KeyManagerType, KeyMetadata, KeySpec, KeyState, KeyUsageType, OriginType, Tag, + }, }; -use crate::ciphertext; -use crate::config::KmsConfig; -use crate::crypto; -use crate::key::{KeyMaterial, KmsKey}; -use crate::resolve::resolve_key_id; -use crate::state::{AliasEntry, GrantEntry, KmsStore}; -use crate::validation; +use crate::{ + ciphertext, + config::KmsConfig, + crypto, + key::{KeyMaterial, KmsKey}, + resolve::resolve_key_id, + state::{AliasEntry, GrantEntry, KmsStore}, + validation, +}; /// 
The KMS business logic provider. #[derive(Debug)] diff --git a/crates/ruststack-kms-core/src/state.rs b/crates/ruststack-kms-core/src/state.rs index 75b4a19..20d0074 100644 --- a/crates/ruststack-kms-core/src/state.rs +++ b/crates/ruststack-kms-core/src/state.rs @@ -3,7 +3,6 @@ use std::collections::HashMap; use dashmap::DashMap; - use ruststack_kms_model::types::{GrantConstraints, GrantOperation}; use crate::key::KmsKey; diff --git a/crates/ruststack-kms-core/src/validation.rs b/crates/ruststack-kms-core/src/validation.rs index 2f3b8b0..0574eed 100644 --- a/crates/ruststack-kms-core/src/validation.rs +++ b/crates/ruststack-kms-core/src/validation.rs @@ -1,9 +1,11 @@ //! Input validation for KMS operations. -use ruststack_kms_model::error::{KmsError, KmsErrorCode}; -use ruststack_kms_model::types::{ - EncryptionAlgorithmSpec, KeySpec, KeyState, KeyUsageType, MacAlgorithmSpec, - SigningAlgorithmSpec, +use ruststack_kms_model::{ + error::{KmsError, KmsErrorCode}, + types::{ + EncryptionAlgorithmSpec, KeySpec, KeyState, KeyUsageType, MacAlgorithmSpec, + SigningAlgorithmSpec, + }, }; use crate::key::KmsKey; diff --git a/crates/ruststack-kms-http/src/body.rs b/crates/ruststack-kms-http/src/body.rs index 7dc36e2..a3aef48 100644 --- a/crates/ruststack-kms-http/src/body.rs +++ b/crates/ruststack-kms-http/src/body.rs @@ -1,7 +1,9 @@ //! KMS HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-kms-http/src/dispatch.rs b/crates/ruststack-kms-http/src/dispatch.rs index 99b2117..1b433cb 100644 --- a/crates/ruststack-kms-http/src/dispatch.rs +++ b/crates/ruststack-kms-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! KMS handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_kms_model::error::KmsError; -use ruststack_kms_model::operations::KmsOperation; +use ruststack_kms_model::{error::KmsError, operations::KmsOperation}; use crate::body::KmsResponseBody; diff --git a/crates/ruststack-kms-http/src/router.rs b/crates/ruststack-kms-http/src/router.rs index 0eb5685..657fd68 100644 --- a/crates/ruststack-kms-http/src/router.rs +++ b/crates/ruststack-kms-http/src/router.rs @@ -7,8 +7,7 @@ //! X-Amz-Target: TrentService.CreateKey //! ``` -use ruststack_kms_model::error::KmsError; -use ruststack_kms_model::operations::KmsOperation; +use ruststack_kms_model::{error::KmsError, operations::KmsOperation}; /// The expected prefix for the `X-Amz-Target` header value. const TARGET_PREFIX: &str = "TrentService."; diff --git a/crates/ruststack-kms-http/src/service.rs b/crates/ruststack-kms-http/src/service.rs index 38ad31e..40b3537 100644 --- a/crates/ruststack-kms-http/src/service.rs +++ b/crates/ruststack-kms-http/src/service.rs @@ -1,20 +1,18 @@ //! KMS HTTP service implementing the hyper `Service` trait. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_kms_model::error::KmsError; -use crate::body::KmsResponseBody; -use crate::dispatch::{KmsHandler, dispatch_operation}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::KmsResponseBody, + dispatch::{KmsHandler, dispatch_operation}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the KMS HTTP service. 
#[derive(Clone)] diff --git a/crates/ruststack-kms-model/src/blob.rs b/crates/ruststack-kms-model/src/blob.rs index 88a9d51..d206cda 100644 --- a/crates/ruststack-kms-model/src/blob.rs +++ b/crates/ruststack-kms-model/src/blob.rs @@ -4,8 +4,7 @@ //! as base64-encoded strings in JSON. This module provides custom serde //! serializers/deserializers for `bytes::Bytes` that handle the base64 encoding. -use base64::Engine; -use base64::engine::general_purpose::STANDARD; +use base64::{Engine, engine::general_purpose::STANDARD}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Serialize `bytes::Bytes` as a base64 string. diff --git a/crates/ruststack-lambda-core/src/error.rs b/crates/ruststack-lambda-core/src/error.rs index 2084669..dbc2cfa 100644 --- a/crates/ruststack-lambda-core/src/error.rs +++ b/crates/ruststack-lambda-core/src/error.rs @@ -72,6 +72,22 @@ pub enum LambdaServiceError { #[error("Docker not available")] DockerNotAvailable, + /// Event source mapping does not exist. + #[error("Event source mapping not found: {uuid}")] + EventSourceMappingNotFound { + /// UUID that was not found. + uuid: String, + }, + + /// Event invoke config does not exist. + #[error("Event invoke config not found: {function_name}:{qualifier}")] + EventInvokeConfigNotFound { + /// Function name. + function_name: String, + /// Qualifier. + qualifier: String, + }, + /// Policy statement not found. 
#[error("Policy not found: {sid}")] PolicyNotFound { @@ -106,13 +122,15 @@ impl From for LambdaError { ref function_name, ref version, } => LambdaError::resource_not_found(format!( - "Function not found: arn:aws:lambda:us-east-1:000000000000:function:{function_name}:{version}" + "Function not found: \ + arn:aws:lambda:us-east-1:000000000000:function:{function_name}:{version}" )), LambdaServiceError::AliasNotFound { ref function_name, ref alias, } => LambdaError::resource_not_found(format!( - "Function not found: arn:aws:lambda:us-east-1:000000000000:function:{function_name}:{alias}" + "Function not found: \ + arn:aws:lambda:us-east-1:000000000000:function:{function_name}:{alias}" )), LambdaServiceError::ResourceConflict { ref message } => { LambdaError::resource_conflict(message) @@ -135,9 +153,23 @@ impl From for LambdaError { LambdaServiceError::RequestTooLarge { ref message } => { LambdaError::new(LambdaErrorCode::RequestTooLargeException, message.clone()) } + LambdaServiceError::EventSourceMappingNotFound { ref uuid } => { + LambdaError::resource_not_found(format!( + "The resource you requested does not exist. (Service: Lambda, Status Code: \ + 404, Request ID: 00000000-0000-0000-0000-000000000000, Extended Request ID: \ + null) UUID: {uuid}" + )) + } LambdaServiceError::PolicyNotFound { ref sid } => { LambdaError::resource_not_found(format!("No policy is found for: {sid}")) } + LambdaServiceError::EventInvokeConfigNotFound { + ref function_name, + ref qualifier, + } => LambdaError::resource_not_found(format!( + "The function {function_name} doesn't have an EventInvokeConfig for qualifier \ + {qualifier}" + )), LambdaServiceError::Internal { ref message } => { LambdaError::service_error(message.clone()) } diff --git a/crates/ruststack-lambda-core/src/handler.rs b/crates/ruststack-lambda-core/src/handler.rs index 8154cdf..846402d 100644 --- a/crates/ruststack-lambda-core/src/handler.rs +++ b/crates/ruststack-lambda-core/src/handler.rs @@ -6,24 +6,29 @@ //! 
Uses manual `Pin>` return types because the `LambdaHandler` //! trait requires object safety for `Arc`. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_lambda_http::body::LambdaResponseBody; -use ruststack_lambda_http::dispatch::LambdaHandler; -use ruststack_lambda_http::response::{empty_response, json_response}; -use ruststack_lambda_http::router::PathParams; -use ruststack_lambda_model::error::LambdaError; -use ruststack_lambda_model::input::{ - AddPermissionInput, CreateAliasInput, CreateFunctionInput, CreateFunctionUrlConfigInput, - PublishVersionInput, TagResourceInput, UpdateAliasInput, UpdateFunctionCodeInput, - UpdateFunctionConfigurationInput, UpdateFunctionUrlConfigInput, +use ruststack_lambda_http::{ + body::LambdaResponseBody, + dispatch::LambdaHandler, + response::{empty_response, json_response}, + router::PathParams, +}; +use ruststack_lambda_model::{ + error::LambdaError, + input::{ + AddLayerVersionPermissionInput, AddPermissionInput, CreateAliasInput, + CreateEventSourceMappingInput, CreateFunctionInput, CreateFunctionUrlConfigInput, + EventInvokeConfigInput, PublishLayerVersionInput, PublishVersionInput, + PutFunctionConcurrencyInput, TagResourceInput, UpdateAliasInput, + UpdateEventSourceMappingInput, UpdateFunctionCodeInput, UpdateFunctionConfigurationInput, + UpdateFunctionUrlConfigInput, + }, + operations::LambdaOperation, + output::ListFunctionEventInvokeConfigsOutput, + types::InvocationType, }; -use ruststack_lambda_model::operations::LambdaOperation; -use ruststack_lambda_model::types::InvocationType; use crate::provider::RustStackLambda; @@ -433,6 +438,258 @@ async fn dispatch( wrap_json_response(200, &output) } + // ---- Phase 2b: Lambda Layers ---- + LambdaOperation::PublishLayerVersion => { + let layer_name = require_path_param(path_params, "LayerName")?; + let input: PublishLayerVersionInput = serde_json::from_slice(body).map_err(|e| 
{ + LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })?; + let output = provider + .publish_layer_version(layer_name, &input) + .map_err(LambdaError::from)?; + wrap_json_response(201, &output) + } + + LambdaOperation::GetLayerVersion => { + let layer_name = require_path_param(path_params, "LayerName")?; + let version_number = require_path_param(path_params, "VersionNumber")?; + let version: u64 = version_number.parse().map_err(|_| { + LambdaError::invalid_parameter(format!("Invalid version number: {version_number}")) + })?; + let output = provider + .get_layer_version(layer_name, version) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::GetLayerVersionByArn => { + // The ARN is passed as a query parameter. + let arn = get_query_param(&query_params, "Arn") + .ok_or_else(|| LambdaError::invalid_parameter("Arn query parameter is required"))?; + let output = provider + .get_layer_version_by_arn(arn) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::ListLayerVersions => { + let layer_name = require_path_param(path_params, "LayerName")?; + let marker = get_query_param(&query_params, "Marker"); + let max_items = + get_query_param(&query_params, "MaxItems").and_then(|v| v.parse::().ok()); + let output = provider + .list_layer_versions(layer_name, marker, max_items) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::ListLayers => { + let marker = get_query_param(&query_params, "Marker"); + let max_items = + get_query_param(&query_params, "MaxItems").and_then(|v| v.parse::().ok()); + let output = provider.list_layers(marker, max_items); + wrap_json_response(200, &output) + } + + LambdaOperation::DeleteLayerVersion => { + let layer_name = require_path_param(path_params, "LayerName")?; + let version_number = require_path_param(path_params, "VersionNumber")?; + let version: u64 = version_number.parse().map_err(|_| { + 
LambdaError::invalid_parameter(format!("Invalid version number: {version_number}")) + })?; + provider + .delete_layer_version(layer_name, version) + .map_err(LambdaError::from)?; + wrap_empty_response(204) + } + + LambdaOperation::AddLayerVersionPermission => { + let layer_name = require_path_param(path_params, "LayerName")?; + let version_number = require_path_param(path_params, "VersionNumber")?; + let version: u64 = version_number.parse().map_err(|_| { + LambdaError::invalid_parameter(format!("Invalid version number: {version_number}")) + })?; + let input: AddLayerVersionPermissionInput = + serde_json::from_slice(body).map_err(|e| { + LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })?; + let output = provider + .add_layer_version_permission(layer_name, version, &input) + .map_err(LambdaError::from)?; + wrap_json_response(201, &output) + } + + LambdaOperation::GetLayerVersionPolicy => { + let layer_name = require_path_param(path_params, "LayerName")?; + let version_number = require_path_param(path_params, "VersionNumber")?; + let version: u64 = version_number.parse().map_err(|_| { + LambdaError::invalid_parameter(format!("Invalid version number: {version_number}")) + })?; + let output = provider + .get_layer_version_policy(layer_name, version) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::RemoveLayerVersionPermission => { + let layer_name = require_path_param(path_params, "LayerName")?; + let version_number = require_path_param(path_params, "VersionNumber")?; + let version: u64 = version_number.parse().map_err(|_| { + LambdaError::invalid_parameter(format!("Invalid version number: {version_number}")) + })?; + let statement_id = require_path_param(path_params, "StatementId")?; + provider + .remove_layer_version_permission(layer_name, version, statement_id) + .map_err(LambdaError::from)?; + wrap_empty_response(204) + } + + // ---- Phase 3: Event Source Mappings ---- + 
LambdaOperation::CreateEventSourceMapping => { + let input: CreateEventSourceMappingInput = + serde_json::from_slice(body).map_err(|e| { + LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })?; + let output = provider + .create_event_source_mapping(&input) + .map_err(LambdaError::from)?; + wrap_json_response(202, &output) + } + + LambdaOperation::GetEventSourceMapping => { + let uuid = require_path_param(path_params, "UUID")?; + let output = provider + .get_event_source_mapping(uuid) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::UpdateEventSourceMapping => { + let uuid = require_path_param(path_params, "UUID")?; + let input: UpdateEventSourceMappingInput = + serde_json::from_slice(body).map_err(|e| { + LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })?; + let output = provider + .update_event_source_mapping(uuid, &input) + .map_err(LambdaError::from)?; + wrap_json_response(202, &output) + } + + LambdaOperation::DeleteEventSourceMapping => { + let uuid = require_path_param(path_params, "UUID")?; + let output = provider + .delete_event_source_mapping(uuid) + .map_err(LambdaError::from)?; + wrap_json_response(202, &output) + } + + LambdaOperation::ListEventSourceMappings => { + let function_name = get_query_param(&query_params, "FunctionName"); + let event_source_arn = get_query_param(&query_params, "EventSourceArn"); + let marker = get_query_param(&query_params, "Marker"); + let max_items = + get_query_param(&query_params, "MaxItems").and_then(|v| v.parse::().ok()); + let output = provider.list_event_source_mappings( + function_name, + event_source_arn, + marker, + max_items, + ); + wrap_json_response(200, &output) + } + + // ---- Phase 6: Concurrency ---- + LambdaOperation::PutFunctionConcurrency => { + let function_name = require_path_param(path_params, "FunctionName")?; + let input: PutFunctionConcurrencyInput = serde_json::from_slice(body).map_err(|e| { + 
LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })?; + let output = provider + .put_function_concurrency(function_name, input.reserved_concurrent_executions) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::GetFunctionConcurrency => { + let function_name = require_path_param(path_params, "FunctionName")?; + let output = provider + .get_function_concurrency(function_name) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::DeleteFunctionConcurrency => { + let function_name = require_path_param(path_params, "FunctionName")?; + provider + .delete_function_concurrency(function_name) + .map_err(LambdaError::from)?; + wrap_empty_response(204) + } + + // ---- Phase 6: Event Invoke Config ---- + LambdaOperation::PutFunctionEventInvokeConfig => { + let function_name = require_path_param(path_params, "FunctionName")?; + let input: EventInvokeConfigInput = if body.is_empty() { + EventInvokeConfigInput::default() + } else { + serde_json::from_slice(body).map_err(|e| { + LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })? + }; + let output = provider + .put_function_event_invoke_config(function_name, qualifier, &input) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::GetFunctionEventInvokeConfig => { + let function_name = require_path_param(path_params, "FunctionName")?; + let output = provider + .get_function_event_invoke_config(function_name, qualifier) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::UpdateFunctionEventInvokeConfig => { + let function_name = require_path_param(path_params, "FunctionName")?; + let input: EventInvokeConfigInput = if body.is_empty() { + EventInvokeConfigInput::default() + } else { + serde_json::from_slice(body).map_err(|e| { + LambdaError::invalid_parameter(format!("Invalid request body: {e}")) + })? 
+ }; + let output = provider + .update_function_event_invoke_config(function_name, qualifier, &input) + .map_err(LambdaError::from)?; + wrap_json_response(200, &output) + } + + LambdaOperation::DeleteFunctionEventInvokeConfig => { + let function_name = require_path_param(path_params, "FunctionName")?; + provider + .delete_function_event_invoke_config(function_name, qualifier) + .map_err(LambdaError::from)?; + wrap_empty_response(204) + } + + LambdaOperation::ListFunctionEventInvokeConfigs => { + let function_name = require_path_param(path_params, "FunctionName")?; + let configs = provider + .list_function_event_invoke_configs(function_name) + .map_err(LambdaError::from)?; + let output = ListFunctionEventInvokeConfigsOutput { + function_event_invoke_configs: if configs.is_empty() { + None + } else { + Some(configs) + }, + next_marker: None, + }; + wrap_json_response(200, &output) + } + _ => Err(LambdaError::service_error(format!( "Operation {op} is not implemented" ))), diff --git a/crates/ruststack-lambda-core/src/provider.rs b/crates/ruststack-lambda-core/src/provider.rs index 6469c56..4235677 100644 --- a/crates/ruststack-lambda-core/src/provider.rs +++ b/crates/ruststack-lambda-core/src/provider.rs @@ -16,39 +16,53 @@ const MAX_ZIP_SIZE: u64 = 50 * 1024 * 1024; /// Maximum synchronous invoke payload size (6 MB, per Appendix C). 
const MAX_SYNC_PAYLOAD: usize = 6 * 1024 * 1024; -use ruststack_lambda_model::input::{ - AddPermissionInput, CreateAliasInput, CreateFunctionInput, CreateFunctionUrlConfigInput, - PublishVersionInput, TagResourceInput, UpdateAliasInput, UpdateFunctionCodeInput, - UpdateFunctionConfigurationInput, UpdateFunctionUrlConfigInput, -}; -use ruststack_lambda_model::output::{ - AccountLimit, AccountUsage, AddPermissionOutput, GetAccountSettingsOutput, GetFunctionOutput, - GetPolicyOutput, ListAliasesOutput, ListFunctionUrlConfigsOutput, ListFunctionsOutput, - ListTagsOutput, ListVersionsOutput, -}; -use ruststack_lambda_model::types::{ - AliasConfiguration, AliasRoutingConfiguration, EnvironmentResponse, EphemeralStorage, - FunctionCodeLocation, FunctionConfiguration, FunctionUrlConfig, ImageConfigResponse, Layer, - SnapStartResponse, TracingConfigResponse, VpcConfigResponse, +use ruststack_lambda_model::{ + input::{ + AddLayerVersionPermissionInput, AddPermissionInput, CreateAliasInput, + CreateEventSourceMappingInput, CreateFunctionInput, CreateFunctionUrlConfigInput, + EventInvokeConfigInput, PublishLayerVersionInput, PublishVersionInput, TagResourceInput, + UpdateAliasInput, UpdateEventSourceMappingInput, UpdateFunctionCodeInput, + UpdateFunctionConfigurationInput, UpdateFunctionUrlConfigInput, + }, + output::{ + AccountLimit, AccountUsage, AddLayerVersionPermissionOutput, AddPermissionOutput, + GetAccountSettingsOutput, GetFunctionOutput, GetLayerVersionPolicyOutput, GetPolicyOutput, + ListAliasesOutput, ListEventSourceMappingsOutput, ListFunctionUrlConfigsOutput, + ListFunctionsOutput, ListLayerVersionsOutput, ListLayersOutput, ListTagsOutput, + ListVersionsOutput, PublishLayerVersionOutput, + }, + types::{ + AliasConfiguration, AliasRoutingConfiguration, Concurrency, EnvironmentResponse, + EphemeralStorage, EventSourceMappingConfiguration, FunctionCodeLocation, + FunctionConfiguration, FunctionEventInvokeConfig, FunctionUrlConfig, ImageConfigResponse, + Layer, 
LayerVersionContentOutput, LayerVersionsListItem, LayersListItem, SnapStartResponse, + TracingConfigResponse, VpcConfigResponse, + }, }; -use crate::config::LambdaConfig; -use crate::error::LambdaServiceError; -use crate::resolver::{ - alias_arn, function_arn, function_version_arn, resolve_function_ref, resolve_version, -}; -use crate::storage::{ - AliasRecord, FunctionRecord, FunctionStore, FunctionUrlConfigRecord, PolicyDocument, - PolicyStatement, VersionRecord, compute_sha256, +use crate::{ + config::LambdaConfig, + error::LambdaServiceError, + resolver::{ + alias_arn, function_arn, function_version_arn, layer_arn, layer_version_arn, + parse_layer_version_arn, resolve_function_ref, resolve_version, + }, + storage::{ + AliasRecord, EventInvokeConfigRecord, EventSourceMappingRecord, EventSourceMappingStore, + FunctionRecord, FunctionStore, FunctionUrlConfigRecord, LayerStore, LayerVersionRecord, + PolicyDocument, PolicyStatement, VersionRecord, compute_sha256, + }, }; /// Lambda business logic provider. /// -/// Holds the function store and service configuration. All operations -/// are implemented as async methods that return domain types or errors. +/// Holds the function store, layer store, and service configuration. +/// All operations are implemented as methods that return domain types or errors. #[derive(Debug)] pub struct RustStackLambda { store: FunctionStore, + layer_store: LayerStore, + esm_store: EventSourceMappingStore, config: LambdaConfig, } @@ -56,7 +70,12 @@ impl RustStackLambda { /// Create a new Lambda provider with the given store and config. #[must_use] pub fn with_store(store: FunctionStore, config: LambdaConfig) -> Self { - Self { store, config } + Self { + store, + layer_store: LayerStore::new(), + esm_store: EventSourceMappingStore::new(), + config, + } } /// Create a new Lambda provider from config, using a temp directory for code. 
@@ -64,7 +83,12 @@ impl RustStackLambda { pub fn new(config: LambdaConfig) -> Self { let code_dir = std::env::temp_dir().join("ruststack-lambda-code"); let store = FunctionStore::new(code_dir); - Self { store, config } + Self { + store, + layer_store: LayerStore::new(), + esm_store: EventSourceMappingStore::new(), + config, + } } /// Returns a reference to the underlying function store. @@ -73,6 +97,18 @@ impl RustStackLambda { &self.store } + /// Returns a reference to the layer store. + #[must_use] + pub fn layer_store(&self) -> &LayerStore { + &self.layer_store + } + + /// Returns a reference to the event source mapping store. + #[must_use] + pub fn esm_store(&self) -> &EventSourceMappingStore { + &self.esm_store + } + /// Returns a reference to the service configuration. #[must_use] pub fn config(&self) -> &LambdaConfig { @@ -277,6 +313,8 @@ impl RustStackLambda { policy: PolicyDocument::default(), tags: input.tags.clone().unwrap_or_default(), url_config: None, + reserved_concurrent_executions: None, + event_invoke_configs: HashMap::new(), created_at: now, }; @@ -609,7 +647,8 @@ impl RustStackLambda { let payload_len = payload.len(); return Err(LambdaServiceError::RequestTooLarge { message: format!( - "Request payload size {payload_len} exceeds the synchronous invoke limit of {MAX_SYNC_PAYLOAD} bytes", + "Request payload size {payload_len} exceeds the synchronous invoke limit of \ + {MAX_SYNC_PAYLOAD} bytes", ), }); } @@ -1403,228 +1442,1130 @@ impl RustStackLambda { } // --------------------------------------------------------------- - // Internal helpers + // Phase 2b: Lambda Layers // --------------------------------------------------------------- - /// Get a function record by name, returning `FunctionNotFound` if absent. - fn get_record(&self, name: &str) -> Result { - self.store - .get(name) - .ok_or(LambdaServiceError::FunctionNotFound { - name: name.to_owned(), - }) - } - - /// Process code input (zip or image URI), returning code metadata. 
- async fn process_code( + /// Publish a new layer version. + pub fn publish_layer_version( &self, - function_name: &str, - version: &str, - zip_file_b64: Option<&str>, - image_uri: Option<&str>, - ) -> Result< - ( - String, - u64, - Option, - Option, - Option, - ), - LambdaServiceError, - > { - if let Some(b64) = zip_file_b64 { - use base64::Engine; - let zip_bytes = base64::engine::general_purpose::STANDARD - .decode(b64) - .map_err(|e| LambdaServiceError::InvalidZipFile { - message: format!("Invalid base64 encoding: {e}"), - })?; - - let (code_path, sha256, size) = self - .store - .store_zip_code(function_name, version, &zip_bytes) - .await?; + layer_name: &str, + input: &PublishLayerVersionInput, + ) -> Result { + if layer_name.is_empty() || layer_name.len() > 140 { + return Err(LambdaServiceError::InvalidParameter { + message: "Layer name must be between 1 and 140 characters".to_owned(), + }); + } - Ok(( - sha256, - size, - Some(Bytes::from(zip_bytes)), - Some(code_path), - None, - )) - } else if let Some(uri) = image_uri { - let sha256 = compute_sha256(uri.as_bytes()); - Ok((sha256, 0, None, None, Some(uri.to_owned()))) - } else { - // No code provided - use empty hash. - let sha256 = compute_sha256(b""); - Ok((sha256, 0, None, None, None)) + if let Some(ref license) = input.license_info { + if license.len() > 512 { + return Err(LambdaServiceError::InvalidParameter { + message: "License info must be at most 512 characters".to_owned(), + }); + } } - } - /// Build a `FunctionConfiguration` from internal records. - fn build_function_configuration( - &self, - record: &FunctionRecord, - version: &VersionRecord, - ) -> FunctionConfiguration { - let arn = if version.version == "$LATEST" { - record.arn.clone() + // Process layer code. 
+ let (code_sha256, code_size) = if let Some(ref content) = input.content { + if let Some(ref b64) = content.zip_file { + use base64::Engine; + let zip_bytes = base64::engine::general_purpose::STANDARD + .decode(b64) + .map_err(|e| LambdaServiceError::InvalidZipFile { + message: format!("Invalid base64 encoding: {e}"), + })?; + let sha256 = compute_sha256(&zip_bytes); + let size = zip_bytes.len() as u64; + (sha256, size) + } else { + // S3 source accepted but not functional; use empty hash. + (compute_sha256(b""), 0) + } } else { - function_version_arn( - &self.config.default_region, - &self.config.account_id, - &record.name, - &version.version, - ) + (compute_sha256(b""), 0) }; - let env_response = if version.environment.is_empty() { - None - } else { - Some(EnvironmentResponse { - variables: Some(version.environment.clone()), - error: None, - }) + let now = now_iso8601(); + let la = layer_arn( + &self.config.default_region, + &self.config.account_id, + layer_name, + ); + + // Create a temporary version record; the store will assign the actual version number. 
+ let version_record = LayerVersionRecord { + version: 0, // will be overwritten + description: input.description.clone().unwrap_or_default(), + compatible_runtimes: input.compatible_runtimes.clone().unwrap_or_default(), + compatible_architectures: input.compatible_architectures.clone().unwrap_or_default(), + license_info: input.license_info.clone(), + code_sha256: code_sha256.clone(), + code_size, + created_date: now.clone(), + layer_arn: la.clone(), + layer_version_arn: String::new(), // will be overwritten + policy: PolicyDocument::default(), }; - let vpc_response = version.vpc_config.as_ref().map(|vpc| VpcConfigResponse { - subnet_ids: vpc.subnet_ids.clone(), - security_group_ids: vpc.security_group_ids.clone(), - vpc_id: None, - }); + let version_num = self + .layer_store + .publish_version(layer_name, &la, version_record); - let tracing_response = version - .tracing_config - .as_ref() - .map(|tc| TracingConfigResponse { - mode: tc.mode.clone(), - }); + // Update the version number and ARN in the stored record. 
+ let lva = layer_version_arn( + &self.config.default_region, + &self.config.account_id, + layer_name, + version_num, + ); + self.layer_store + .update_version(layer_name, version_num, |ver| { + ver.version = version_num; + ver.layer_version_arn.clone_from(&lva); + })?; - let layers = if version.layers.is_empty() { - None - } else { - Some( - version - .layers - .iter() - .map(|l| Layer { - arn: Some(l.clone()), - code_size: None, - signing_profile_version_arn: None, - signing_job_arn: None, - }) - .collect(), - ) - }; + info!(layer_name = %layer_name, version = %version_num, "published layer version"); - let image_config_response = version.image_config.as_ref().map(|ic| ImageConfigResponse { - image_config: Some(ic.clone()), - error: None, - }); + #[allow(clippy::cast_possible_wrap)] + Ok(PublishLayerVersionOutput { + content: Some(LayerVersionContentOutput { + code_sha256: Some(code_sha256), + code_size: Some(code_size as i64), + ..Default::default() + }), + layer_arn: Some(la), + layer_version_arn: Some(lva), + description: input.description.clone(), + created_date: Some(now), + version: Some(version_num as i64), + compatible_runtimes: input.compatible_runtimes.clone(), + license_info: input.license_info.clone(), + compatible_architectures: input.compatible_architectures.clone(), + }) + } - let snap_start_response = version.snap_start.as_ref().map(|ss| SnapStartResponse { - apply_on: ss.apply_on.clone(), - optimization_status: Some("Off".to_owned()), - }); + /// Get a specific layer version. 
+ pub fn get_layer_version( + &self, + layer_name: &str, + version_number: u64, + ) -> Result { + let ver = self + .layer_store + .get_version(layer_name, version_number) + .ok_or(LambdaServiceError::InvalidParameter { + message: format!("Layer version not found: {layer_name}:{version_number}"), + })?; - FunctionConfiguration { - function_name: Some(record.name.clone()), - function_arn: Some(arn), - runtime: version.runtime.clone(), - role: Some(version.role.clone()), - handler: version.handler.clone(), - #[allow(clippy::cast_possible_wrap)] - code_size: Some(version.code_size as i64), - description: if version.description.is_empty() { - None - } else { - Some(version.description.clone()) - }, - timeout: Some(version.timeout), - memory_size: Some(version.memory_size), - last_modified: Some(version.last_modified.clone()), - code_sha256: Some(version.code_sha256.clone()), - version: Some(version.version.clone()), - environment: env_response, - vpc_config: vpc_response, - dead_letter_config: version.dead_letter_config.clone(), - tracing_config: tracing_response, - revision_id: Some(version.revision_id.clone()), - layers, - state: Some(version.state.clone()), - state_reason: None, - state_reason_code: None, - package_type: Some(version.package_type.clone()), - architectures: Some(version.architectures.clone()), - ephemeral_storage: Some(EphemeralStorage { - size: version.ephemeral_storage_size, - }), - logging_config: version.logging_config.clone(), - snap_start: snap_start_response, - image_config_response, - last_update_status: Some("Successful".to_owned()), - last_update_status_reason: None, - last_update_status_reason_code: None, - } + Ok(Self::build_layer_version_output(&ver)) } - /// Extract function name from a Lambda function ARN. - fn extract_function_name_from_arn(arn: &str) -> Result { - // Try ARN parsing first. 
- if arn.starts_with("arn:") { - let (name, _) = resolve_function_ref(arn)?; - return Ok(name); - } - // If it's not an ARN, treat it as a function name. - Ok(arn.to_owned()) + /// Get a layer version by its full ARN. + pub fn get_layer_version_by_arn( + &self, + arn: &str, + ) -> Result { + let (name, version) = parse_layer_version_arn(arn)?; + self.get_layer_version(&name, version) } -} -/// Get current time in ISO 8601 format matching AWS Lambda conventions. -fn now_iso8601() -> String { - chrono::Utc::now() - .format("%Y-%m-%dT%H:%M:%S%.3f+0000") - .to_string() -} + /// List versions of a layer. + pub fn list_layer_versions( + &self, + layer_name: &str, + marker: Option<&str>, + max_items: Option, + ) -> Result { + let versions = self.layer_store.list_versions(layer_name); + let max = max_items.unwrap_or(50).min(10_000); -#[cfg(test)] -mod tests { - use super::*; + let start = marker + .and_then(|m| m.parse::().ok()) + .and_then(|marker_ver| versions.iter().position(|v| v.version > marker_ver)) + .unwrap_or(0); - fn test_provider() -> RustStackLambda { - let tmp = tempfile::tempdir().unwrap(); - let store = FunctionStore::new(tmp.path()); - let config = LambdaConfig::default(); - RustStackLambda::with_store(store, config) - } + #[allow(clippy::cast_possible_wrap)] + let page: Vec = versions + .iter() + .skip(start) + .take(max) + .map(|v| LayerVersionsListItem { + layer_version_arn: Some(v.layer_version_arn.clone()), + version: Some(v.version as i64), + description: if v.description.is_empty() { + None + } else { + Some(v.description.clone()) + }, + created_date: Some(v.created_date.clone()), + compatible_runtimes: if v.compatible_runtimes.is_empty() { + None + } else { + Some(v.compatible_runtimes.clone()) + }, + license_info: v.license_info.clone(), + compatible_architectures: if v.compatible_architectures.is_empty() { + None + } else { + Some(v.compatible_architectures.clone()) + }, + }) + .collect(); - fn sample_create_input(name: &str) -> 
CreateFunctionInput { - use base64::Engine; - let zip_data = base64::engine::general_purpose::STANDARD.encode(b"PK\x03\x04fake"); - CreateFunctionInput { - function_name: name.to_owned(), - runtime: Some("python3.12".to_owned()), - role: "arn:aws:iam::000000000000:role/test-role".to_owned(), - handler: Some("index.handler".to_owned()), - code: ruststack_lambda_model::types::FunctionCode { - zip_file: Some(zip_data), - ..Default::default() - }, - ..Default::default() - } + let next_marker = if start + max < versions.len() { + page.last() + .and_then(|v| v.version.map(|ver| ver.to_string())) + } else { + None + }; + + Ok(ListLayerVersionsOutput { + layer_versions: Some(page), + next_marker, + }) } - #[tokio::test] - async fn test_should_create_and_get_function() { - let provider = test_provider(); + /// List all layers. + #[must_use] + pub fn list_layers(&self, marker: Option<&str>, max_items: Option) -> ListLayersOutput { + let all = self.layer_store.list_layers(); + let max = max_items.unwrap_or(50).min(10_000); - let config = provider - .create_function(sample_create_input("my-func")) - .await - .unwrap(); - assert_eq!(config.function_name, Some("my-func".to_owned())); - assert_eq!(config.runtime, Some("python3.12".to_owned())); + let start = marker + .and_then(|m| all.iter().position(|r| r.name.as_str() > m)) + .unwrap_or(0); + + #[allow(clippy::cast_possible_wrap)] + let page: Vec = all + .iter() + .skip(start) + .take(max) + .map(|r| { + let latest = r + .versions + .values() + .next_back() + .map(|v| LayerVersionsListItem { + layer_version_arn: Some(v.layer_version_arn.clone()), + version: Some(v.version as i64), + description: if v.description.is_empty() { + None + } else { + Some(v.description.clone()) + }, + created_date: Some(v.created_date.clone()), + compatible_runtimes: if v.compatible_runtimes.is_empty() { + None + } else { + Some(v.compatible_runtimes.clone()) + }, + license_info: v.license_info.clone(), + compatible_architectures: if 
v.compatible_architectures.is_empty() { + None + } else { + Some(v.compatible_architectures.clone()) + }, + }); + LayersListItem { + layer_name: Some(r.name.clone()), + layer_arn: Some(r.layer_arn.clone()), + latest_matching_version: latest, + } + }) + .collect(); + + let next_marker = if start + max < all.len() { + page.last().and_then(|l| l.layer_name.clone()) + } else { + None + }; + + ListLayersOutput { + layers: Some(page), + next_marker, + } + } + + /// Delete a layer version. + pub fn delete_layer_version( + &self, + layer_name: &str, + version_number: u64, + ) -> Result<(), LambdaServiceError> { + // AWS silently succeeds even if the version doesn't exist. + let _ = self.layer_store.delete_version(layer_name, version_number); + info!(layer_name = %layer_name, version = %version_number, "deleted layer version"); + Ok(()) + } + + /// Add a permission to a layer version's resource policy. + pub fn add_layer_version_permission( + &self, + layer_name: &str, + version_number: u64, + input: &AddLayerVersionPermissionInput, + ) -> Result { + // Validate required fields. 
+ let sid = match &input.statement_id { + Some(s) if !s.is_empty() => s.clone(), + _ => { + return Err(LambdaServiceError::InvalidParameter { + message: "StatementId is required".to_owned(), + }); + } + }; + let action = match &input.action { + Some(a) if !a.is_empty() => a.clone(), + _ => { + return Err(LambdaServiceError::InvalidParameter { + message: "Action is required".to_owned(), + }); + } + }; + let principal = match &input.principal { + Some(p) if !p.is_empty() => p.clone(), + _ => { + return Err(LambdaServiceError::InvalidParameter { + message: "Principal is required".to_owned(), + }); + } + }; + + let lva = layer_version_arn( + &self.config.default_region, + &self.config.account_id, + layer_name, + version_number, + ); + + let statement = PolicyStatement { + sid: sid.clone(), + effect: "Allow".to_owned(), + principal: principal.clone(), + action: action.clone(), + resource: lva, + condition: None, + }; + + let statement_json = serde_json::json!({ + "Sid": sid, + "Effect": "Allow", + "Principal": { "Service": principal }, + "Action": action, + "Resource": statement.resource, + }); + + let revision_id = self.layer_store.update_version( + layer_name, + version_number, + |ver| -> Result { + if ver.policy.statements.iter().any(|s| s.sid == sid) { + return Err(LambdaServiceError::ResourceConflict { + message: format!("The statement id ({sid}) provided already exists."), + }); + } + ver.policy.statements.push(statement); + Ok(uuid::Uuid::new_v4().to_string()) + }, + )??; + + Ok(AddLayerVersionPermissionOutput { + statement: Some(statement_json.to_string()), + revision_id: Some(revision_id), + }) + } + + /// Get the resource policy for a layer version. 
+ pub fn get_layer_version_policy( + &self, + layer_name: &str, + version_number: u64, + ) -> Result { + let ver = self + .layer_store + .get_version(layer_name, version_number) + .ok_or(LambdaServiceError::InvalidParameter { + message: format!("Layer version not found: {layer_name}:{version_number}"), + })?; + + if ver.policy.statements.is_empty() { + return Err(LambdaServiceError::PolicyNotFound { + sid: format!("{layer_name}:{version_number}"), + }); + } + + let statements: Vec = ver + .policy + .statements + .iter() + .map(|s| { + serde_json::json!({ + "Sid": s.sid, + "Effect": s.effect, + "Principal": { "Service": s.principal }, + "Action": s.action, + "Resource": s.resource, + }) + }) + .collect(); + + let policy = serde_json::json!({ + "Version": "2012-10-17", + "Id": "default", + "Statement": statements, + }); + + Ok(GetLayerVersionPolicyOutput { + policy: Some(policy.to_string()), + revision_id: Some(uuid::Uuid::new_v4().to_string()), + }) + } + + /// Remove a permission from a layer version's resource policy. + pub fn remove_layer_version_permission( + &self, + layer_name: &str, + version_number: u64, + statement_id: &str, + ) -> Result<(), LambdaServiceError> { + self.layer_store.update_version( + layer_name, + version_number, + |ver| -> Result<(), LambdaServiceError> { + let initial_len = ver.policy.statements.len(); + ver.policy.statements.retain(|s| s.sid != statement_id); + if ver.policy.statements.len() == initial_len { + return Err(LambdaServiceError::PolicyNotFound { + sid: statement_id.to_owned(), + }); + } + Ok(()) + }, + )??; + + Ok(()) + } + + // --------------------------------------------------------------- + // Phase 3: Event Source Mappings + // --------------------------------------------------------------- + + /// Create an event source mapping. + /// + /// Validates the function exists, generates a UUID, and stores the mapping. + /// Defaults: `batch_size=10`, `enabled=true`. 
+ /// + /// # Errors + /// + /// Returns `FunctionNotFound` if the specified function does not exist. + /// Returns `InvalidParameter` if `event_source_arn` is empty. + pub fn create_event_source_mapping( + &self, + input: &CreateEventSourceMappingInput, + ) -> Result { + if input.event_source_arn.is_empty() { + return Err(LambdaServiceError::InvalidParameter { + message: "eventSourceArn is required".to_owned(), + }); + } + + // Resolve function name to ARN; validates the function exists. + let (name, _) = resolve_function_ref(&input.function_name)?; + let _record = self.get_record(&name)?; + let func_arn = function_arn(&self.config.default_region, &self.config.account_id, &name); + + let uuid = uuid::Uuid::new_v4().to_string(); + let enabled = input.enabled.unwrap_or(true); + let batch_size = input.batch_size.unwrap_or(10); + let max_batching_window = input.maximum_batching_window_in_seconds.unwrap_or(0); + let state = if enabled { "Enabled" } else { "Disabled" }.to_owned(); + let now = chrono::Utc::now().timestamp(); + + let esm_record = EventSourceMappingRecord { + uuid: uuid.clone(), + event_source_arn: input.event_source_arn.clone(), + function_arn: func_arn, + enabled, + batch_size, + maximum_batching_window_in_seconds: max_batching_window, + starting_position: input.starting_position.clone(), + starting_position_timestamp: input.starting_position_timestamp.clone(), + maximum_record_age_in_seconds: input.maximum_record_age_in_seconds, + bisect_batch_on_function_error: input.bisect_batch_on_function_error, + maximum_retry_attempts: input.maximum_retry_attempts, + parallelization_factor: input.parallelization_factor, + function_response_types: input.function_response_types.clone().unwrap_or_default(), + state, + state_transition_reason: "User action".to_owned(), + last_modified: now, + last_processing_result: "No records processed".to_owned(), + }; + + let config = Self::record_to_configuration(&esm_record); + self.esm_store.create(esm_record); + + info!(uuid = 
%uuid, function_name = %name, "Created event source mapping"); + + Ok(config) + } + + /// Get an event source mapping by UUID. + /// + /// # Errors + /// + /// Returns `EventSourceMappingNotFound` if the UUID does not exist. + pub fn get_event_source_mapping( + &self, + uuid: &str, + ) -> Result { + let record = self.esm_store.get(uuid).ok_or_else(|| { + LambdaServiceError::EventSourceMappingNotFound { + uuid: uuid.to_owned(), + } + })?; + Ok(Self::record_to_configuration(&record)) + } + + /// Update an event source mapping. + /// + /// # Errors + /// + /// Returns `EventSourceMappingNotFound` if the UUID does not exist. + /// Returns `FunctionNotFound` if a new function name is provided that does not exist. + pub fn update_event_source_mapping( + &self, + uuid: &str, + input: &UpdateEventSourceMappingInput, + ) -> Result { + // If a new function name is provided, validate it exists and resolve the ARN. + let new_function_arn = if let Some(ref fn_name) = input.function_name { + let (name, _) = resolve_function_ref(fn_name)?; + let _record = self.get_record(&name)?; + Some(function_arn( + &self.config.default_region, + &self.config.account_id, + &name, + )) + } else { + None + }; + + let now = chrono::Utc::now().timestamp(); + + let updated = self.esm_store.update(uuid, |record| { + if let Some(ref arn) = new_function_arn { + record.function_arn.clone_from(arn); + } + if let Some(enabled) = input.enabled { + record.enabled = enabled; + if enabled { "Enabled" } else { "Disabled" }.clone_into(&mut record.state); + } + if let Some(batch_size) = input.batch_size { + record.batch_size = batch_size; + } + if let Some(max_window) = input.maximum_batching_window_in_seconds { + record.maximum_batching_window_in_seconds = max_window; + } + if let Some(max_age) = input.maximum_record_age_in_seconds { + record.maximum_record_age_in_seconds = Some(max_age); + } + if let Some(bisect) = input.bisect_batch_on_function_error { + record.bisect_batch_on_function_error = Some(bisect); 
+ } + if let Some(retries) = input.maximum_retry_attempts { + record.maximum_retry_attempts = Some(retries); + } + if let Some(factor) = input.parallelization_factor { + record.parallelization_factor = Some(factor); + } + if let Some(ref types) = input.function_response_types { + record.function_response_types.clone_from(types); + } + record.last_modified = now; + "User action".clone_into(&mut record.state_transition_reason); + Self::record_to_configuration(record) + })?; + + info!(uuid = %uuid, "Updated event source mapping"); + + Ok(updated) + } + + /// Delete an event source mapping. + /// + /// Returns the final configuration with state set to `Deleting`. + /// + /// # Errors + /// + /// Returns `EventSourceMappingNotFound` if the UUID does not exist. + pub fn delete_event_source_mapping( + &self, + uuid: &str, + ) -> Result { + let record = self.esm_store.delete(uuid).ok_or_else(|| { + LambdaServiceError::EventSourceMappingNotFound { + uuid: uuid.to_owned(), + } + })?; + + info!(uuid = %uuid, "Deleted event source mapping"); + + let mut config = Self::record_to_configuration(&record); + config.state = Some("Deleting".to_owned()); + Ok(config) + } + + /// List event source mappings with optional filters and pagination. + /// + /// Supports filtering by `function_name` and `event_source_arn`. + #[must_use] + pub fn list_event_source_mappings( + &self, + function_name: Option<&str>, + event_source_arn: Option<&str>, + marker: Option<&str>, + max_items: Option, + ) -> ListEventSourceMappingsOutput { + let all = self.esm_store.list(function_name, event_source_arn); + let max = max_items.unwrap_or(100); + + // Find start index from marker. 
+ let start = marker + .and_then(|m| all.iter().position(|r| r.uuid == m)) + .map_or(0, |pos| pos + 1); + + let page: Vec = all + .iter() + .skip(start) + .take(max) + .map(Self::record_to_configuration) + .collect(); + + let next_marker = if start + max < all.len() { + all.get(start + max - 1).map(|r| r.uuid.clone()) + } else { + None + }; + + ListEventSourceMappingsOutput { + event_source_mappings: Some(page), + next_marker, + } + } + + /// Convert an `EventSourceMappingRecord` to an `EventSourceMappingConfiguration`. + fn record_to_configuration( + record: &EventSourceMappingRecord, + ) -> EventSourceMappingConfiguration { + #[allow(clippy::cast_precision_loss)] + let last_modified_f64 = record.last_modified as f64; + + EventSourceMappingConfiguration { + uuid: Some(record.uuid.clone()), + event_source_arn: Some(record.event_source_arn.clone()), + function_arn: Some(record.function_arn.clone()), + state: Some(record.state.clone()), + state_transition_reason: Some(record.state_transition_reason.clone()), + last_modified: Some(last_modified_f64), + last_processing_result: Some(record.last_processing_result.clone()), + batch_size: Some(record.batch_size), + maximum_batching_window_in_seconds: Some(record.maximum_batching_window_in_seconds), + starting_position: record.starting_position.clone(), + starting_position_timestamp: record.starting_position_timestamp.clone(), + maximum_record_age_in_seconds: record.maximum_record_age_in_seconds, + bisect_batch_on_function_error: record.bisect_batch_on_function_error, + maximum_retry_attempts: record.maximum_retry_attempts, + parallelization_factor: record.parallelization_factor, + function_response_types: if record.function_response_types.is_empty() { + None + } else { + Some(record.function_response_types.clone()) + }, + } + } + + // --------------------------------------------------------------- + // Internal helpers + // --------------------------------------------------------------- + + /// Build a 
`PublishLayerVersionOutput` from a `LayerVersionRecord`. + #[allow(clippy::cast_possible_wrap)] + fn build_layer_version_output(ver: &LayerVersionRecord) -> PublishLayerVersionOutput { + PublishLayerVersionOutput { + content: Some(LayerVersionContentOutput { + code_sha256: Some(ver.code_sha256.clone()), + code_size: Some(ver.code_size as i64), + ..Default::default() + }), + layer_arn: Some(ver.layer_arn.clone()), + layer_version_arn: Some(ver.layer_version_arn.clone()), + description: if ver.description.is_empty() { + None + } else { + Some(ver.description.clone()) + }, + created_date: Some(ver.created_date.clone()), + version: Some(ver.version as i64), + compatible_runtimes: if ver.compatible_runtimes.is_empty() { + None + } else { + Some(ver.compatible_runtimes.clone()) + }, + license_info: ver.license_info.clone(), + compatible_architectures: if ver.compatible_architectures.is_empty() { + None + } else { + Some(ver.compatible_architectures.clone()) + }, + } + } + + // ----------------------------------------------------------------- + // Phase 6: Concurrency + // ----------------------------------------------------------------- + + /// Put (set) reserved concurrency for a function. + pub fn put_function_concurrency( + &self, + function_ref: &str, + reserved: i32, + ) -> Result { + let (name, _) = resolve_function_ref(function_ref)?; + let _ = self.get_record(&name)?; + + self.store.update(&name, |rec| { + rec.reserved_concurrent_executions = Some(reserved); + })?; + + Ok(Concurrency { + reserved_concurrent_executions: Some(reserved), + }) + } + + /// Get reserved concurrency for a function. + pub fn get_function_concurrency( + &self, + function_ref: &str, + ) -> Result { + let (name, _) = resolve_function_ref(function_ref)?; + let record = self.get_record(&name)?; + Ok(Concurrency { + reserved_concurrent_executions: record.reserved_concurrent_executions, + }) + } + + /// Delete reserved concurrency for a function. 
+ pub fn delete_function_concurrency( + &self, + function_ref: &str, + ) -> Result<(), LambdaServiceError> { + let (name, _) = resolve_function_ref(function_ref)?; + let _ = self.get_record(&name)?; + self.store.update(&name, |rec| { + rec.reserved_concurrent_executions = None; + })?; + Ok(()) + } + + // ----------------------------------------------------------------- + // Phase 6: Event Invoke Config + // ----------------------------------------------------------------- + + /// Put (create/replace) an event invoke config for a function qualifier. + pub fn put_function_event_invoke_config( + &self, + function_ref: &str, + qualifier: Option<&str>, + input: &EventInvokeConfigInput, + ) -> Result { + let (name, ref_qualifier) = resolve_function_ref(function_ref)?; + let qualifier = qualifier.or(ref_qualifier.as_deref()).unwrap_or("$LATEST"); + let record = self.get_record(&name)?; + let fn_arn = self.build_qualified_arn(&name, qualifier); + let now = chrono::Utc::now(); + let now_iso = now.format("%Y-%m-%dT%H:%M:%S%.3f%z").to_string(); + let epoch_millis = millis_to_f64(now.timestamp_millis()); + + let config_record = EventInvokeConfigRecord { + function_arn: fn_arn.clone(), + qualifier: qualifier.to_owned(), + maximum_retry_attempts: input.maximum_retry_attempts, + maximum_event_age_in_seconds: input.maximum_event_age_in_seconds, + last_modified: now_iso, + destination_config: input.destination_config.clone(), + }; + + drop(record); + self.store.update(&name, |rec| { + rec.event_invoke_configs + .insert(qualifier.to_owned(), config_record.clone()); + })?; + + Ok(build_event_invoke_config(&config_record, epoch_millis)) + } + + /// Get an event invoke config for a function qualifier. 
+ pub fn get_function_event_invoke_config( + &self, + function_ref: &str, + qualifier: Option<&str>, + ) -> Result { + let (name, ref_qualifier) = resolve_function_ref(function_ref)?; + let qualifier = qualifier.or(ref_qualifier.as_deref()).unwrap_or("$LATEST"); + let record = self.get_record(&name)?; + + let config_record = record.event_invoke_configs.get(qualifier).ok_or_else(|| { + LambdaServiceError::EventInvokeConfigNotFound { + function_name: name.clone(), + qualifier: qualifier.to_owned(), + } + })?; + + let epoch_millis = parse_epoch_millis(&config_record.last_modified); + + Ok(build_event_invoke_config(config_record, epoch_millis)) + } + + /// Update (merge) an event invoke config for a function qualifier. + pub fn update_function_event_invoke_config( + &self, + function_ref: &str, + qualifier: Option<&str>, + input: &EventInvokeConfigInput, + ) -> Result { + let (name, ref_qualifier) = resolve_function_ref(function_ref)?; + let qualifier = qualifier.or(ref_qualifier.as_deref()).unwrap_or("$LATEST"); + let _ = self.get_record(&name)?; + let fn_arn = self.build_qualified_arn(&name, qualifier); + let now = chrono::Utc::now(); + let now_iso = now.format("%Y-%m-%dT%H:%M:%S%.3f%z").to_string(); + let epoch_millis = millis_to_f64(now.timestamp_millis()); + + let result = self.store.update(&name, |rec| { + let entry = rec + .event_invoke_configs + .entry(qualifier.to_owned()) + .or_insert_with(|| EventInvokeConfigRecord { + function_arn: fn_arn.clone(), + qualifier: qualifier.to_owned(), + maximum_retry_attempts: None, + maximum_event_age_in_seconds: None, + last_modified: now_iso.clone(), + destination_config: None, + }); + + if let Some(v) = input.maximum_retry_attempts { + entry.maximum_retry_attempts = Some(v); + } + if let Some(v) = input.maximum_event_age_in_seconds { + entry.maximum_event_age_in_seconds = Some(v); + } + if let Some(ref dc) = input.destination_config { + entry.destination_config = Some(dc.clone()); + } + 
entry.last_modified.clone_from(&now_iso); + entry.clone() + })?; + + Ok(build_event_invoke_config(&result, epoch_millis)) + } + + /// Delete an event invoke config for a function qualifier. + pub fn delete_function_event_invoke_config( + &self, + function_ref: &str, + qualifier: Option<&str>, + ) -> Result<(), LambdaServiceError> { + let (name, ref_qualifier) = resolve_function_ref(function_ref)?; + let qualifier = qualifier.or(ref_qualifier.as_deref()).unwrap_or("$LATEST"); + let _ = self.get_record(&name)?; + + self.store.update(&name, |rec| { + rec.event_invoke_configs.remove(qualifier); + })?; + Ok(()) + } + + /// List all event invoke configs for a function. + pub fn list_function_event_invoke_configs( + &self, + function_ref: &str, + ) -> Result, LambdaServiceError> { + let (name, _) = resolve_function_ref(function_ref)?; + let record = self.get_record(&name)?; + + let configs: Vec = record + .event_invoke_configs + .values() + .map(|cr| { + let epoch_millis = parse_epoch_millis(&cr.last_modified); + build_event_invoke_config(cr, epoch_millis) + }) + .collect(); + + Ok(configs) + } + + /// Build a qualified ARN for a function + qualifier. + fn build_qualified_arn(&self, function_name: &str, qualifier: &str) -> String { + function_version_arn( + &self.config.default_region, + &self.config.account_id, + function_name, + qualifier, + ) + } + + /// Get a function record by name, returning `FunctionNotFound` if absent. + fn get_record(&self, name: &str) -> Result { + self.store + .get(name) + .ok_or(LambdaServiceError::FunctionNotFound { + name: name.to_owned(), + }) + } + + /// Process code input (zip or image URI), returning code metadata. 
+ async fn process_code( + &self, + function_name: &str, + version: &str, + zip_file_b64: Option<&str>, + image_uri: Option<&str>, + ) -> Result< + ( + String, + u64, + Option, + Option, + Option, + ), + LambdaServiceError, + > { + if let Some(b64) = zip_file_b64 { + use base64::Engine; + let zip_bytes = base64::engine::general_purpose::STANDARD + .decode(b64) + .map_err(|e| LambdaServiceError::InvalidZipFile { + message: format!("Invalid base64 encoding: {e}"), + })?; + + let (code_path, sha256, size) = self + .store + .store_zip_code(function_name, version, &zip_bytes) + .await?; + + Ok(( + sha256, + size, + Some(Bytes::from(zip_bytes)), + Some(code_path), + None, + )) + } else if let Some(uri) = image_uri { + let sha256 = compute_sha256(uri.as_bytes()); + Ok((sha256, 0, None, None, Some(uri.to_owned()))) + } else { + // No code provided - use empty hash. + let sha256 = compute_sha256(b""); + Ok((sha256, 0, None, None, None)) + } + } + + /// Build a `FunctionConfiguration` from internal records. 
+ fn build_function_configuration( + &self, + record: &FunctionRecord, + version: &VersionRecord, + ) -> FunctionConfiguration { + let arn = if version.version == "$LATEST" { + record.arn.clone() + } else { + function_version_arn( + &self.config.default_region, + &self.config.account_id, + &record.name, + &version.version, + ) + }; + + let env_response = if version.environment.is_empty() { + None + } else { + Some(EnvironmentResponse { + variables: Some(version.environment.clone()), + error: None, + }) + }; + + let vpc_response = version.vpc_config.as_ref().map(|vpc| VpcConfigResponse { + subnet_ids: vpc.subnet_ids.clone(), + security_group_ids: vpc.security_group_ids.clone(), + vpc_id: None, + }); + + let tracing_response = version + .tracing_config + .as_ref() + .map(|tc| TracingConfigResponse { + mode: tc.mode.clone(), + }); + + let layers = if version.layers.is_empty() { + None + } else { + Some( + version + .layers + .iter() + .map(|l| Layer { + arn: Some(l.clone()), + code_size: None, + signing_profile_version_arn: None, + signing_job_arn: None, + }) + .collect(), + ) + }; + + let image_config_response = version.image_config.as_ref().map(|ic| ImageConfigResponse { + image_config: Some(ic.clone()), + error: None, + }); + + let snap_start_response = version.snap_start.as_ref().map(|ss| SnapStartResponse { + apply_on: ss.apply_on.clone(), + optimization_status: Some("Off".to_owned()), + }); + + FunctionConfiguration { + function_name: Some(record.name.clone()), + function_arn: Some(arn), + runtime: version.runtime.clone(), + role: Some(version.role.clone()), + handler: version.handler.clone(), + #[allow(clippy::cast_possible_wrap)] + code_size: Some(version.code_size as i64), + description: if version.description.is_empty() { + None + } else { + Some(version.description.clone()) + }, + timeout: Some(version.timeout), + memory_size: Some(version.memory_size), + last_modified: Some(version.last_modified.clone()), + code_sha256: Some(version.code_sha256.clone()), + 
version: Some(version.version.clone()), + environment: env_response, + vpc_config: vpc_response, + dead_letter_config: version.dead_letter_config.clone(), + tracing_config: tracing_response, + revision_id: Some(version.revision_id.clone()), + layers, + state: Some(version.state.clone()), + state_reason: None, + state_reason_code: None, + package_type: Some(version.package_type.clone()), + architectures: Some(version.architectures.clone()), + ephemeral_storage: Some(EphemeralStorage { + size: version.ephemeral_storage_size, + }), + logging_config: version.logging_config.clone(), + snap_start: snap_start_response, + image_config_response, + last_update_status: Some("Successful".to_owned()), + last_update_status_reason: None, + last_update_status_reason_code: None, + } + } + + /// Extract function name from a Lambda function ARN. + fn extract_function_name_from_arn(arn: &str) -> Result { + // Try ARN parsing first. + if arn.starts_with("arn:") { + let (name, _) = resolve_function_ref(arn)?; + return Ok(name); + } + // If it's not an ARN, treat it as a function name. + Ok(arn.to_owned()) + } +} + +/// Get current time in ISO 8601 format matching AWS Lambda conventions. +fn now_iso8601() -> String { + chrono::Utc::now() + .format("%Y-%m-%dT%H:%M:%S%.3f+0000") + .to_string() +} + +/// Convert an `i64` millis timestamp to `f64` for the API response. +/// +/// The AWS Lambda API returns `LastModified` as epoch milliseconds in a float. +/// Precision loss is acceptable since timestamps fit well within f64 mantissa range +/// for any reasonable date (up to year ~285,000). +#[allow(clippy::cast_precision_loss)] +fn millis_to_f64(millis: i64) -> f64 { + millis as f64 +} + +/// Parse an ISO 8601 timestamp string into epoch millis as `f64`. 
+fn parse_epoch_millis(iso: &str) -> f64 { + chrono::DateTime::parse_from_str(iso, "%Y-%m-%dT%H:%M:%S%.3f%z") + .map(|dt| millis_to_f64(dt.timestamp_millis())) + .unwrap_or(0.0) +} + +/// Convert an `EventInvokeConfigRecord` to the output type. +fn build_event_invoke_config( + record: &EventInvokeConfigRecord, + epoch_millis: f64, +) -> FunctionEventInvokeConfig { + FunctionEventInvokeConfig { + function_arn: Some(record.function_arn.clone()), + maximum_retry_attempts: record.maximum_retry_attempts, + maximum_event_age_in_seconds: record.maximum_event_age_in_seconds, + last_modified: Some(epoch_millis), + destination_config: record.destination_config.clone(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn test_provider() -> RustStackLambda { + let tmp = tempfile::tempdir().unwrap(); + let store = FunctionStore::new(tmp.path()); + let config = LambdaConfig::default(); + RustStackLambda::with_store(store, config) + } + + fn sample_create_input(name: &str) -> CreateFunctionInput { + use base64::Engine; + let zip_data = base64::engine::general_purpose::STANDARD.encode(b"PK\x03\x04fake"); + CreateFunctionInput { + function_name: name.to_owned(), + runtime: Some("python3.12".to_owned()), + role: "arn:aws:iam::000000000000:role/test-role".to_owned(), + handler: Some("index.handler".to_owned()), + code: ruststack_lambda_model::types::FunctionCode { + zip_file: Some(zip_data), + ..Default::default() + }, + ..Default::default() + } + } + + #[tokio::test] + async fn test_should_create_and_get_function() { + let provider = test_provider(); + + let config = provider + .create_function(sample_create_input("my-func")) + .await + .unwrap(); + assert_eq!(config.function_name, Some("my-func".to_owned())); + assert_eq!(config.runtime, Some("python3.12".to_owned())); assert_eq!(config.state, Some("Active".to_owned())); let output = provider.get_function("my-func", None).unwrap(); @@ -1886,6 +2827,206 @@ mod tests { assert!(matches!(err, LambdaServiceError::ResourceConflict 
{ .. })); } + // ---- Layer operation tests ---- + + #[test] + fn test_should_publish_and_get_layer_version() { + use base64::Engine; + let provider = test_provider(); + let zip_data = base64::engine::general_purpose::STANDARD.encode(b"PK\x03\x04layer"); + let input = PublishLayerVersionInput { + description: Some("Test layer".to_owned()), + content: Some(ruststack_lambda_model::types::LayerVersionContentInput { + zip_file: Some(zip_data), + ..Default::default() + }), + compatible_runtimes: Some(vec!["python3.12".to_owned()]), + ..Default::default() + }; + + let output = provider.publish_layer_version("my-layer", &input).unwrap(); + assert_eq!(output.version, Some(1)); + assert_eq!(output.description, Some("Test layer".to_owned())); + assert!( + output + .layer_arn + .as_ref() + .unwrap() + .contains("layer:my-layer") + ); + assert!( + output + .layer_version_arn + .as_ref() + .unwrap() + .contains("layer:my-layer:1") + ); + + // Get the layer version. + let get_output = provider.get_layer_version("my-layer", 1).unwrap(); + assert_eq!(get_output.version, Some(1)); + assert_eq!(get_output.description, Some("Test layer".to_owned())); + } + + #[test] + fn test_should_publish_multiple_layer_versions() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + + let v1 = provider.publish_layer_version("my-layer", &input).unwrap(); + let v2 = provider.publish_layer_version("my-layer", &input).unwrap(); + + assert_eq!(v1.version, Some(1)); + assert_eq!(v2.version, Some(2)); + } + + #[test] + fn test_should_list_layer_versions() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + + provider.publish_layer_version("my-layer", &input).unwrap(); + provider.publish_layer_version("my-layer", &input).unwrap(); + provider.publish_layer_version("my-layer", &input).unwrap(); + + let output = provider + .list_layer_versions("my-layer", None, None) + .unwrap(); + 
assert_eq!(output.layer_versions.as_ref().unwrap().len(), 3); + } + + #[test] + fn test_should_list_layers() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + + provider + .publish_layer_version("alpha-layer", &input) + .unwrap(); + provider + .publish_layer_version("bravo-layer", &input) + .unwrap(); + + let output = provider.list_layers(None, None); + let layers = output.layers.as_ref().unwrap(); + assert_eq!(layers.len(), 2); + assert_eq!(layers[0].layer_name, Some("alpha-layer".to_owned()),); + assert_eq!(layers[1].layer_name, Some("bravo-layer".to_owned()),); + } + + #[test] + fn test_should_delete_layer_version() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + + provider.publish_layer_version("my-layer", &input).unwrap(); + provider.publish_layer_version("my-layer", &input).unwrap(); + + provider.delete_layer_version("my-layer", 1).unwrap(); + + // Version 1 should be gone. + let err = provider.get_layer_version("my-layer", 1); + assert!(err.is_err()); + + // Version 2 should still exist. + let output = provider.get_layer_version("my-layer", 2).unwrap(); + assert_eq!(output.version, Some(2)); + } + + #[test] + fn test_should_delete_nonexistent_layer_version_silently() { + let provider = test_provider(); + // Should not error even if layer doesn't exist. 
+ provider.delete_layer_version("nonexistent", 99).unwrap(); + } + + #[test] + fn test_should_add_and_get_layer_version_policy() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + provider.publish_layer_version("my-layer", &input).unwrap(); + + let perm_input = AddLayerVersionPermissionInput { + statement_id: Some("stmt-1".to_owned()), + action: Some("lambda:GetLayerVersion".to_owned()), + principal: Some("*".to_owned()), + ..Default::default() + }; + let output = provider + .add_layer_version_permission("my-layer", 1, &perm_input) + .unwrap(); + assert!(output.statement.is_some()); + + let policy = provider.get_layer_version_policy("my-layer", 1).unwrap(); + assert!(policy.policy.as_ref().unwrap().contains("stmt-1")); + } + + #[test] + fn test_should_reject_duplicate_layer_permission_sid() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + provider.publish_layer_version("my-layer", &input).unwrap(); + + let perm_input = AddLayerVersionPermissionInput { + statement_id: Some("stmt-1".to_owned()), + action: Some("lambda:GetLayerVersion".to_owned()), + principal: Some("*".to_owned()), + ..Default::default() + }; + provider + .add_layer_version_permission("my-layer", 1, &perm_input) + .unwrap(); + + let err = provider + .add_layer_version_permission("my-layer", 1, &perm_input) + .unwrap_err(); + assert!(matches!(err, LambdaServiceError::ResourceConflict { .. 
})); + } + + #[test] + fn test_should_remove_layer_version_permission() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + provider.publish_layer_version("my-layer", &input).unwrap(); + + let perm_input = AddLayerVersionPermissionInput { + statement_id: Some("stmt-1".to_owned()), + action: Some("lambda:GetLayerVersion".to_owned()), + principal: Some("*".to_owned()), + ..Default::default() + }; + provider + .add_layer_version_permission("my-layer", 1, &perm_input) + .unwrap(); + + provider + .remove_layer_version_permission("my-layer", 1, "stmt-1") + .unwrap(); + + // Policy should now be empty. + let err = provider.get_layer_version_policy("my-layer", 1); + assert!(err.is_err()); + } + + #[test] + fn test_should_get_layer_version_by_arn() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + provider.publish_layer_version("my-layer", &input).unwrap(); + + let arn = "arn:aws:lambda:us-east-1:000000000000:layer:my-layer:1"; + let output = provider.get_layer_version_by_arn(arn).unwrap(); + assert_eq!(output.version, Some(1)); + } + + #[test] + fn test_should_error_on_empty_layer_name() { + let provider = test_provider(); + let input = PublishLayerVersionInput::default(); + let err = provider.publish_layer_version("", &input).unwrap_err(); + assert!(matches!(err, LambdaServiceError::InvalidParameter { .. 
})); + } + #[tokio::test] async fn test_should_generate_local_function_url() { let provider = test_provider(); @@ -1936,4 +3077,235 @@ mod tests { .unwrap(); assert_eq!(output2.versions.as_ref().unwrap().len(), 2); } + + // ---- Event Source Mapping tests ---- + + fn sample_esm_input() -> CreateEventSourceMappingInput { + CreateEventSourceMappingInput { + event_source_arn: "arn:aws:sqs:us-east-1:000000000000:my-queue".to_owned(), + function_name: "my-func".to_owned(), + ..Default::default() + } + } + + #[tokio::test] + async fn test_should_create_event_source_mapping() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + let config = provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap(); + + assert!(config.uuid.is_some()); + assert_eq!( + config.event_source_arn.as_deref(), + Some("arn:aws:sqs:us-east-1:000000000000:my-queue") + ); + assert!(config.function_arn.is_some()); + assert_eq!(config.state.as_deref(), Some("Enabled")); + assert_eq!(config.batch_size, Some(10)); + assert_eq!(config.maximum_batching_window_in_seconds, Some(0)); + } + + #[tokio::test] + async fn test_should_create_esm_disabled() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + let mut input = sample_esm_input(); + input.enabled = Some(false); + input.batch_size = Some(50); + + let config = provider.create_event_source_mapping(&input).unwrap(); + assert_eq!(config.state.as_deref(), Some("Disabled")); + assert_eq!(config.batch_size, Some(50)); + } + + #[tokio::test] + async fn test_should_reject_esm_for_nonexistent_function() { + let provider = test_provider(); + let err = provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap_err(); + assert!(matches!(err, LambdaServiceError::FunctionNotFound { .. 
})); + } + + #[test] + fn test_should_reject_esm_with_empty_event_source_arn() { + let provider = test_provider(); + let input = CreateEventSourceMappingInput { + event_source_arn: String::new(), + function_name: "my-func".to_owned(), + ..Default::default() + }; + let err = provider.create_event_source_mapping(&input).unwrap_err(); + assert!(matches!(err, LambdaServiceError::InvalidParameter { .. })); + } + + #[tokio::test] + async fn test_should_get_event_source_mapping() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + let created = provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap(); + let uuid = created.uuid.as_ref().unwrap(); + + let retrieved = provider.get_event_source_mapping(uuid).unwrap(); + assert_eq!(retrieved.uuid.as_deref(), Some(uuid.as_str())); + assert_eq!(retrieved.batch_size, Some(10)); + } + + #[test] + fn test_should_error_on_get_nonexistent_esm() { + let provider = test_provider(); + let err = provider + .get_event_source_mapping("no-such-uuid") + .unwrap_err(); + assert!(matches!( + err, + LambdaServiceError::EventSourceMappingNotFound { .. 
} + )); + } + + #[tokio::test] + async fn test_should_update_event_source_mapping() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + let created = provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap(); + let uuid = created.uuid.as_ref().unwrap(); + + let update_input = UpdateEventSourceMappingInput { + batch_size: Some(100), + enabled: Some(false), + maximum_retry_attempts: Some(3), + ..Default::default() + }; + + let updated = provider + .update_event_source_mapping(uuid, &update_input) + .unwrap(); + assert_eq!(updated.batch_size, Some(100)); + assert_eq!(updated.state.as_deref(), Some("Disabled")); + assert_eq!(updated.maximum_retry_attempts, Some(3)); + } + + #[test] + fn test_should_error_on_update_nonexistent_esm() { + let provider = test_provider(); + let input = UpdateEventSourceMappingInput::default(); + let err = provider + .update_event_source_mapping("no-such-uuid", &input) + .unwrap_err(); + assert!(matches!( + err, + LambdaServiceError::EventSourceMappingNotFound { .. } + )); + } + + #[tokio::test] + async fn test_should_delete_event_source_mapping() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + let created = provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap(); + let uuid = created.uuid.as_ref().unwrap(); + + let deleted = provider.delete_event_source_mapping(uuid).unwrap(); + assert_eq!(deleted.state.as_deref(), Some("Deleting")); + + // Should no longer be findable. + let err = provider.get_event_source_mapping(uuid).unwrap_err(); + assert!(matches!( + err, + LambdaServiceError::EventSourceMappingNotFound { .. } + )); + } + + #[test] + fn test_should_error_on_delete_nonexistent_esm() { + let provider = test_provider(); + let err = provider + .delete_event_source_mapping("no-such-uuid") + .unwrap_err(); + assert!(matches!( + err, + LambdaServiceError::EventSourceMappingNotFound { .. 
} + )); + } + + #[tokio::test] + async fn test_should_list_event_source_mappings() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + // Create 3 mappings. + for _ in 0..3 { + provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap(); + } + + let output = provider.list_event_source_mappings(None, None, None, None); + assert_eq!(output.event_source_mappings.as_ref().unwrap().len(), 3); + } + + #[tokio::test] + async fn test_should_list_esm_with_function_filter() { + let provider = test_provider(); + create_test_function(&provider, "func-a").await; + create_test_function(&provider, "func-b").await; + + let input_a = CreateEventSourceMappingInput { + event_source_arn: "arn:aws:sqs:us-east-1:000000000000:queue".to_owned(), + function_name: "func-a".to_owned(), + ..Default::default() + }; + let input_b = CreateEventSourceMappingInput { + event_source_arn: "arn:aws:sqs:us-east-1:000000000000:queue".to_owned(), + function_name: "func-b".to_owned(), + ..Default::default() + }; + + provider.create_event_source_mapping(&input_a).unwrap(); + provider.create_event_source_mapping(&input_b).unwrap(); + + let output = provider.list_event_source_mappings(Some("func-a"), None, None, None); + assert_eq!(output.event_source_mappings.as_ref().unwrap().len(), 1); + } + + #[tokio::test] + async fn test_should_list_esm_with_pagination() { + let provider = test_provider(); + create_test_function(&provider, "my-func").await; + + for _ in 0..5 { + provider + .create_event_source_mapping(&sample_esm_input()) + .unwrap(); + } + + let page1 = provider.list_event_source_mappings(None, None, None, Some(2)); + assert_eq!(page1.event_source_mappings.as_ref().unwrap().len(), 2); + assert!(page1.next_marker.is_some()); + + let page2 = + provider.list_event_source_mappings(None, None, page1.next_marker.as_deref(), Some(2)); + assert_eq!(page2.event_source_mappings.as_ref().unwrap().len(), 2); + } + + /// Helper to create a test function for ESM 
tests. + async fn create_test_function(provider: &RustStackLambda, name: &str) { + provider + .create_function(sample_create_input(name)) + .await + .unwrap(); + } } diff --git a/crates/ruststack-lambda-core/src/resolver.rs b/crates/ruststack-lambda-core/src/resolver.rs index 321814b..fa60815 100644 --- a/crates/ruststack-lambda-core/src/resolver.rs +++ b/crates/ruststack-lambda-core/src/resolver.rs @@ -6,8 +6,10 @@ //! - Full ARN: `arn:aws:lambda:us-east-1:123456789012:function:my-function` //! - Qualified ARN: `arn:aws:lambda:us-east-1:123456789012:function:my-function:qualifier` -use crate::error::LambdaServiceError; -use crate::storage::{FunctionRecord, VersionRecord}; +use crate::{ + error::LambdaServiceError, + storage::{FunctionRecord, VersionRecord}, +}; /// Parse a function reference into `(function_name, optional_qualifier)`. /// @@ -146,6 +148,48 @@ pub fn alias_arn(region: &str, account_id: &str, function_name: &str, alias: &st format!("arn:aws:lambda:{region}:{account_id}:function:{function_name}:{alias}") } +/// Construct a layer ARN (without version). +/// +/// Format: `arn:aws:lambda:{region}:{account_id}:layer:{layer_name}` +#[must_use] +pub fn layer_arn(region: &str, account_id: &str, layer_name: &str) -> String { + format!("arn:aws:lambda:{region}:{account_id}:layer:{layer_name}") +} + +/// Construct a layer version ARN. +/// +/// Format: `arn:aws:lambda:{region}:{account_id}:layer:{layer_name}:{version}` +#[must_use] +pub fn layer_version_arn(region: &str, account_id: &str, layer_name: &str, version: u64) -> String { + format!("arn:aws:lambda:{region}:{account_id}:layer:{layer_name}:{version}") +} + +/// Parse a layer version ARN into `(layer_name, version)`. +/// +/// Format: `arn:aws:lambda:{region}:{account}:layer:{name}:{version}` +/// +/// # Errors +/// +/// Returns `InvalidArn` if the ARN cannot be parsed. 
+pub fn parse_layer_version_arn(arn: &str) -> Result<(String, u64), LambdaServiceError> { + let parts: Vec<&str> = arn.split(':').collect(); + // Expected: arn:aws:lambda:region:account:layer:name:version = 8 parts + if parts.len() < 8 || parts[0] != "arn" || parts[2] != "lambda" || parts[5] != "layer" { + return Err(LambdaServiceError::InvalidArn { + arn: arn.to_owned(), + }); + } + + let name = parts[6].to_owned(); + let version: u64 = parts[7] + .parse() + .map_err(|_| LambdaServiceError::InvalidArn { + arn: arn.to_owned(), + })?; + + Ok((name, version)) +} + #[cfg(test)] mod tests { use std::collections::{BTreeMap, HashMap}; @@ -210,6 +254,8 @@ mod tests { policy: PolicyDocument::default(), tags: HashMap::new(), url_config: None, + reserved_concurrent_executions: None, + event_invoke_configs: HashMap::new(), created_at: "2024-01-01T00:00:00.000+0000".to_owned(), } } @@ -336,4 +382,45 @@ mod tests { "arn:aws:lambda:us-east-1:123456789012:function:my-func:prod" ); } + + // ---- Layer ARN tests ---- + + #[test] + fn test_should_build_layer_arn() { + assert_eq!( + layer_arn("us-east-1", "123456789012", "my-layer"), + "arn:aws:lambda:us-east-1:123456789012:layer:my-layer" + ); + } + + #[test] + fn test_should_build_layer_version_arn() { + assert_eq!( + layer_version_arn("us-east-1", "123456789012", "my-layer", 3), + "arn:aws:lambda:us-east-1:123456789012:layer:my-layer:3" + ); + } + + #[test] + fn test_should_parse_layer_version_arn_valid() { + let (name, version) = + parse_layer_version_arn("arn:aws:lambda:us-east-1:123456789012:layer:my-layer:5") + .unwrap(); + assert_eq!(name, "my-layer"); + assert_eq!(version, 5); + } + + #[test] + fn test_should_error_on_invalid_layer_arn() { + let err = parse_layer_version_arn("arn:invalid:stuff").unwrap_err(); + assert!(matches!(err, LambdaServiceError::InvalidArn { .. 
})); + } + + #[test] + fn test_should_error_on_non_numeric_layer_version() { + let err = + parse_layer_version_arn("arn:aws:lambda:us-east-1:123456789012:layer:my-layer:abc") + .unwrap_err(); + assert!(matches!(err, LambdaServiceError::InvalidArn { .. })); + } } diff --git a/crates/ruststack-lambda-core/src/storage.rs b/crates/ruststack-lambda-core/src/storage.rs index 23b524c..6f7bafb 100644 --- a/crates/ruststack-lambda-core/src/storage.rs +++ b/crates/ruststack-lambda-core/src/storage.rs @@ -4,16 +4,18 @@ //! and function URL configurations. Code is stored as raw bytes with //! SHA-256 hashes computed on ingestion. -use std::collections::{BTreeMap, HashMap}; -use std::path::{Path, PathBuf}; +use std::{ + collections::{BTreeMap, HashMap}, + path::{Path, PathBuf}, +}; use bytes::Bytes; use dashmap::DashMap; -use sha2::{Digest, Sha256}; - use ruststack_lambda_model::types::{ - Cors, DeadLetterConfig, ImageConfig, LoggingConfig, SnapStart, TracingConfig, VpcConfig, + Cors, DeadLetterConfig, DestinationConfig, ImageConfig, LoggingConfig, SnapStart, + TracingConfig, VpcConfig, }; +use sha2::{Digest, Sha256}; use crate::error::LambdaServiceError; @@ -47,10 +49,31 @@ pub struct FunctionRecord { pub tags: HashMap, /// Function URL configuration. pub url_config: Option, + /// Reserved concurrent executions. + pub reserved_concurrent_executions: Option, + /// Event invoke configurations keyed by qualifier. + pub event_invoke_configs: HashMap, /// ISO 8601 creation timestamp. pub created_at: String, } +/// Stored event invoke configuration for a function qualifier. +#[derive(Debug, Clone)] +pub struct EventInvokeConfigRecord { + /// The qualified function ARN. + pub function_arn: String, + /// The qualifier (version or alias, defaults to `$LATEST`). + pub qualifier: String, + /// Maximum retry attempts (0-2). + pub maximum_retry_attempts: Option, + /// Maximum event age in seconds (60-21600). 
+ pub maximum_event_age_in_seconds: Option, + /// ISO 8601 last-modified timestamp. + pub last_modified: String, + /// Destination configuration. + pub destination_config: Option, +} + /// A snapshot of function configuration at a specific version. #[derive(Debug, Clone)] pub struct VersionRecord { @@ -164,6 +187,314 @@ pub struct FunctionUrlConfigRecord { pub last_modified_time: String, } +/// Complete record for a Lambda layer. +#[derive(Debug, Clone)] +pub struct LayerRecord { + /// Layer name. + pub name: String, + /// Layer ARN (without version). + pub layer_arn: String, + /// Published layer versions keyed by version number. + pub versions: BTreeMap, + /// Next version number to assign. + pub next_version: u64, +} + +/// A snapshot of a layer at a specific version. +#[derive(Debug, Clone)] +pub struct LayerVersionRecord { + /// Version number. + pub version: u64, + /// Description. + pub description: String, + /// Compatible runtimes. + pub compatible_runtimes: Vec, + /// Compatible architectures. + pub compatible_architectures: Vec, + /// License info. + pub license_info: Option, + /// Base64-encoded SHA-256 of the layer code. + pub code_sha256: String, + /// Code size in bytes. + pub code_size: u64, + /// ISO 8601 creation date. + pub created_date: String, + /// Layer ARN (without version). + pub layer_arn: String, + /// Layer version ARN. + pub layer_version_arn: String, + /// Resource-based policy document for this layer version. + pub policy: PolicyDocument, +} + +/// In-memory store for Lambda layers. +#[derive(Debug)] +pub struct LayerStore { + /// All layers keyed by layer name. + layers: DashMap, +} + +impl LayerStore { + /// Create a new empty layer store. + #[must_use] + pub fn new() -> Self { + Self { + layers: DashMap::new(), + } + } + + /// Get a clone of a layer record by name. + #[must_use] + pub fn get(&self, name: &str) -> Option { + self.layers.get(name).map(|r| r.value().clone()) + } + + /// Publish a new version of a layer. 
+ /// + /// Creates the layer record if it does not exist, then inserts a new version. + /// Returns the assigned version number. + #[must_use] + pub fn publish_version( + &self, + name: &str, + layer_arn: &str, + version_record: LayerVersionRecord, + ) -> u64 { + use dashmap::mapref::entry::Entry; + match self.layers.entry(name.to_owned()) { + Entry::Occupied(mut entry) => { + let record = entry.get_mut(); + let version_num = record.next_version; + record.next_version += 1; + record.versions.insert(version_num, version_record); + version_num + } + Entry::Vacant(entry) => { + let version_num = 1; + let mut versions = BTreeMap::new(); + versions.insert(version_num, version_record); + entry.insert(LayerRecord { + name: name.to_owned(), + layer_arn: layer_arn.to_owned(), + versions, + next_version: 2, + }); + version_num + } + } + } + + /// Get a specific layer version. + #[must_use] + pub fn get_version(&self, name: &str, version: u64) -> Option { + self.layers + .get(name) + .and_then(|r| r.versions.get(&version).cloned()) + } + + /// List all versions of a layer, sorted by version number. + #[must_use] + pub fn list_versions(&self, name: &str) -> Vec { + self.layers + .get(name) + .map(|r| r.versions.values().cloned().collect()) + .unwrap_or_default() + } + + /// List all layers with their latest version. + /// + /// Returns cloned records sorted by layer name. + #[must_use] + pub fn list_layers(&self) -> Vec { + let mut records: Vec = self.layers.iter().map(|r| r.value().clone()).collect(); + records.sort_by(|a, b| a.name.cmp(&b.name)); + records + } + + /// Delete a specific layer version. + /// + /// Returns `true` if the version existed and was removed. + #[must_use] + pub fn delete_version(&self, name: &str, version: u64) -> bool { + if let Some(mut entry) = self.layers.get_mut(name) { + let removed = entry.versions.remove(&version).is_some(); + // If no versions remain, remove the entire layer record. 
+ if entry.versions.is_empty() { + drop(entry); + self.layers.remove(name); + } + removed + } else { + false + } + } + + /// Mutate a layer version record in place. + /// + /// # Errors + /// + /// Returns an error if the layer or version does not exist. + pub fn update_version( + &self, + name: &str, + version: u64, + f: F, + ) -> Result + where + F: FnOnce(&mut LayerVersionRecord) -> R, + { + match self.layers.get_mut(name) { + Some(mut entry) => match entry.versions.get_mut(&version) { + Some(ver) => Ok(f(ver)), + None => Err(LambdaServiceError::InvalidParameter { + message: format!("Layer version not found: {name}:{version}"), + }), + }, + None => Err(LambdaServiceError::InvalidParameter { + message: format!("Layer not found: {name}"), + }), + } + } +} + +impl Default for LayerStore { + fn default() -> Self { + Self::new() + } +} + +/// Complete record for a Lambda event source mapping. +#[derive(Debug, Clone)] +pub struct EventSourceMappingRecord { + /// Unique identifier for the mapping. + pub uuid: String, + /// ARN of the event source (e.g., SQS queue, DynamoDB stream). + pub event_source_arn: String, + /// ARN of the target Lambda function. + pub function_arn: String, + /// Whether the mapping is enabled. + pub enabled: bool, + /// Maximum number of records per batch. + pub batch_size: i32, + /// Maximum batching window in seconds. + pub maximum_batching_window_in_seconds: i32, + /// Starting position for stream-based sources. + pub starting_position: Option, + /// Timestamp for `AT_TIMESTAMP` starting position. + pub starting_position_timestamp: Option, + /// Maximum age of a record in seconds before discarding. + pub maximum_record_age_in_seconds: Option, + /// Whether to split a batch on function error. + pub bisect_batch_on_function_error: Option, + /// Maximum number of retry attempts. + pub maximum_retry_attempts: Option, + /// Parallelization factor (1-10). 
+ pub parallelization_factor: Option, + /// Function response types (e.g., `ReportBatchItemFailures`). + pub function_response_types: Vec, + /// State of the mapping (`Enabled` or `Disabled`). + pub state: String, + /// Reason for the current state transition. + pub state_transition_reason: String, + /// Last modified time as epoch seconds. + pub last_modified: i64, + /// Result of the last processing attempt. + pub last_processing_result: String, +} + +/// In-memory store for Lambda event source mappings. +#[derive(Debug)] +pub struct EventSourceMappingStore { + /// All mappings keyed by UUID. + mappings: DashMap, +} + +impl EventSourceMappingStore { + /// Create a new empty event source mapping store. + #[must_use] + pub fn new() -> Self { + Self { + mappings: DashMap::new(), + } + } + + /// Insert a new event source mapping record. + pub fn create(&self, record: EventSourceMappingRecord) { + self.mappings.insert(record.uuid.clone(), record); + } + + /// Get a clone of an event source mapping by UUID. + #[must_use] + pub fn get(&self, uuid: &str) -> Option { + self.mappings.get(uuid).map(|r| r.value().clone()) + } + + /// Update an event source mapping in place. + /// + /// # Errors + /// + /// Returns an error if the mapping does not exist. + pub fn update(&self, uuid: &str, f: F) -> Result + where + F: FnOnce(&mut EventSourceMappingRecord) -> R, + { + match self.mappings.get_mut(uuid) { + Some(mut entry) => Ok(f(entry.value_mut())), + None => Err(LambdaServiceError::EventSourceMappingNotFound { + uuid: uuid.to_owned(), + }), + } + } + + /// Delete an event source mapping by UUID. + /// + /// Returns the removed record, or `None` if it did not exist. + #[must_use] + pub fn delete(&self, uuid: &str) -> Option { + self.mappings.remove(uuid).map(|(_, v)| v) + } + + /// List all event source mappings, optionally filtering by function name and/or event source + /// ARN. + /// + /// Results are sorted by UUID for deterministic ordering. 
+ #[must_use] + pub fn list( + &self, + function_name_filter: Option<&str>, + event_source_arn_filter: Option<&str>, + ) -> Vec { + let mut records: Vec = self + .mappings + .iter() + .filter(|entry| { + let record = entry.value(); + if let Some(fn_filter) = function_name_filter { + // Match against function ARN (contains function name or full ARN match). + if !record.function_arn.contains(fn_filter) { + return false; + } + } + if let Some(arn_filter) = event_source_arn_filter { + if record.event_source_arn != arn_filter { + return false; + } + } + true + }) + .map(|entry| entry.value().clone()) + .collect(); + records.sort_by(|a, b| a.uuid.cmp(&b.uuid)); + records + } +} + +impl Default for EventSourceMappingStore { + fn default() -> Self { + Self::new() + } +} + impl FunctionStore { /// Create a new function store with the given code storage directory. pub fn new(code_dir: impl Into) -> Self { @@ -363,6 +694,8 @@ mod tests { policy: PolicyDocument::default(), tags: HashMap::new(), url_config: None, + reserved_concurrent_executions: None, + event_invoke_configs: HashMap::new(), created_at: "2024-01-01T00:00:00.000+0000".to_owned(), } } @@ -455,6 +788,106 @@ mod tests { assert_eq!(decoded.unwrap().len(), 32); } + // ---- Layer store tests ---- + + #[test] + fn test_should_publish_and_get_layer_version() { + let store = LayerStore::new(); + let ver = LayerVersionRecord { + version: 0, + description: "test".to_owned(), + compatible_runtimes: vec!["python3.12".to_owned()], + compatible_architectures: Vec::new(), + license_info: None, + code_sha256: "abc".to_owned(), + code_size: 100, + created_date: "2024-01-01".to_owned(), + layer_arn: "arn:layer".to_owned(), + layer_version_arn: "arn:layer:1".to_owned(), + policy: PolicyDocument::default(), + }; + + let num = store.publish_version("my-layer", "arn:layer", ver); + assert_eq!(num, 1); + + let retrieved = store.get_version("my-layer", 1); + assert!(retrieved.is_some()); + assert_eq!(retrieved.unwrap().description, 
"test"); + } + + #[test] + fn test_should_list_layer_versions() { + let store = LayerStore::new(); + for i in 0..3 { + let ver = LayerVersionRecord { + version: 0, + description: format!("v{i}"), + compatible_runtimes: Vec::new(), + compatible_architectures: Vec::new(), + license_info: None, + code_sha256: "abc".to_owned(), + code_size: 100, + created_date: "2024-01-01".to_owned(), + layer_arn: "arn:layer".to_owned(), + layer_version_arn: format!("arn:layer:{}", i + 1), + policy: PolicyDocument::default(), + }; + let _ = store.publish_version("my-layer", "arn:layer", ver); + } + + let versions = store.list_versions("my-layer"); + assert_eq!(versions.len(), 3); + } + + #[test] + fn test_should_delete_layer_version_and_cleanup() { + let store = LayerStore::new(); + let ver = LayerVersionRecord { + version: 0, + description: String::new(), + compatible_runtimes: Vec::new(), + compatible_architectures: Vec::new(), + license_info: None, + code_sha256: "abc".to_owned(), + code_size: 0, + created_date: "2024-01-01".to_owned(), + layer_arn: "arn:layer".to_owned(), + layer_version_arn: "arn:layer:1".to_owned(), + policy: PolicyDocument::default(), + }; + let _ = store.publish_version("my-layer", "arn:layer", ver); + + assert!(store.delete_version("my-layer", 1)); + // Layer record should be removed since no versions remain. 
+ assert!(store.get("my-layer").is_none()); + } + + #[test] + fn test_should_list_layers_sorted() { + let store = LayerStore::new(); + let make_ver = || LayerVersionRecord { + version: 0, + description: String::new(), + compatible_runtimes: Vec::new(), + compatible_architectures: Vec::new(), + license_info: None, + code_sha256: "abc".to_owned(), + code_size: 0, + created_date: "2024-01-01".to_owned(), + layer_arn: String::new(), + layer_version_arn: String::new(), + policy: PolicyDocument::default(), + }; + + let _ = store.publish_version("charlie", "arn:charlie", make_ver()); + let _ = store.publish_version("alpha", "arn:alpha", make_ver()); + let _ = store.publish_version("bravo", "arn:bravo", make_ver()); + + let layers = store.list_layers(); + let names: Vec<&str> = layers.iter().map(|l| l.name.as_str()).collect(); + assert_eq!(names, ["alpha", "bravo", "charlie"]); + } + #[tokio::test] async fn test_should_store_and_cleanup_zip_code() { let tmp = tempfile::tempdir().unwrap(); @@ -474,4 +907,138 @@ mod tests { store.cleanup_code("test-func").await; assert!(!dir.exists()); } + + // ---- Event source mapping store tests ---- + + fn sample_esm_record(uuid: &str, function_arn: &str) -> EventSourceMappingRecord { + EventSourceMappingRecord { + uuid: uuid.to_owned(), + event_source_arn: "arn:aws:sqs:us-east-1:000000000000:my-queue".to_owned(), + function_arn: function_arn.to_owned(), + enabled: true, + batch_size: 10, + maximum_batching_window_in_seconds: 0, + starting_position: None, + starting_position_timestamp: None, + maximum_record_age_in_seconds: None, + bisect_batch_on_function_error: None, + maximum_retry_attempts: None, + parallelization_factor: None, + function_response_types: Vec::new(), + state: "Enabled".to_owned(), + state_transition_reason: "User action".to_owned(), + last_modified: 1_700_000_000, + last_processing_result: "No records processed".to_owned(), + } + } + + #[test] + fn test_should_create_and_get_esm() { + let store = 
EventSourceMappingStore::new(); + let record = sample_esm_record( + "uuid-1", + "arn:aws:lambda:us-east-1:000000000000:function:my-func", + ); + store.create(record); + + let retrieved = store.get("uuid-1"); + assert!(retrieved.is_some()); + assert_eq!(retrieved.as_ref().map(|r| r.uuid.as_str()), Some("uuid-1")); + assert_eq!(retrieved.as_ref().map(|r| r.batch_size), Some(10)); + } + + #[test] + fn test_should_return_none_for_missing_esm() { + let store = EventSourceMappingStore::new(); + assert!(store.get("no-such-uuid").is_none()); + } + + #[test] + fn test_should_update_esm() { + let store = EventSourceMappingStore::new(); + store.create(sample_esm_record("uuid-1", "arn:func")); + + store + .update("uuid-1", |record| { + record.batch_size = 50; + record.enabled = false; + "Disabled".clone_into(&mut record.state); + }) + .unwrap(); + + let retrieved = store.get("uuid-1").unwrap(); + assert_eq!(retrieved.batch_size, 50); + assert!(!retrieved.enabled); + assert_eq!(retrieved.state, "Disabled"); + } + + #[test] + fn test_should_error_on_update_nonexistent_esm() { + let store = EventSourceMappingStore::new(); + let err = store.update("no-such", |_| {}).unwrap_err(); + assert!(matches!( + err, + LambdaServiceError::EventSourceMappingNotFound { .. 
} + )); + } + + #[test] + fn test_should_delete_esm() { + let store = EventSourceMappingStore::new(); + store.create(sample_esm_record("uuid-1", "arn:func")); + + let removed = store.delete("uuid-1"); + assert!(removed.is_some()); + assert!(store.get("uuid-1").is_none()); + } + + #[test] + fn test_should_return_none_on_delete_nonexistent_esm() { + let store = EventSourceMappingStore::new(); + assert!(store.delete("no-such").is_none()); + } + + #[test] + fn test_should_list_esm_sorted_by_uuid() { + let store = EventSourceMappingStore::new(); + store.create(sample_esm_record("charlie", "arn:func-a")); + store.create(sample_esm_record("alpha", "arn:func-b")); + store.create(sample_esm_record("bravo", "arn:func-a")); + + let all = store.list(None, None); + let uuids: Vec<&str> = all.iter().map(|r| r.uuid.as_str()).collect(); + assert_eq!(uuids, ["alpha", "bravo", "charlie"]); + } + + #[test] + fn test_should_filter_esm_by_function_name() { + let store = EventSourceMappingStore::new(); + store.create(sample_esm_record( + "uuid-1", + "arn:aws:lambda:us-east-1:000000000000:function:func-a", + )); + store.create(sample_esm_record( + "uuid-2", + "arn:aws:lambda:us-east-1:000000000000:function:func-b", + )); + + let filtered = store.list(Some("func-a"), None); + assert_eq!(filtered.len(), 1); + assert_eq!(filtered[0].uuid, "uuid-1"); + } + + #[test] + fn test_should_filter_esm_by_event_source_arn() { + let store = EventSourceMappingStore::new(); + let mut record1 = sample_esm_record("uuid-1", "arn:func"); + record1.event_source_arn = "arn:aws:sqs:us-east-1:000000000000:queue-a".to_owned(); + let mut record2 = sample_esm_record("uuid-2", "arn:func"); + record2.event_source_arn = "arn:aws:sqs:us-east-1:000000000000:queue-b".to_owned(); + store.create(record1); + store.create(record2); + + let filtered = store.list(None, Some("arn:aws:sqs:us-east-1:000000000000:queue-a")); + assert_eq!(filtered.len(), 1); + assert_eq!(filtered[0].uuid, "uuid-1"); + } } diff --git 
a/crates/ruststack-lambda-http/src/body.rs b/crates/ruststack-lambda-http/src/body.rs index b89a4fe..fb61db4 100644 --- a/crates/ruststack-lambda-http/src/body.rs +++ b/crates/ruststack-lambda-http/src/body.rs @@ -1,7 +1,9 @@ //! Lambda HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-lambda-http/src/dispatch.rs b/crates/ruststack-lambda-http/src/dispatch.rs index 1d4b9c3..9a1133c 100644 --- a/crates/ruststack-lambda-http/src/dispatch.rs +++ b/crates/ruststack-lambda-http/src/dispatch.rs @@ -3,16 +3,12 @@ //! Uses `async_trait` because `LambdaHandler` requires object safety for //! dynamic dispatch (`Arc`). -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; +use ruststack_lambda_model::{error::LambdaError, operations::LambdaOperation}; -use ruststack_lambda_model::error::LambdaError; -use ruststack_lambda_model::operations::LambdaOperation; - -use crate::body::LambdaResponseBody; -use crate::router::PathParams; +use crate::{body::LambdaResponseBody, router::PathParams}; /// The boundary between HTTP and business logic for Lambda. /// diff --git a/crates/ruststack-lambda-http/src/response.rs b/crates/ruststack-lambda-http/src/response.rs index f420116..7737cd6 100644 --- a/crates/ruststack-lambda-http/src/response.rs +++ b/crates/ruststack-lambda-http/src/response.rs @@ -6,9 +6,8 @@ //! - Content type: `application/json` use bytes::Bytes; -use serde::Serialize; - use ruststack_lambda_model::error::LambdaError; +use serde::Serialize; /// Content type for Lambda JSON responses. 
pub const CONTENT_TYPE: &str = "application/json"; @@ -82,9 +81,10 @@ pub fn empty_response(status: u16) -> Result, LambdaError> #[cfg(test)] mod tests { - use super::*; use ruststack_lambda_model::error::LambdaErrorCode; + use super::*; + #[test] fn test_should_format_error_with_x_amzn_errortype_header() { let err = LambdaError::resource_not_found("Function not found: my-func"); diff --git a/crates/ruststack-lambda-http/src/router.rs b/crates/ruststack-lambda-http/src/router.rs index d8c8194..2a81786 100644 --- a/crates/ruststack-lambda-http/src/router.rs +++ b/crates/ruststack-lambda-http/src/router.rs @@ -10,9 +10,10 @@ //! ``` use http::Method; - -use ruststack_lambda_model::error::LambdaError; -use ruststack_lambda_model::operations::{LAMBDA_ROUTES, LambdaOperation}; +use ruststack_lambda_model::{ + error::LambdaError, + operations::{LAMBDA_ROUTES, LambdaOperation}, +}; /// Extracted path parameters from a matched route. /// @@ -119,28 +120,31 @@ fn normalize_date_prefix(path: &str) -> Option { } // Map resource prefix to canonical date. + // The AWS SDK may use different date versions in the path prefix. We + // normalize everything to the date used in LAMBDA_ROUTES so that the + // route matcher can find the operation. let canonical_date = if rest.starts_with("functions") { - // Function URLs use 2021-10-31, function CRUD uses 2015-03-31. - // Detect URL-related paths: /functions/{name}/url or /functions/{name}/urls + // Function URLs use 2021-10-31, everything else uses 2015-03-31. let is_url_path = rest.contains("/url") || rest.ends_with("/urls"); - let target_date = if is_url_path { + if is_url_path { "2021-10-31" } else { "2015-03-31" - }; - if date_part == target_date { - return None; // Already canonical. } - target_date } else if rest.starts_with("tags") || rest == "account-settings" { - if date_part == "2015-03-31" { - return None; // Already canonical. 
- } + "2015-03-31" + } else if rest.starts_with("layers") { + "2018-10-31" + } else if rest.starts_with("event-source-mappings") { "2015-03-31" } else { return None; }; + if date_part == canonical_date { + return None; // Already canonical. + } + Some(format!("/{canonical_date}/{rest}")) } @@ -253,7 +257,8 @@ mod tests { #[test] fn test_should_percent_decode_path_param() { let params = match_path( - "/2015-03-31/functions/arn%3Aaws%3Alambda%3Aus-east-1%3A123456789012%3Afunction%3Amy-func", + "/2015-03-31/functions/arn%3Aaws%3Alambda%3Aus-east-1%3A123456789012%3Afunction%\ + 3Amy-func", "/2015-03-31/functions/{FunctionName}", ) .expect("should match"); diff --git a/crates/ruststack-lambda-http/src/service.rs b/crates/ruststack-lambda-http/src/service.rs index 14b3209..645e597 100644 --- a/crates/ruststack-lambda-http/src/service.rs +++ b/crates/ruststack-lambda-http/src/service.rs @@ -1,20 +1,18 @@ //! Lambda HTTP service implementing the hyper `Service` trait. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_lambda_model::error::LambdaError; -use crate::body::LambdaResponseBody; -use crate::dispatch::{LambdaHandler, dispatch_operation}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::LambdaResponseBody, + dispatch::{LambdaHandler, dispatch_operation}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the Lambda HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-lambda-model/src/error.rs b/crates/ruststack-lambda-model/src/error.rs index 336681e..0076828 100644 --- a/crates/ruststack-lambda-model/src/error.rs +++ b/crates/ruststack-lambda-model/src/error.rs @@ -1,7 +1,6 @@ //! Lambda error types. 
-use std::error::Error; -use std::fmt; +use std::{error::Error, fmt}; /// Lambda error codes matching AWS Lambda API error types. #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/crates/ruststack-lambda-model/src/input.rs b/crates/ruststack-lambda-model/src/input.rs index 80f6f63..dbf6ff1 100644 --- a/crates/ruststack-lambda-model/src/input.rs +++ b/crates/ruststack-lambda-model/src/input.rs @@ -5,8 +5,9 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; use crate::types::{ - AliasRoutingConfiguration, Cors, DeadLetterConfig, Environment, EphemeralStorage, FunctionCode, - ImageConfig, LoggingConfig, SnapStart, TracingConfig, VpcConfig, + AliasRoutingConfiguration, Cors, DeadLetterConfig, DestinationConfig, Environment, + EphemeralStorage, FunctionCode, ImageConfig, LayerVersionContentInput, LoggingConfig, + SnapStart, TracingConfig, VpcConfig, }; /// Input for `CreateFunction`. @@ -274,3 +275,152 @@ pub struct UpdateFunctionUrlConfigInput { #[serde(skip_serializing_if = "Option::is_none")] pub invoke_mode: Option, } + +/// Input for `PublishLayerVersion`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct PublishLayerVersionInput { + /// Layer name. + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_name: Option, + /// Description. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// Layer code content. + #[serde(skip_serializing_if = "Option::is_none")] + pub content: Option, + /// Compatible runtimes. + #[serde(skip_serializing_if = "Option::is_none")] + pub compatible_runtimes: Option>, + /// License info (max 512 characters). + #[serde(skip_serializing_if = "Option::is_none")] + pub license_info: Option, + /// Compatible architectures. + #[serde(skip_serializing_if = "Option::is_none")] + pub compatible_architectures: Option>, +} + +/// Input for `CreateEventSourceMapping`. 
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct CreateEventSourceMappingInput { + /// ARN of the event source (e.g., SQS queue, DynamoDB stream, Kinesis stream). + pub event_source_arn: String, + /// Function name or ARN. + pub function_name: String, + /// Whether the mapping is enabled. + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, + /// Maximum number of records per batch. + #[serde(skip_serializing_if = "Option::is_none")] + pub batch_size: Option, + /// Maximum batching window in seconds. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_batching_window_in_seconds: Option, + /// Starting position for stream-based sources. + #[serde(skip_serializing_if = "Option::is_none")] + pub starting_position: Option, + /// Timestamp for `AT_TIMESTAMP` starting position. + #[serde(skip_serializing_if = "Option::is_none")] + pub starting_position_timestamp: Option, + /// Maximum age of a record in seconds before discarding. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_record_age_in_seconds: Option, + /// Whether to split a batch on function error. + #[serde(skip_serializing_if = "Option::is_none")] + pub bisect_batch_on_function_error: Option, + /// Maximum number of retry attempts. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_retry_attempts: Option, + /// Parallelization factor (1-10). + #[serde(skip_serializing_if = "Option::is_none")] + pub parallelization_factor: Option, + /// Function response types (e.g., `ReportBatchItemFailures`). + #[serde(skip_serializing_if = "Option::is_none")] + pub function_response_types: Option>, +} + +/// Input for `UpdateEventSourceMapping`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct UpdateEventSourceMappingInput { + /// Function name or ARN. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub function_name: Option, + /// Whether the mapping is enabled. + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, + /// Maximum number of records per batch. + #[serde(skip_serializing_if = "Option::is_none")] + pub batch_size: Option, + /// Maximum batching window in seconds. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_batching_window_in_seconds: Option, + /// Maximum age of a record in seconds before discarding. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_record_age_in_seconds: Option, + /// Whether to split a batch on function error. + #[serde(skip_serializing_if = "Option::is_none")] + pub bisect_batch_on_function_error: Option, + /// Maximum number of retry attempts. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_retry_attempts: Option, + /// Parallelization factor (1-10). + #[serde(skip_serializing_if = "Option::is_none")] + pub parallelization_factor: Option, + /// Function response types (e.g., `ReportBatchItemFailures`). + #[serde(skip_serializing_if = "Option::is_none")] + pub function_response_types: Option>, +} + +/// Input for `AddLayerVersionPermission`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct AddLayerVersionPermissionInput { + /// Statement ID. + #[serde(skip_serializing_if = "Option::is_none")] + pub statement_id: Option, + /// Action (e.g., `lambda:GetLayerVersion`). + #[serde(skip_serializing_if = "Option::is_none")] + pub action: Option, + /// Principal. + #[serde(skip_serializing_if = "Option::is_none")] + pub principal: Option, + /// Organization ID. + #[serde(skip_serializing_if = "Option::is_none")] + pub organization_id: Option, + /// Revision ID for optimistic concurrency. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub revision_id: Option, +} + +// --------------------------------------------------------------------------- +// Concurrency +// --------------------------------------------------------------------------- + +/// Input for `PutFunctionConcurrency`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct PutFunctionConcurrencyInput { + /// The number of reserved concurrent executions. + pub reserved_concurrent_executions: i32, +} + +// --------------------------------------------------------------------------- +// Event Invoke Config +// --------------------------------------------------------------------------- + +/// Input for `PutFunctionEventInvokeConfig` and `UpdateFunctionEventInvokeConfig`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct EventInvokeConfigInput { + /// Maximum retry attempts (0-2). + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_retry_attempts: Option, + /// Maximum event age in seconds (60-21600). + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_event_age_in_seconds: Option, + /// Destination configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub destination_config: Option, +} diff --git a/crates/ruststack-lambda-model/src/operations.rs b/crates/ruststack-lambda-model/src/operations.rs index 233556f..09bb6db 100644 --- a/crates/ruststack-lambda-model/src/operations.rs +++ b/crates/ruststack-lambda-model/src/operations.rs @@ -56,6 +56,26 @@ pub enum LambdaOperation { /// Get account settings. GetAccountSettings, + // Phase 2b: Lambda Layers + /// Publish a new layer version. + PublishLayerVersion, + /// Get a layer version by layer name and version number. + GetLayerVersion, + /// Get a layer version by its ARN. + GetLayerVersionByArn, + /// List versions of a layer. + ListLayerVersions, + /// List all layers. 
+ ListLayers, + /// Delete a layer version. + DeleteLayerVersion, + /// Add a permission to a layer version's resource policy. + AddLayerVersionPermission, + /// Get the resource policy for a layer version. + GetLayerVersionPolicy, + /// Remove a permission from a layer version's resource policy. + RemoveLayerVersionPermission, + // Phase 3: Function URLs /// Create a function URL config. CreateFunctionUrlConfig, @@ -67,6 +87,38 @@ pub enum LambdaOperation { DeleteFunctionUrlConfig, /// List function URL configs. ListFunctionUrlConfigs, + + // Phase 3: Event Source Mappings + /// Create an event source mapping. + CreateEventSourceMapping, + /// Get an event source mapping by UUID. + GetEventSourceMapping, + /// Update an event source mapping. + UpdateEventSourceMapping, + /// Delete an event source mapping. + DeleteEventSourceMapping, + /// List event source mappings. + ListEventSourceMappings, + + // Phase 6: Concurrency + /// Set reserved concurrency for a function. + PutFunctionConcurrency, + /// Get reserved concurrency for a function. + GetFunctionConcurrency, + /// Delete reserved concurrency for a function. + DeleteFunctionConcurrency, + + // Phase 6: Event Invoke Config + /// Create an event invoke config for a function. + PutFunctionEventInvokeConfig, + /// Get an event invoke config for a function. + GetFunctionEventInvokeConfig, + /// Update an event invoke config for a function. + UpdateFunctionEventInvokeConfig, + /// Delete an event invoke config for a function. + DeleteFunctionEventInvokeConfig, + /// List event invoke configs for a function. 
+ ListFunctionEventInvokeConfigs, } impl LambdaOperation { @@ -96,11 +148,33 @@ impl LambdaOperation { Self::UntagResource => "UntagResource", Self::ListTags => "ListTags", Self::GetAccountSettings => "GetAccountSettings", + Self::PublishLayerVersion => "PublishLayerVersion", + Self::GetLayerVersion => "GetLayerVersion", + Self::GetLayerVersionByArn => "GetLayerVersionByArn", + Self::ListLayerVersions => "ListLayerVersions", + Self::ListLayers => "ListLayers", + Self::DeleteLayerVersion => "DeleteLayerVersion", + Self::AddLayerVersionPermission => "AddLayerVersionPermission", + Self::GetLayerVersionPolicy => "GetLayerVersionPolicy", + Self::RemoveLayerVersionPermission => "RemoveLayerVersionPermission", Self::CreateFunctionUrlConfig => "CreateFunctionUrlConfig", Self::GetFunctionUrlConfig => "GetFunctionUrlConfig", Self::UpdateFunctionUrlConfig => "UpdateFunctionUrlConfig", Self::DeleteFunctionUrlConfig => "DeleteFunctionUrlConfig", Self::ListFunctionUrlConfigs => "ListFunctionUrlConfigs", + Self::CreateEventSourceMapping => "CreateEventSourceMapping", + Self::GetEventSourceMapping => "GetEventSourceMapping", + Self::UpdateEventSourceMapping => "UpdateEventSourceMapping", + Self::DeleteEventSourceMapping => "DeleteEventSourceMapping", + Self::ListEventSourceMappings => "ListEventSourceMappings", + Self::PutFunctionConcurrency => "PutFunctionConcurrency", + Self::GetFunctionConcurrency => "GetFunctionConcurrency", + Self::DeleteFunctionConcurrency => "DeleteFunctionConcurrency", + Self::PutFunctionEventInvokeConfig => "PutFunctionEventInvokeConfig", + Self::GetFunctionEventInvokeConfig => "GetFunctionEventInvokeConfig", + Self::UpdateFunctionEventInvokeConfig => "UpdateFunctionEventInvokeConfig", + Self::DeleteFunctionEventInvokeConfig => "DeleteFunctionEventInvokeConfig", + Self::ListFunctionEventInvokeConfigs => "ListFunctionEventInvokeConfigs", } } } @@ -248,6 +322,67 @@ pub const LAMBDA_ROUTES: &[LambdaRoute] = &[ operation: LambdaOperation::ListFunctions, 
success_status: 200, }, + // --- /2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId} --- + LambdaRoute { + method: http::Method::DELETE, + path_pattern: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/\ + {StatementId}", + operation: LambdaOperation::RemoveLayerVersionPermission, + success_status: 204, + }, + // --- /2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy --- + LambdaRoute { + method: http::Method::POST, + path_pattern: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", + operation: LambdaOperation::AddLayerVersionPermission, + success_status: 201, + }, + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", + operation: LambdaOperation::GetLayerVersionPolicy, + success_status: 200, + }, + // --- /2018-10-31/layers/{LayerName}/versions/{VersionNumber} --- + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", + operation: LambdaOperation::GetLayerVersion, + success_status: 200, + }, + LambdaRoute { + method: http::Method::DELETE, + path_pattern: "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", + operation: LambdaOperation::DeleteLayerVersion, + success_status: 204, + }, + // --- /2018-10-31/layers/{LayerName}/versions --- + LambdaRoute { + method: http::Method::POST, + path_pattern: "/2018-10-31/layers/{LayerName}/versions", + operation: LambdaOperation::PublishLayerVersion, + success_status: 201, + }, + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2018-10-31/layers/{LayerName}/versions", + operation: LambdaOperation::ListLayerVersions, + success_status: 200, + }, + // --- /2018-10-31/layers --- + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2018-10-31/layers", + operation: LambdaOperation::ListLayers, + success_status: 200, + }, + // --- /2021-10-31/layers/{LayerName}/versions/{VersionNumber} 
(GetLayerVersionByArn) --- + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2021-10-31/layers/{LayerName}/versions/{VersionNumber}", + operation: LambdaOperation::GetLayerVersionByArn, + success_status: 200, + }, // --- /2021-10-31/functions/{name}/url --- LambdaRoute { method: http::Method::POST, @@ -280,6 +415,90 @@ pub const LAMBDA_ROUTES: &[LambdaRoute] = &[ operation: LambdaOperation::ListFunctionUrlConfigs, success_status: 200, }, + // --- /functions/{name}/event-invoke-config/list --- + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2015-03-31/functions/{FunctionName}/event-invoke-config/list", + operation: LambdaOperation::ListFunctionEventInvokeConfigs, + success_status: 200, + }, + // --- /functions/{name}/event-invoke-config --- + LambdaRoute { + method: http::Method::PUT, + path_pattern: "/2015-03-31/functions/{FunctionName}/event-invoke-config", + operation: LambdaOperation::PutFunctionEventInvokeConfig, + success_status: 200, + }, + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2015-03-31/functions/{FunctionName}/event-invoke-config", + operation: LambdaOperation::GetFunctionEventInvokeConfig, + success_status: 200, + }, + LambdaRoute { + method: http::Method::POST, + path_pattern: "/2015-03-31/functions/{FunctionName}/event-invoke-config", + operation: LambdaOperation::UpdateFunctionEventInvokeConfig, + success_status: 200, + }, + LambdaRoute { + method: http::Method::DELETE, + path_pattern: "/2015-03-31/functions/{FunctionName}/event-invoke-config", + operation: LambdaOperation::DeleteFunctionEventInvokeConfig, + success_status: 204, + }, + // --- /functions/{name}/concurrency (GET) --- + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2015-03-31/functions/{FunctionName}/concurrency", + operation: LambdaOperation::GetFunctionConcurrency, + success_status: 200, + }, + // --- /functions/{name}/concurrency (PUT/DELETE) --- + LambdaRoute { + method: http::Method::PUT, + path_pattern: 
"/2015-03-31/functions/{FunctionName}/concurrency", + operation: LambdaOperation::PutFunctionConcurrency, + success_status: 200, + }, + LambdaRoute { + method: http::Method::DELETE, + path_pattern: "/2015-03-31/functions/{FunctionName}/concurrency", + operation: LambdaOperation::DeleteFunctionConcurrency, + success_status: 204, + }, + // --- /2015-03-31/event-source-mappings/{UUID} --- + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2015-03-31/event-source-mappings/{UUID}", + operation: LambdaOperation::GetEventSourceMapping, + success_status: 200, + }, + LambdaRoute { + method: http::Method::PUT, + path_pattern: "/2015-03-31/event-source-mappings/{UUID}", + operation: LambdaOperation::UpdateEventSourceMapping, + success_status: 202, + }, + LambdaRoute { + method: http::Method::DELETE, + path_pattern: "/2015-03-31/event-source-mappings/{UUID}", + operation: LambdaOperation::DeleteEventSourceMapping, + success_status: 202, + }, + // --- /2015-03-31/event-source-mappings/ --- + LambdaRoute { + method: http::Method::POST, + path_pattern: "/2015-03-31/event-source-mappings/", + operation: LambdaOperation::CreateEventSourceMapping, + success_status: 202, + }, + LambdaRoute { + method: http::Method::GET, + path_pattern: "/2015-03-31/event-source-mappings/", + operation: LambdaOperation::ListEventSourceMappings, + success_status: 200, + }, // --- /2015-03-31/tags/{arn} --- LambdaRoute { method: http::Method::POST, diff --git a/crates/ruststack-lambda-model/src/output.rs b/crates/ruststack-lambda-model/src/output.rs index d5131dd..18d2093 100644 --- a/crates/ruststack-lambda-model/src/output.rs +++ b/crates/ruststack-lambda-model/src/output.rs @@ -5,7 +5,9 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; use crate::types::{ - AliasConfiguration, FunctionCodeLocation, FunctionConfiguration, FunctionUrlConfig, + AliasConfiguration, EventSourceMappingConfiguration, FunctionCodeLocation, + FunctionConfiguration, FunctionEventInvokeConfig, 
FunctionUrlConfig, LayerVersionContentOutput, + LayerVersionsListItem, LayersListItem, }; /// Output for `GetFunction`. @@ -145,3 +147,108 @@ pub struct ListFunctionUrlConfigsOutput { #[serde(skip_serializing_if = "Option::is_none")] pub next_marker: Option, } + +/// Output for `PublishLayerVersion` and `GetLayerVersion`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct PublishLayerVersionOutput { + /// Layer version content metadata. + #[serde(skip_serializing_if = "Option::is_none")] + pub content: Option, + /// Layer ARN (without version). + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_arn: Option, + /// Layer version ARN. + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_version_arn: Option, + /// Description. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// ISO 8601 creation date. + #[serde(skip_serializing_if = "Option::is_none")] + pub created_date: Option, + /// Version number. + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + /// Compatible runtimes. + #[serde(skip_serializing_if = "Option::is_none")] + pub compatible_runtimes: Option>, + /// License info. + #[serde(skip_serializing_if = "Option::is_none")] + pub license_info: Option, + /// Compatible architectures. + #[serde(skip_serializing_if = "Option::is_none")] + pub compatible_architectures: Option>, +} + +/// Output for `ListLayerVersions`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ListLayerVersionsOutput { + /// Layer versions. + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_versions: Option>, + /// Next pagination marker. + #[serde(skip_serializing_if = "Option::is_none")] + pub next_marker: Option, +} + +/// Output for `ListLayers`. 
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ListLayersOutput { + /// Layers. + #[serde(skip_serializing_if = "Option::is_none")] + pub layers: Option<Vec<LayersListItem>>, + /// Next pagination marker. + #[serde(skip_serializing_if = "Option::is_none")] + pub next_marker: Option<String>, +} + +/// Output for `GetLayerVersionPolicy`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct GetLayerVersionPolicyOutput { + /// JSON policy document. + #[serde(skip_serializing_if = "Option::is_none")] + pub policy: Option<String>, + /// Revision ID. + #[serde(skip_serializing_if = "Option::is_none")] + pub revision_id: Option<String>, +} + +/// Output for `ListEventSourceMappings`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ListEventSourceMappingsOutput { + /// List of event source mapping configurations. + #[serde(skip_serializing_if = "Option::is_none")] + pub event_source_mappings: Option<Vec<EventSourceMappingConfiguration>>, + /// Next pagination marker. + #[serde(skip_serializing_if = "Option::is_none")] + pub next_marker: Option<String>, +} + +/// Output for `AddLayerVersionPermission`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct AddLayerVersionPermissionOutput { + /// JSON policy statement. + #[serde(skip_serializing_if = "Option::is_none")] + pub statement: Option<String>, + /// Revision ID. + #[serde(skip_serializing_if = "Option::is_none")] + pub revision_id: Option<String>, +} + +/// Output for `ListFunctionEventInvokeConfigs`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ListFunctionEventInvokeConfigsOutput { + /// List of event invoke configs. + #[serde(skip_serializing_if = "Option::is_none")] + pub function_event_invoke_configs: Option<Vec<FunctionEventInvokeConfig>>, + /// Next pagination marker. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub next_marker: Option<String>, +} diff --git a/crates/ruststack-lambda-model/src/types.rs b/crates/ruststack-lambda-model/src/types.rs index 8ac3e15..2f35520 100644 --- a/crates/ruststack-lambda-model/src/types.rs +++ b/crates/ruststack-lambda-model/src/types.rs @@ -464,6 +464,90 @@ pub struct Layer { pub signing_job_arn: Option<String>, } +/// Layer version code input for `PublishLayerVersion`. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct LayerVersionContentInput { + /// S3 bucket containing the layer code. + #[serde(rename = "S3Bucket")] + #[serde(skip_serializing_if = "Option::is_none")] + pub s3_bucket: Option<String>, + /// S3 key for the layer code. + #[serde(rename = "S3Key")] + #[serde(skip_serializing_if = "Option::is_none")] + pub s3_key: Option<String>, + /// S3 object version. + #[serde(rename = "S3ObjectVersion")] + #[serde(skip_serializing_if = "Option::is_none")] + pub s3_object_version: Option<String>, + /// Base64-encoded zip file contents. + #[serde(skip_serializing_if = "Option::is_none")] + pub zip_file: Option<String>, +} + +/// Layer version code output returned in layer version responses. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct LayerVersionContentOutput { + /// Pre-signed URL to download the layer code. + #[serde(skip_serializing_if = "Option::is_none")] + pub location: Option<String>, + /// SHA-256 hash of the layer code. + #[serde(skip_serializing_if = "Option::is_none")] + pub code_sha256: Option<String>, + /// Code size in bytes. + #[serde(skip_serializing_if = "Option::is_none")] + pub code_size: Option<i64>, + /// Signing profile version ARN. + #[serde(skip_serializing_if = "Option::is_none")] + pub signing_profile_version_arn: Option<String>, + /// Signing job ARN. + #[serde(skip_serializing_if = "Option::is_none")] + pub signing_job_arn: Option<String>, +} + +/// Summary of a layer version in list responses. 
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct LayerVersionsListItem { + /// Layer version ARN. + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_version_arn: Option<String>, + /// Version number. + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option<i64>, + /// Description. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option<String>, + /// ISO 8601 creation date. + #[serde(skip_serializing_if = "Option::is_none")] + pub created_date: Option<String>, + /// Compatible runtimes. + #[serde(skip_serializing_if = "Option::is_none")] + pub compatible_runtimes: Option<Vec<String>>, + /// License info. + #[serde(skip_serializing_if = "Option::is_none")] + pub license_info: Option<String>, + /// Compatible architectures. + #[serde(skip_serializing_if = "Option::is_none")] + pub compatible_architectures: Option<Vec<String>>, +} + +/// Summary of a layer in list responses. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct LayersListItem { + /// Layer name. + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_name: Option<String>, + /// Layer ARN (without version). + #[serde(skip_serializing_if = "Option::is_none")] + pub layer_arn: Option<String>, + /// Latest matching version summary. + #[serde(skip_serializing_if = "Option::is_none")] + pub latest_matching_version: Option<LayerVersionsListItem>, +} + /// Function configuration returned by many operations. #[derive(Debug, Clone, Default, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] @@ -643,3 +727,129 @@ pub struct FunctionUrlConfig { #[serde(skip_serializing_if = "Option::is_none")] pub invoke_mode: Option<String>, } + +/// Configuration of an event source mapping. +/// +/// This is the response type shared across all event source mapping operations +/// (Create, Get, Update, Delete, List). 
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct EventSourceMappingConfiguration { + /// The event source mapping UUID. + #[serde(rename = "UUID", skip_serializing_if = "Option::is_none")] + pub uuid: Option<String>, + /// ARN of the event source. + #[serde(skip_serializing_if = "Option::is_none")] + pub event_source_arn: Option<String>, + /// Function ARN. + #[serde(skip_serializing_if = "Option::is_none")] + pub function_arn: Option<String>, + /// State of the mapping (Creating, Enabled, Disabled, Enabling, Disabling, Updating, + /// Deleting). + #[serde(skip_serializing_if = "Option::is_none")] + pub state: Option<String>, + /// Reason for the current state transition. + #[serde(skip_serializing_if = "Option::is_none")] + pub state_transition_reason: Option<String>, + /// Last modified timestamp (epoch seconds). + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified: Option<f64>, + /// Result of the last processing attempt. + #[serde(skip_serializing_if = "Option::is_none")] + pub last_processing_result: Option<String>, + /// Maximum number of records per batch. + #[serde(skip_serializing_if = "Option::is_none")] + pub batch_size: Option<i32>, + /// Maximum batching window in seconds. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_batching_window_in_seconds: Option<i32>, + /// Starting position for stream-based sources. + #[serde(skip_serializing_if = "Option::is_none")] + pub starting_position: Option<String>, + /// Timestamp for `AT_TIMESTAMP` starting position. + #[serde(skip_serializing_if = "Option::is_none")] + pub starting_position_timestamp: Option<f64>, + /// Maximum age of a record in seconds before discarding. + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_record_age_in_seconds: Option<i32>, + /// Whether to split a batch on function error. + #[serde(skip_serializing_if = "Option::is_none")] + pub bisect_batch_on_function_error: Option<bool>, + /// Maximum number of retry attempts. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_retry_attempts: Option<i32>, + /// Parallelization factor (1-10). + #[serde(skip_serializing_if = "Option::is_none")] + pub parallelization_factor: Option<i32>, + /// Function response types (e.g., `ReportBatchItemFailures`). + #[serde(skip_serializing_if = "Option::is_none")] + pub function_response_types: Option<Vec<String>>, +} + +// --------------------------------------------------------------------------- +// Concurrency +// --------------------------------------------------------------------------- + +/// Concurrency configuration for a Lambda function. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct Concurrency { + /// The number of reserved concurrent executions. + #[serde(skip_serializing_if = "Option::is_none")] + pub reserved_concurrent_executions: Option<i32>, +} + +// --------------------------------------------------------------------------- +// Event Invoke Config +// --------------------------------------------------------------------------- + +/// Destination configuration for asynchronous invocations. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct DestinationConfig { + /// Destination for successful invocations. + #[serde(skip_serializing_if = "Option::is_none")] + pub on_success: Option<OnSuccess>, + /// Destination for failed invocations. + #[serde(skip_serializing_if = "Option::is_none")] + pub on_failure: Option<OnFailure>, +} + +/// Destination for successful asynchronous invocations. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct OnSuccess { + /// The ARN of the destination resource. + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option<String>, +} + +/// Destination for failed asynchronous invocations. 
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct OnFailure { + /// The ARN of the destination resource. + #[serde(skip_serializing_if = "Option::is_none")] + pub destination: Option<String>, +} + +/// Event invoke configuration for a function. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct FunctionEventInvokeConfig { + /// The function ARN. + #[serde(skip_serializing_if = "Option::is_none")] + pub function_arn: Option<String>, + /// Maximum retry attempts (0-2). + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_retry_attempts: Option<i32>, + /// Maximum event age in seconds (60-21600). + #[serde(skip_serializing_if = "Option::is_none")] + pub maximum_event_age_in_seconds: Option<i32>, + /// Last modified timestamp as epoch millis (float). + #[serde(skip_serializing_if = "Option::is_none")] + pub last_modified: Option<f64>, + /// Destination configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub destination_config: Option<DestinationConfig>, +} diff --git a/crates/ruststack-logs-core/src/handler.rs b/crates/ruststack-logs-core/src/handler.rs index 1c05b93..025b173 100644 --- a/crates/ruststack-logs-core/src/handler.rs +++ b/crates/ruststack-logs-core/src/handler.rs @@ -1,16 +1,10 @@ //! CloudWatch Logs handler implementation bridging HTTP to business logic. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_logs_http::body::LogsResponseBody; -use ruststack_logs_http::dispatch::LogsHandler; -use ruststack_logs_http::response::json_response; -use ruststack_logs_model::error::LogsError; -use ruststack_logs_model::operations::LogsOperation; +use ruststack_logs_http::{body::LogsResponseBody, dispatch::LogsHandler, response::json_response}; +use ruststack_logs_model::{error::LogsError, operations::LogsOperation}; use crate::provider::RustStackLogs; diff --git a/crates/ruststack-logs-core/src/provider.rs b/crates/ruststack-logs-core/src/provider.rs index c250645..2b3908b 100644 --- a/crates/ruststack-logs-core/src/provider.rs +++ b/crates/ruststack-logs-core/src/provider.rs @@ -5,36 +5,37 @@ use std::collections::HashMap; -use dashmap::DashMap; -use dashmap::mapref::entry::Entry; - -use ruststack_logs_model::error::{LogsError, LogsErrorCode}; -use ruststack_logs_model::input::{ - AssociateKmsKeyInput, CreateLogGroupInput, CreateLogStreamInput, DeleteDestinationInput, - DeleteLogGroupInput, DeleteLogStreamInput, DeleteMetricFilterInput, DeleteQueryDefinitionInput, - DeleteResourcePolicyInput, DeleteRetentionPolicyInput, DeleteSubscriptionFilterInput, - DescribeDestinationsInput, DescribeLogGroupsInput, DescribeLogStreamsInput, - DescribeMetricFiltersInput, DescribeQueriesInput, DescribeQueryDefinitionsInput, - DescribeResourcePoliciesInput, DescribeSubscriptionFiltersInput, DisassociateKmsKeyInput, - FilterLogEventsInput, GetLogEventsInput, GetQueryResultsInput, ListTagsForResourceInput, - ListTagsLogGroupInput, PutDestinationInput, PutDestinationPolicyInput, PutLogEventsInput, - PutMetricFilterInput, PutQueryDefinitionInput, PutResourcePolicyInput, PutRetentionPolicyInput, - PutSubscriptionFilterInput, StartQueryInput, StopQueryInput, TagLogGroupInput, - TagResourceInput, TestMetricFilterInput, UntagLogGroupInput, 
UntagResourceInput, -}; -use ruststack_logs_model::output::{ - DescribeDestinationsResponse, DescribeLogGroupsResponse, DescribeLogStreamsResponse, - DescribeMetricFiltersResponse, DescribeQueriesResponse, DescribeQueryDefinitionsResponse, - DescribeResourcePoliciesResponse, DescribeSubscriptionFiltersResponse, FilterLogEventsResponse, - GetLogEventsResponse, GetQueryResultsResponse, ListTagsForResourceResponse, - ListTagsLogGroupResponse, PutDestinationResponse, PutLogEventsResponse, - PutQueryDefinitionResponse, PutResourcePolicyResponse, StartQueryResponse, StopQueryResponse, - TestMetricFilterResponse, -}; -use ruststack_logs_model::types::{ - Destination, FilteredLogEvent, LogGroup, LogStream, MetricFilter, MetricFilterMatchRecord, - OutputLogEvent, QueryDefinition, QueryStatistics, QueryStatus, ResourcePolicy, - SearchedLogStream, SubscriptionFilter, +use dashmap::{DashMap, mapref::entry::Entry}; +use ruststack_logs_model::{ + error::{LogsError, LogsErrorCode}, + input::{ + AssociateKmsKeyInput, CreateLogGroupInput, CreateLogStreamInput, DeleteDestinationInput, + DeleteLogGroupInput, DeleteLogStreamInput, DeleteMetricFilterInput, + DeleteQueryDefinitionInput, DeleteResourcePolicyInput, DeleteRetentionPolicyInput, + DeleteSubscriptionFilterInput, DescribeDestinationsInput, DescribeLogGroupsInput, + DescribeLogStreamsInput, DescribeMetricFiltersInput, DescribeQueriesInput, + DescribeQueryDefinitionsInput, DescribeResourcePoliciesInput, + DescribeSubscriptionFiltersInput, DisassociateKmsKeyInput, FilterLogEventsInput, + GetLogEventsInput, GetQueryResultsInput, ListTagsForResourceInput, ListTagsLogGroupInput, + PutDestinationInput, PutDestinationPolicyInput, PutLogEventsInput, PutMetricFilterInput, + PutQueryDefinitionInput, PutResourcePolicyInput, PutRetentionPolicyInput, + PutSubscriptionFilterInput, StartQueryInput, StopQueryInput, TagLogGroupInput, + TagResourceInput, TestMetricFilterInput, UntagLogGroupInput, UntagResourceInput, + }, + output::{ + 
DescribeDestinationsResponse, DescribeLogGroupsResponse, DescribeLogStreamsResponse, + DescribeMetricFiltersResponse, DescribeQueriesResponse, DescribeQueryDefinitionsResponse, + DescribeResourcePoliciesResponse, DescribeSubscriptionFiltersResponse, + FilterLogEventsResponse, GetLogEventsResponse, GetQueryResultsResponse, + ListTagsForResourceResponse, ListTagsLogGroupResponse, PutDestinationResponse, + PutLogEventsResponse, PutQueryDefinitionResponse, PutResourcePolicyResponse, + StartQueryResponse, StopQueryResponse, TestMetricFilterResponse, + }, + types::{ + Destination, FilteredLogEvent, LogGroup, LogStream, MetricFilter, MetricFilterMatchRecord, + OutputLogEvent, QueryDefinition, QueryStatistics, QueryStatus, ResourcePolicy, + SearchedLogStream, SubscriptionFilter, + }, }; use crate::config::LogsConfig; @@ -536,7 +537,8 @@ impl RustStackLogs { return Err(LogsError::with_message( LogsErrorCode::InvalidParameterException, format!( - "Log events in a single PutLogEvents request cannot exceed {MAX_PUT_LOG_EVENTS}" + "Log events in a single PutLogEvents request cannot exceed \ + {MAX_PUT_LOG_EVENTS}" ), )); } diff --git a/crates/ruststack-logs-http/src/body.rs b/crates/ruststack-logs-http/src/body.rs index 49b57a6..8feb8bf 100644 --- a/crates/ruststack-logs-http/src/body.rs +++ b/crates/ruststack-logs-http/src/body.rs @@ -1,7 +1,9 @@ //! CloudWatch Logs HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-logs-http/src/dispatch.rs b/crates/ruststack-logs-http/src/dispatch.rs index 15036a3..4eb031b 100644 --- a/crates/ruststack-logs-http/src/dispatch.rs +++ b/crates/ruststack-logs-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! CloudWatch Logs handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_logs_model::error::LogsError; -use ruststack_logs_model::operations::LogsOperation; +use ruststack_logs_model::{error::LogsError, operations::LogsOperation}; use crate::body::LogsResponseBody; diff --git a/crates/ruststack-logs-http/src/response.rs b/crates/ruststack-logs-http/src/response.rs index 4fd8b94..0525e6f 100644 --- a/crates/ruststack-logs-http/src/response.rs +++ b/crates/ruststack-logs-http/src/response.rs @@ -71,9 +71,10 @@ pub fn json_response(json: Vec<u8>, request_id: &str) -> http::Response Owner { diff --git a/crates/ruststack-s3-core/src/ops/bucket_config.rs b/crates/ruststack-s3-core/src/ops/bucket_config.rs index bc03443..6cd9514 100644 --- a/crates/ruststack-s3-core/src/ops/bucket_config.rs +++ b/crates/ruststack-s3-core/src/ops/bucket_config.rs @@ -5,54 +5,60 @@ //! object lock, accelerate, request payment, website, ACL, and //! policy status operations. 
-use ruststack_s3_model::error::S3Error; -use ruststack_s3_model::input::{ - DeleteBucketCorsInput, DeleteBucketEncryptionInput, DeleteBucketLifecycleInput, - DeleteBucketOwnershipControlsInput, DeleteBucketPolicyInput, DeleteBucketTaggingInput, - DeleteBucketWebsiteInput, DeletePublicAccessBlockInput, GetBucketAccelerateConfigurationInput, - GetBucketAclInput, GetBucketCorsInput, GetBucketEncryptionInput, - GetBucketLifecycleConfigurationInput, GetBucketLoggingInput, - GetBucketNotificationConfigurationInput, GetBucketOwnershipControlsInput, GetBucketPolicyInput, - GetBucketPolicyStatusInput, GetBucketRequestPaymentInput, GetBucketTaggingInput, - GetBucketVersioningInput, GetBucketWebsiteInput, GetObjectLockConfigurationInput, - GetPublicAccessBlockInput, PutBucketAccelerateConfigurationInput, PutBucketAclInput, - PutBucketCorsInput, PutBucketEncryptionInput, PutBucketLifecycleConfigurationInput, - PutBucketLoggingInput, PutBucketNotificationConfigurationInput, - PutBucketOwnershipControlsInput, PutBucketPolicyInput, PutBucketRequestPaymentInput, - PutBucketTaggingInput, PutBucketVersioningInput, PutBucketWebsiteInput, - PutObjectLockConfigurationInput, PutPublicAccessBlockInput, -}; -use ruststack_s3_model::output::{ - GetBucketAccelerateConfigurationOutput, GetBucketAclOutput, GetBucketCorsOutput, - GetBucketEncryptionOutput, GetBucketLifecycleConfigurationOutput, GetBucketLoggingOutput, - GetBucketNotificationConfigurationOutput, GetBucketOwnershipControlsOutput, - GetBucketPolicyOutput, GetBucketPolicyStatusOutput, GetBucketRequestPaymentOutput, - GetBucketTaggingOutput, GetBucketVersioningOutput, GetBucketWebsiteOutput, - GetObjectLockConfigurationOutput, GetPublicAccessBlockOutput, - PutBucketLifecycleConfigurationOutput, PutObjectLockConfigurationOutput, -}; -use ruststack_s3_model::types::{ - BucketAccelerateStatus, BucketVersioningStatus, CORSRule, - DefaultRetention as ModelDefaultRetention, ErrorDocument, Grant, Grantee, IndexDocument, - 
ObjectLockConfiguration as ModelObjectLockConfiguration, ObjectLockEnabled, - ObjectLockRetentionMode, ObjectLockRule as ModelObjectLockRule, ObjectOwnership, - OwnershipControls, OwnershipControlsRule, Payer, Permission, PolicyStatus, Protocol, - PublicAccessBlockConfiguration, RedirectAllRequestsTo, ServerSideEncryption, - ServerSideEncryptionByDefault, ServerSideEncryptionConfiguration, ServerSideEncryptionRule, - Tag, +use ruststack_s3_model::{ + error::S3Error, + input::{ + DeleteBucketCorsInput, DeleteBucketEncryptionInput, DeleteBucketLifecycleInput, + DeleteBucketOwnershipControlsInput, DeleteBucketPolicyInput, DeleteBucketTaggingInput, + DeleteBucketWebsiteInput, DeletePublicAccessBlockInput, + GetBucketAccelerateConfigurationInput, GetBucketAclInput, GetBucketCorsInput, + GetBucketEncryptionInput, GetBucketLifecycleConfigurationInput, GetBucketLoggingInput, + GetBucketNotificationConfigurationInput, GetBucketOwnershipControlsInput, + GetBucketPolicyInput, GetBucketPolicyStatusInput, GetBucketRequestPaymentInput, + GetBucketTaggingInput, GetBucketVersioningInput, GetBucketWebsiteInput, + GetObjectLockConfigurationInput, GetPublicAccessBlockInput, + PutBucketAccelerateConfigurationInput, PutBucketAclInput, PutBucketCorsInput, + PutBucketEncryptionInput, PutBucketLifecycleConfigurationInput, PutBucketLoggingInput, + PutBucketNotificationConfigurationInput, PutBucketOwnershipControlsInput, + PutBucketPolicyInput, PutBucketRequestPaymentInput, PutBucketTaggingInput, + PutBucketVersioningInput, PutBucketWebsiteInput, PutObjectLockConfigurationInput, + PutPublicAccessBlockInput, + }, + output::{ + GetBucketAccelerateConfigurationOutput, GetBucketAclOutput, GetBucketCorsOutput, + GetBucketEncryptionOutput, GetBucketLifecycleConfigurationOutput, GetBucketLoggingOutput, + GetBucketNotificationConfigurationOutput, GetBucketOwnershipControlsOutput, + GetBucketPolicyOutput, GetBucketPolicyStatusOutput, GetBucketRequestPaymentOutput, + GetBucketTaggingOutput, 
GetBucketVersioningOutput, GetBucketWebsiteOutput, + GetObjectLockConfigurationOutput, GetPublicAccessBlockOutput, + PutBucketLifecycleConfigurationOutput, PutObjectLockConfigurationOutput, + }, + types::{ + BucketAccelerateStatus, BucketVersioningStatus, CORSRule, + DefaultRetention as ModelDefaultRetention, ErrorDocument, Grant, Grantee, IndexDocument, + ObjectLockConfiguration as ModelObjectLockConfiguration, ObjectLockEnabled, + ObjectLockRetentionMode, ObjectLockRule as ModelObjectLockRule, ObjectOwnership, + OwnershipControls, OwnershipControlsRule, Payer, Permission, PolicyStatus, Protocol, + PublicAccessBlockConfiguration, RedirectAllRequestsTo, ServerSideEncryption, + ServerSideEncryptionByDefault, ServerSideEncryptionConfiguration, ServerSideEncryptionRule, + Tag, + }, }; use tracing::debug; -use crate::cors::CorsRule; -use crate::error::S3ServiceError; -use crate::provider::RustStackS3; -use crate::state::bucket::{ - BucketEncryption, CorsRuleConfig, ObjectLockConfiguration, ObjectLockRule, - OwnershipControlsConfig, PublicAccessBlockConfig, VersioningStatus, WebsiteConfig, -}; -use crate::state::object::{CannedAcl, Owner as InternalOwner}; - use super::bucket::to_model_owner; +use crate::{ + cors::CorsRule, + error::S3ServiceError, + provider::RustStackS3, + state::{ + bucket::{ + BucketEncryption, CorsRuleConfig, ObjectLockConfiguration, ObjectLockRule, + OwnershipControlsConfig, PublicAccessBlockConfig, VersioningStatus, WebsiteConfig, + }, + object::{CannedAcl, Owner as InternalOwner}, + }, +}; // These handler methods must remain async for consistency. #[allow(clippy::unused_async)] diff --git a/crates/ruststack-s3-core/src/ops/list.rs b/crates/ruststack-s3-core/src/ops/list.rs index fa40733..a68094f 100644 --- a/crates/ruststack-s3-core/src/ops/list.rs +++ b/crates/ruststack-s3-core/src/ops/list.rs @@ -2,22 +2,23 @@ //! //! Implements `list_objects` (v1), `list_objects_v2`, and `list_object_versions`. 
-use ruststack_s3_model::error::S3Error; -use ruststack_s3_model::input::{ListObjectVersionsInput, ListObjectsInput, ListObjectsV2Input}; -use ruststack_s3_model::output::{ - ListObjectVersionsOutput, ListObjectsOutput, ListObjectsV2Output, -}; -use ruststack_s3_model::types::{ - CommonPrefix, DeleteMarkerEntry, Object, ObjectStorageClass, ObjectVersion, - ObjectVersionStorageClass, Owner, +use ruststack_s3_model::{ + error::S3Error, + input::{ListObjectVersionsInput, ListObjectsInput, ListObjectsV2Input}, + output::{ListObjectVersionsOutput, ListObjectsOutput, ListObjectsV2Output}, + types::{ + CommonPrefix, DeleteMarkerEntry, Object, ObjectStorageClass, ObjectVersion, + ObjectVersionStorageClass, Owner, + }, }; use tracing::debug; -use crate::error::S3ServiceError; -use crate::provider::RustStackS3; -use crate::state::keystore::VersionListEntry; -use crate::state::object::Owner as InternalOwner; -use crate::utils::{decode_continuation_token, encode_continuation_token}; +use crate::{ + error::S3ServiceError, + provider::RustStackS3, + state::{keystore::VersionListEntry, object::Owner as InternalOwner}, + utils::{decode_continuation_token, encode_continuation_token}, +}; /// Default maximum number of keys returned in a single listing response. 
const DEFAULT_MAX_KEYS: i32 = 1000; diff --git a/crates/ruststack-s3-core/src/ops/multipart.rs b/crates/ruststack-s3-core/src/ops/multipart.rs index f0738ca..424af38 100644 --- a/crates/ruststack-s3-core/src/ops/multipart.rs +++ b/crates/ruststack-s3-core/src/ops/multipart.rs @@ -7,30 +7,36 @@ use std::str::FromStr; use chrono::Utc; -use ruststack_s3_model::error::{S3Error, S3ErrorCode}; -use ruststack_s3_model::input::{ - AbortMultipartUploadInput, CompleteMultipartUploadInput, CreateMultipartUploadInput, - ListMultipartUploadsInput, ListPartsInput, UploadPartCopyInput, UploadPartInput, -}; -use ruststack_s3_model::output::{ - AbortMultipartUploadOutput, CompleteMultipartUploadOutput, CreateMultipartUploadOutput, - ListMultipartUploadsOutput, ListPartsOutput, UploadPartCopyOutput, UploadPartOutput, -}; -use ruststack_s3_model::types::{ - ChecksumAlgorithm, ChecksumType, CopyPartResult, Initiator, - MultipartUpload as ModelMultipartUpload, Part, StorageClass, +use ruststack_s3_model::{ + error::{S3Error, S3ErrorCode}, + input::{ + AbortMultipartUploadInput, CompleteMultipartUploadInput, CreateMultipartUploadInput, + ListMultipartUploadsInput, ListPartsInput, UploadPartCopyInput, UploadPartInput, + }, + output::{ + AbortMultipartUploadOutput, CompleteMultipartUploadOutput, CreateMultipartUploadOutput, + ListMultipartUploadsOutput, ListPartsOutput, UploadPartCopyOutput, UploadPartOutput, + }, + types::{ + ChecksumAlgorithm, ChecksumType, CopyPartResult, Initiator, + MultipartUpload as ModelMultipartUpload, Part, StorageClass, + }, }; use tracing::debug; -use crate::checksums::{ - ChecksumAlgorithm as CoreChecksumAlgorithm, compute_checksum, compute_composite_checksum, +use crate::{ + checksums::{ + ChecksumAlgorithm as CoreChecksumAlgorithm, compute_checksum, compute_composite_checksum, + }, + error::S3ServiceError, + provider::RustStackS3, + state::{ + multipart::{MultipartUpload, UploadPart}, + object::{ChecksumData, ObjectMetadata, Owner as InternalOwner, 
S3Object}, + }, + utils::{generate_upload_id, parse_copy_source}, + validation::{validate_content_md5, validate_object_key}, }; -use crate::error::S3ServiceError; -use crate::provider::RustStackS3; -use crate::state::multipart::{MultipartUpload, UploadPart}; -use crate::state::object::{ChecksumData, ObjectMetadata, Owner as InternalOwner, S3Object}; -use crate::utils::{generate_upload_id, parse_copy_source}; -use crate::validation::{validate_content_md5, validate_object_key}; /// Minimum part size for multipart uploads (5 MB). All parts except the last /// must be at least this size per the S3 specification. diff --git a/crates/ruststack-s3-core/src/ops/object.rs b/crates/ruststack-s3-core/src/ops/object.rs index fc57ffd..e3f41e9 100644 --- a/crates/ruststack-s3-core/src/ops/object.rs +++ b/crates/ruststack-s3-core/src/ops/object.rs @@ -3,38 +3,39 @@ //! Implements `put_object`, `get_object`, `head_object`, `delete_object`, //! `delete_objects`, and `copy_object`. -use std::collections::HashMap; -use std::str::FromStr; +use std::{collections::HashMap, str::FromStr}; use bytes::Bytes; use chrono::Utc; -use ruststack_s3_model::error::{S3Error, S3ErrorCode}; -use ruststack_s3_model::input::{ - CopyObjectInput, DeleteObjectInput, DeleteObjectsInput, GetObjectInput, HeadObjectInput, - PutObjectInput, -}; -use ruststack_s3_model::output::{ - CopyObjectOutput, DeleteObjectOutput, DeleteObjectsOutput, GetObjectOutput, HeadObjectOutput, - PutObjectOutput, -}; -use ruststack_s3_model::request::StreamingBlob; -use ruststack_s3_model::types::{ - ChecksumType, CopyObjectResult, DeletedObject, MetadataDirective, ObjectCannedACL, - ObjectLockLegalHoldStatus, ObjectLockMode, ServerSideEncryption, StorageClass, +use ruststack_s3_model::{ + error::{S3Error, S3ErrorCode}, + input::{ + CopyObjectInput, DeleteObjectInput, DeleteObjectsInput, GetObjectInput, HeadObjectInput, + PutObjectInput, + }, + output::{ + CopyObjectOutput, DeleteObjectOutput, DeleteObjectsOutput, GetObjectOutput, 
+ HeadObjectOutput, PutObjectOutput, + }, + request::StreamingBlob, + types::{ + ChecksumType, CopyObjectResult, DeletedObject, MetadataDirective, ObjectCannedACL, + ObjectLockLegalHoldStatus, ObjectLockMode, ServerSideEncryption, StorageClass, + }, }; use tracing::debug; -use crate::checksums::{ChecksumAlgorithm, compute_checksum}; -use crate::error::S3ServiceError; -use crate::provider::RustStackS3; -use crate::state::keystore::ObjectStore; -use crate::state::object::{ - CannedAcl, ChecksumData, ObjectMetadata, Owner as InternalOwner, S3Object, -}; -use crate::utils::{ - is_valid_if_match, is_valid_if_none_match, parse_copy_source, parse_range_header, +use crate::{ + checksums::{ChecksumAlgorithm, compute_checksum}, + error::S3ServiceError, + provider::RustStackS3, + state::{ + keystore::ObjectStore, + object::{CannedAcl, ChecksumData, ObjectMetadata, Owner as InternalOwner, S3Object}, + }, + utils::{is_valid_if_match, is_valid_if_none_match, parse_copy_source, parse_range_header}, + validation::{validate_content_md5, validate_metadata, validate_object_key}, }; -use crate::validation::{validate_content_md5, validate_metadata, validate_object_key}; /// Check whether Object Lock (legal hold or retention) prevents deletion of a /// specific object version. @@ -46,10 +47,10 @@ use crate::validation::{validate_content_md5, validate_metadata, validate_object /// /// AWS S3 rules: /// - DELETE *without* a version ID always succeeds (creates a delete marker). -/// - DELETE *with* a version ID must be rejected if the version has a legal -/// hold enabled or a retention period that has not yet expired. -/// - `BypassGovernanceRetention` allows skipping GOVERNANCE-mode retention -/// checks, but never COMPLIANCE-mode or legal holds. +/// - DELETE *with* a version ID must be rejected if the version has a legal hold enabled or a +/// retention period that has not yet expired. 
+/// - `BypassGovernanceRetention` allows skipping GOVERNANCE-mode retention checks, but never +/// COMPLIANCE-mode or legal holds. /// /// Returns `Ok(())` when the deletion is allowed. #[allow(clippy::result_large_err)] diff --git a/crates/ruststack-s3-core/src/ops/object_config.rs b/crates/ruststack-s3-core/src/ops/object_config.rs index 1f55a4d..b76dc03 100644 --- a/crates/ruststack-s3-core/src/ops/object_config.rs +++ b/crates/ruststack-s3-core/src/ops/object_config.rs @@ -5,29 +5,29 @@ //! `put_object_retention`, `get_object_legal_hold`, `put_object_legal_hold`, //! and `get_object_attributes`. -use ruststack_s3_model::error::S3Error; -use ruststack_s3_model::input::{ - DeleteObjectTaggingInput, GetObjectAclInput, GetObjectAttributesInput, GetObjectLegalHoldInput, - GetObjectRetentionInput, GetObjectTaggingInput, PutObjectAclInput, PutObjectLegalHoldInput, - PutObjectRetentionInput, PutObjectTaggingInput, -}; -use ruststack_s3_model::output::{ - DeleteObjectTaggingOutput, GetObjectAclOutput, GetObjectAttributesOutput, - GetObjectLegalHoldOutput, GetObjectRetentionOutput, GetObjectTaggingOutput, PutObjectAclOutput, - PutObjectLegalHoldOutput, PutObjectRetentionOutput, PutObjectTaggingOutput, -}; -use ruststack_s3_model::types::{ - Checksum, ChecksumType, GetObjectAttributesParts, Grant, Grantee, ObjectLockLegalHold, - ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode, Permission, - StorageClass, Tag, Type, +use ruststack_s3_model::{ + error::S3Error, + input::{ + DeleteObjectTaggingInput, GetObjectAclInput, GetObjectAttributesInput, + GetObjectLegalHoldInput, GetObjectRetentionInput, GetObjectTaggingInput, PutObjectAclInput, + PutObjectLegalHoldInput, PutObjectRetentionInput, PutObjectTaggingInput, + }, + output::{ + DeleteObjectTaggingOutput, GetObjectAclOutput, GetObjectAttributesOutput, + GetObjectLegalHoldOutput, GetObjectRetentionOutput, GetObjectTaggingOutput, + PutObjectAclOutput, PutObjectLegalHoldOutput, PutObjectRetentionOutput, 
+ PutObjectTaggingOutput, + }, + types::{ + Checksum, ChecksumType, GetObjectAttributesParts, Grant, Grantee, ObjectLockLegalHold, + ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode, Permission, + StorageClass, Tag, Type, + }, }; use tracing::debug; -use crate::error::S3ServiceError; -use crate::provider::RustStackS3; -use crate::state::object::CannedAcl; - use super::bucket::to_model_owner; +use crate::{error::S3ServiceError, provider::RustStackS3, state::object::CannedAcl}; // AWS S3 DTOs use signed integers (i32/i64) for inherently non-negative values. // These handler methods must remain async for consistency. diff --git a/crates/ruststack-s3-core/src/provider.rs b/crates/ruststack-s3-core/src/provider.rs index 5a9fb11..68f82bc 100644 --- a/crates/ruststack-s3-core/src/provider.rs +++ b/crates/ruststack-s3-core/src/provider.rs @@ -10,10 +10,9 @@ use std::sync::Arc; -use crate::config::S3Config; -use crate::cors::CorsIndex; -use crate::state::service::S3ServiceState; -use crate::storage::InMemoryStorage; +use crate::{ + config::S3Config, cors::CorsIndex, state::service::S3ServiceState, storage::InMemoryStorage, +}; /// The main S3 provider. 
/// diff --git a/crates/ruststack-s3-core/src/state/bucket.rs b/crates/ruststack-s3-core/src/state/bucket.rs index 6c1c6e5..76c350e 100644 --- a/crates/ruststack-s3-core/src/state/bucket.rs +++ b/crates/ruststack-s3-core/src/state/bucket.rs @@ -16,9 +16,11 @@ use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use tracing::debug; -use super::keystore::ObjectStore; -use super::multipart::MultipartUpload; -use super::object::{CannedAcl, Owner}; +use super::{ + keystore::ObjectStore, + multipart::MultipartUpload, + object::{CannedAcl, Owner}, +}; // --------------------------------------------------------------------------- // Supporting configuration types diff --git a/crates/ruststack-s3-core/src/state/mod.rs b/crates/ruststack-s3-core/src/state/mod.rs index b10bb5d..82bb16f 100644 --- a/crates/ruststack-s3-core/src/state/mod.rs +++ b/crates/ruststack-s3-core/src/state/mod.rs @@ -13,8 +13,7 @@ //! All types are `Send + Sync`. Concurrent access is handled via: //! //! - `DashMap` for the bucket table and multipart upload table -//! - `parking_lot::RwLock` for per-bucket configuration fields and the object -//! store +//! - `parking_lot::RwLock` for per-bucket configuration fields and the object store pub(crate) mod bucket; pub(crate) mod keystore; diff --git a/crates/ruststack-s3-core/src/state/object.rs b/crates/ruststack-s3-core/src/state/object.rs index ee77e2d..0abfaa7 100644 --- a/crates/ruststack-s3-core/src/state/object.rs +++ b/crates/ruststack-s3-core/src/state/object.rs @@ -3,9 +3,7 @@ //! This module defines the core data structures for S3 objects, delete markers, //! object metadata, ownership, ACL configuration, and versioning. 
-use std::collections::HashMap; -use std::fmt; -use std::str::FromStr; +use std::{collections::HashMap, fmt, str::FromStr}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; diff --git a/crates/ruststack-s3-core/src/state/service.rs b/crates/ruststack-s3-core/src/state/service.rs index 4053302..268ebbb 100644 --- a/crates/ruststack-s3-core/src/state/service.rs +++ b/crates/ruststack-s3-core/src/state/service.rs @@ -4,15 +4,15 @@ //! bucket-name uniqueness. All operations are thread-safe via `DashMap`. use chrono::{DateTime, Utc}; -use dashmap::DashMap; -use dashmap::mapref::one::{Ref, RefMut}; +use dashmap::{ + DashMap, + mapref::one::{Ref, RefMut}, +}; use tracing::{debug, info}; +use super::{bucket::S3Bucket, object::Owner}; use crate::error::S3ServiceError; -use super::bucket::S3Bucket; -use super::object::Owner; - /// Top-level S3 service state holding all buckets. /// /// Bucket names are globally unique across accounts, enforced by @@ -55,10 +55,10 @@ impl S3ServiceState { /// /// # Errors /// - /// - [`S3ServiceError::BucketAlreadyOwnedByYou`] if the caller already - /// owns a bucket with the same name. - /// - [`S3ServiceError::BucketAlreadyExists`] if the bucket name is taken - /// by a different account. + /// - [`S3ServiceError::BucketAlreadyOwnedByYou`] if the caller already owns a bucket with the + /// same name. + /// - [`S3ServiceError::BucketAlreadyExists`] if the bucket name is taken by a different + /// account. pub fn create_bucket( &self, name: String, @@ -89,8 +89,8 @@ impl S3ServiceState { /// # Errors /// /// - [`S3ServiceError::NoSuchBucket`] if the bucket does not exist. - /// - [`S3ServiceError::BucketNotEmpty`] if the bucket still contains - /// objects or in-progress multipart uploads. + /// - [`S3ServiceError::BucketNotEmpty`] if the bucket still contains objects or in-progress + /// multipart uploads. 
pub fn delete_bucket(&self, name: &str) -> Result<(), S3ServiceError> { let bucket_ref = self .buckets diff --git a/crates/ruststack-s3-core/src/storage.rs b/crates/ruststack-s3-core/src/storage.rs index f1d26be..864acb5 100644 --- a/crates/ruststack-s3-core/src/storage.rs +++ b/crates/ruststack-s3-core/src/storage.rs @@ -20,8 +20,7 @@ use dashmap::DashMap; use tokio::io::AsyncReadExt as _; use tracing::{debug, trace, warn}; -use crate::checksums; -use crate::error::S3ServiceError; +use crate::{checksums, error::S3ServiceError}; /// Composite key identifying a stored object: `(bucket, key, version_id)`. type StorageKey = (String, String, String); @@ -96,14 +95,20 @@ impl std::fmt::Debug for StoredData { impl Drop for StoredData { fn drop(&mut self) { if let Self::OnDisk { path, .. } = self { - if let Err(e) = std::fs::remove_file(path.as_path()) { - // File may have already been cleaned up; only warn if the - // error is something other than "not found". - if e.kind() != std::io::ErrorKind::NotFound { - warn!(path = %path.display(), error = %e, "failed to remove temp file"); - } - } else { - trace!(path = %path.display(), "removed temp file"); + let path = path.clone(); + // Spawn async file removal on the tokio runtime. If no runtime is + // active (e.g. during test teardown), the file will be cleaned up + // by the OS temp-dir reaper. + if let Ok(handle) = tokio::runtime::Handle::try_current() { + handle.spawn(async move { + if let Err(e) = tokio::fs::remove_file(&path).await { + if e.kind() != std::io::ErrorKind::NotFound { + warn!(path = %path.display(), error = %e, "failed to remove temp file"); + } + } else { + trace!(path = %path.display(), "removed temp file"); + } + }); } } } diff --git a/crates/ruststack-s3-core/src/utils.rs b/crates/ruststack-s3-core/src/utils.rs index f635929..3d94c9b 100644 --- a/crates/ruststack-s3-core/src/utils.rs +++ b/crates/ruststack-s3-core/src/utils.rs @@ -4,8 +4,7 @@ //! 
conditional-request matching, continuation-token encoding, and XML //! escaping functions. -use base64::Engine; -use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; +use base64::{Engine, engine::general_purpose::STANDARD as BASE64_STANDARD}; use chrono::Utc; use rand::Rng; use uuid::Uuid; diff --git a/crates/ruststack-s3-core/src/validation.rs b/crates/ruststack-s3-core/src/validation.rs index eb8ff28..596034e 100644 --- a/crates/ruststack-s3-core/src/validation.rs +++ b/crates/ruststack-s3-core/src/validation.rs @@ -4,9 +4,7 @@ //! user-defined metadata following the rules defined in the //! [Amazon S3 documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). -use std::collections::HashMap; -use std::hash::BuildHasher; -use std::net::Ipv4Addr; +use std::{collections::HashMap, hash::BuildHasher, net::Ipv4Addr}; use base64::Engine; use md5::{Digest, Md5}; @@ -65,7 +63,8 @@ pub fn validate_bucket_name(name: &str) -> Result<(), S3ServiceError> { return Err(S3ServiceError::InvalidBucketName { name: name.to_owned(), reason: format!( - "Bucket name must be between {MIN_BUCKET_NAME_LEN} and {MAX_BUCKET_NAME_LEN} characters long" + "Bucket name must be between {MIN_BUCKET_NAME_LEN} and {MAX_BUCKET_NAME_LEN} \ + characters long" ), }); } diff --git a/crates/ruststack-s3-http/src/body.rs b/crates/ruststack-s3-http/src/body.rs index 1a5a2ac..991ca6d 100644 --- a/crates/ruststack-s3-http/src/body.rs +++ b/crates/ruststack-s3-http/src/body.rs @@ -9,8 +9,10 @@ //! Streaming support for large objects (e.g., `GetObject`) can be added in the future //! by extending this enum with a streaming variant. 
-use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-s3-http/src/dispatch.rs b/crates/ruststack-s3-http/src/dispatch.rs index 337be77..13e0bd4 100644 --- a/crates/ruststack-s3-http/src/dispatch.rs +++ b/crates/ruststack-s3-http/src/dispatch.rs @@ -4,23 +4,19 @@ //! with the business logic handler. Given a [`RoutingContext`] and HTTP request parts/body, //! it: //! -//! 1. Deserializes the HTTP request into the operation's typed Input struct -//! (via [`FromS3Request`]) +//! 1. Deserializes the HTTP request into the operation's typed Input struct (via [`FromS3Request`]) //! 2. Calls the appropriate method on the [`S3Handler`] trait //! 3. Serializes the Output struct into an HTTP response (via [`IntoS3Response`]) //! //! Phase 3 of the project will implement [`S3Handler`] on the `RustStackS3` provider. //! For now, all operations return `NotImplemented` by default. -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; -use ruststack_s3_model::S3Operation; -use ruststack_s3_model::error::S3Error; +use ruststack_s3_model::{S3Operation, error::S3Error}; -use crate::body::S3ResponseBody; -use crate::router::RoutingContext; +use crate::{body::S3ResponseBody, router::RoutingContext}; /// Trait that the business logic provider must implement. /// diff --git a/crates/ruststack-s3-http/src/lib.rs b/crates/ruststack-s3-http/src/lib.rs index b1c279c..f060170 100644 --- a/crates/ruststack-s3-http/src/lib.rs +++ b/crates/ruststack-s3-http/src/lib.rs @@ -2,25 +2,23 @@ //! //! This crate provides the HTTP layer for a LocalStack-compatible S3 server. It handles: //! -//! - **Routing** ([`router`]): Maps HTTP requests to S3 operations by examining -//! method, path, query parameters, and headers. Supports both path-style and -//! virtual-hosted-style bucket addressing. +//! 
- **Routing** ([`router`]): Maps HTTP requests to S3 operations by examining method, path, query +//! parameters, and headers. Supports both path-style and virtual-hosted-style bucket addressing. //! -//! - **Request deserialization** ([`request`]): Converts raw HTTP request parts into -//! typed S3 Input structs from `ruststack-s3-model`. +//! - **Request deserialization** ([`request`]): Converts raw HTTP request parts into typed S3 Input +//! structs from `ruststack-s3-model`. //! -//! - **Response serialization** ([`response`]): Converts typed S3 Output structs into -//! HTTP responses with appropriate status codes, headers, and bodies. +//! - **Response serialization** ([`response`]): Converts typed S3 Output structs into HTTP +//! responses with appropriate status codes, headers, and bodies. //! -//! - **Dispatch** ([`dispatch`]): Routes identified S3 operations to the business logic -//! handler via the [`S3Handler`](dispatch::S3Handler) trait. +//! - **Dispatch** ([`dispatch`]): Routes identified S3 operations to the business logic handler via +//! the [`S3Handler`](dispatch::S3Handler) trait. //! -//! - **Service** ([`service`]): The main [`S3HttpService`](service::S3HttpService) that -//! implements hyper's `Service` trait, tying routing, auth, dispatch, and middleware -//! together. +//! - **Service** ([`service`]): The main [`S3HttpService`](service::S3HttpService) that implements +//! hyper's `Service` trait, tying routing, auth, dispatch, and middleware together. //! -//! - **Body** ([`body`]): The [`S3ResponseBody`](body::S3ResponseBody) type supporting -//! buffered and empty response modes. +//! - **Body** ([`body`]): The [`S3ResponseBody`](body::S3ResponseBody) type supporting buffered and +//! empty response modes. //! //! # Architecture //! 
diff --git a/crates/ruststack-s3-http/src/multipart.rs b/crates/ruststack-s3-http/src/multipart.rs index 81e87a6..a0d6e50 100644 --- a/crates/ruststack-s3-http/src/multipart.rs +++ b/crates/ruststack-s3-http/src/multipart.rs @@ -291,16 +291,10 @@ mod tests { #[test] fn test_should_parse_simple_multipart() { let boundary = "----boundary"; - let body = "------boundary\r\n\ - Content-Disposition: form-data; name=\"key\"\r\n\ - \r\n\ - my-object-key\r\n\ - ------boundary\r\n\ - Content-Disposition: form-data; name=\"file\"; filename=\"test.txt\"\r\n\ - Content-Type: text/plain\r\n\ - \r\n\ - hello world\r\n\ - ------boundary--\r\n"; + let body = "------boundary\r\nContent-Disposition: form-data; \ + name=\"key\"\r\n\r\nmy-object-key\r\n------boundary\r\nContent-Disposition: \ + form-data; name=\"file\"; filename=\"test.txt\"\r\nContent-Type: \ + text/plain\r\n\r\nhello world\r\n------boundary--\r\n"; let result = parse_multipart(body.as_bytes(), boundary).expect("should parse"); assert_eq!( @@ -314,24 +308,8 @@ mod tests { #[test] fn test_should_parse_multipart_with_policy_fields() { let boundary = "xyzzy"; - let body = "--xyzzy\r\n\ - Content-Disposition: form-data; name=\"key\"\r\n\ - \r\n\ - uploads/test.bin\r\n\ - --xyzzy\r\n\ - Content-Disposition: form-data; name=\"policy\"\r\n\ - \r\n\ - eyJjb25kaXRpb25zIjpbXX0=\r\n\ - --xyzzy\r\n\ - Content-Disposition: form-data; name=\"x-amz-algorithm\"\r\n\ - \r\n\ - AWS4-HMAC-SHA256\r\n\ - --xyzzy\r\n\ - Content-Disposition: form-data; name=\"file\"; filename=\"test.bin\"\r\n\ - Content-Type: application/octet-stream\r\n\ - \r\n\ - \x00\x01\x02\x03\r\n\ - --xyzzy--\r\n"; + #[rustfmt::skip] + let body = "--xyzzy\r\nContent-Disposition: form-data; name=\"key\"\r\n\r\nuploads/test.bin\r\n--xyzzy\r\nContent-Disposition: form-data; name=\"policy\"\r\n\r\neyJjb25kaXRpb25zIjpbXX0=\r\n--xyzzy\r\nContent-Disposition: form-data; name=\"x-amz-algorithm\"\r\n\r\nAWS4-HMAC-SHA256\r\n--xyzzy\r\nContent-Disposition: form-data; 
name=\"file\"; filename=\"test.bin\"\r\nContent-Type: application/octet-stream\r\n\r\n\x00\x01\x02\x03\r\n--xyzzy--\r\n"; let result = parse_multipart(body.as_bytes(), boundary).expect("should parse"); assert_eq!( @@ -352,11 +330,8 @@ mod tests { #[test] fn test_should_reject_missing_file() { let boundary = "abc"; - let body = "--abc\r\n\ - Content-Disposition: form-data; name=\"key\"\r\n\ - \r\n\ - test\r\n\ - --abc--\r\n"; + let body = + "--abc\r\nContent-Disposition: form-data; name=\"key\"\r\n\r\ntest\r\n--abc--\r\n"; let result = parse_multipart(body.as_bytes(), boundary); assert!(result.is_err()); diff --git a/crates/ruststack-s3-http/src/request.rs b/crates/ruststack-s3-http/src/request.rs index e361fe7..cc7a32c 100644 --- a/crates/ruststack-s3-http/src/request.rs +++ b/crates/ruststack-s3-http/src/request.rs @@ -11,20 +11,21 @@ //! - `HTTP payload body` - From the request body (XML or raw bytes) //! - `HTTP prefix headers: x-amz-meta-` - Collect all `x-amz-meta-*` headers -use std::collections::HashMap; -use std::str::FromStr; +use std::{collections::HashMap, str::FromStr}; use bytes::Bytes; use chrono::{DateTime, Utc}; -use ruststack_s3_model::error::{S3Error, S3ErrorCode}; -use ruststack_s3_model::request::StreamingBlob; -use ruststack_s3_model::types::{ - AccelerateConfiguration, AccessControlPolicy, BucketLifecycleConfiguration, - BucketLoggingStatus, CORSConfiguration, CompletedMultipartUpload, CreateBucketConfiguration, - Delete, NotificationConfiguration, ObjectLockConfiguration, ObjectLockLegalHold, - ObjectLockRetention, OwnershipControls, PublicAccessBlockConfiguration, - RequestPaymentConfiguration, ServerSideEncryptionConfiguration, Tagging, - VersioningConfiguration, WebsiteConfiguration, +use ruststack_s3_model::{ + error::{S3Error, S3ErrorCode}, + request::StreamingBlob, + types::{ + AccelerateConfiguration, AccessControlPolicy, BucketLifecycleConfiguration, + BucketLoggingStatus, CORSConfiguration, CompletedMultipartUpload, + 
CreateBucketConfiguration, Delete, NotificationConfiguration, ObjectLockConfiguration, + ObjectLockLegalHold, ObjectLockRetention, OwnershipControls, + PublicAccessBlockConfiguration, RequestPaymentConfiguration, + ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration, WebsiteConfiguration, + }, }; use ruststack_s3_xml::from_xml; diff --git a/crates/ruststack-s3-http/src/router.rs b/crates/ruststack-s3-http/src/router.rs index 7907b5a..518d424 100644 --- a/crates/ruststack-s3-http/src/router.rs +++ b/crates/ruststack-s3-http/src/router.rs @@ -13,8 +13,10 @@ use http::Method; use percent_encoding::percent_decode_str; -use ruststack_s3_model::error::{S3Error, S3ErrorCode}; -use ruststack_s3_model::operations::S3Operation; +use ruststack_s3_model::{ + error::{S3Error, S3ErrorCode}, + operations::S3Operation, +}; /// Configuration for S3 request routing. #[derive(Debug, Clone)] diff --git a/crates/ruststack-s3-http/src/service.rs b/crates/ruststack-s3-http/src/service.rs index c408cdd..34ce9f7 100644 --- a/crates/ruststack-s3-http/src/service.rs +++ b/crates/ruststack-s3-http/src/service.rs @@ -12,26 +12,23 @@ //! 7. Common response headers (`x-amz-request-id`, `Server`, `Date`) //! 8. 
Error response formatting -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; -use hyper::body::Incoming; -use hyper::service::Service; +use hyper::{body::Incoming, service::Service}; +use ruststack_auth::CredentialProvider; +use ruststack_s3_model::error::{S3Error, S3ErrorCode}; use sha2::{Digest, Sha256}; use tracing::{debug, error, info, warn}; use uuid::Uuid; -use ruststack_auth::CredentialProvider; -use ruststack_s3_model::error::{S3Error, S3ErrorCode}; - -use crate::body::S3ResponseBody; -use crate::dispatch::{S3Handler, dispatch_operation}; -use crate::response::error_to_response; -use crate::router::S3Router; +use crate::{ + body::S3ResponseBody, + dispatch::{S3Handler, dispatch_operation}, + response::error_to_response, + router::S3Router, +}; /// Configuration for the S3 HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-s3-model/src/input/multipart.rs b/crates/ruststack-s3-model/src/input/multipart.rs index 99f7132..d422afd 100644 --- a/crates/ruststack-s3-model/src/input/multipart.rs +++ b/crates/ruststack-s3-model/src/input/multipart.rs @@ -2,11 +2,13 @@ use std::collections::HashMap; -use crate::request::StreamingBlob; - -use crate::types::{ - ChecksumAlgorithm, ChecksumType, CompletedMultipartUpload, EncodingType, ObjectCannedACL, - ObjectLockLegalHoldStatus, ObjectLockMode, RequestPayer, ServerSideEncryption, StorageClass, +use crate::{ + request::StreamingBlob, + types::{ + ChecksumAlgorithm, ChecksumType, CompletedMultipartUpload, EncodingType, ObjectCannedACL, + ObjectLockLegalHoldStatus, ObjectLockMode, RequestPayer, ServerSideEncryption, + StorageClass, + }, }; /// S3 AbortMultipartUploadInput. 
diff --git a/crates/ruststack-s3-model/src/input/object.rs b/crates/ruststack-s3-model/src/input/object.rs index ae49338..a4b0057 100644 --- a/crates/ruststack-s3-model/src/input/object.rs +++ b/crates/ruststack-s3-model/src/input/object.rs @@ -2,12 +2,13 @@ use std::collections::HashMap; -use crate::request::StreamingBlob; - -use crate::types::{ - ChecksumAlgorithm, ChecksumMode, Delete, MetadataDirective, ObjectCannedACL, - ObjectLockLegalHoldStatus, ObjectLockMode, RequestPayer, ServerSideEncryption, StorageClass, - TaggingDirective, +use crate::{ + request::StreamingBlob, + types::{ + ChecksumAlgorithm, ChecksumMode, Delete, MetadataDirective, ObjectCannedACL, + ObjectLockLegalHoldStatus, ObjectLockMode, RequestPayer, ServerSideEncryption, + StorageClass, TaggingDirective, + }, }; /// S3 CopyObjectInput. diff --git a/crates/ruststack-s3-model/src/output/object.rs b/crates/ruststack-s3-model/src/output/object.rs index 7e8b437..717547a 100644 --- a/crates/ruststack-s3-model/src/output/object.rs +++ b/crates/ruststack-s3-model/src/output/object.rs @@ -2,11 +2,13 @@ use std::collections::HashMap; -use crate::request::StreamingBlob; - -use crate::types::{ - ArchiveStatus, ChecksumType, CopyObjectResult, DeletedObject, Error, ObjectLockLegalHoldStatus, - ObjectLockMode, ReplicationStatus, RequestCharged, ServerSideEncryption, StorageClass, +use crate::{ + request::StreamingBlob, + types::{ + ArchiveStatus, ChecksumType, CopyObjectResult, DeletedObject, Error, + ObjectLockLegalHoldStatus, ObjectLockMode, ReplicationStatus, RequestCharged, + ServerSideEncryption, StorageClass, + }, }; /// S3 CopyObjectOutput. diff --git a/crates/ruststack-s3-xml/src/deserialize.rs b/crates/ruststack-s3-xml/src/deserialize.rs index e5a3825..487915c 100644 --- a/crates/ruststack-s3-xml/src/deserialize.rs +++ b/crates/ruststack-s3-xml/src/deserialize.rs @@ -4,8 +4,7 @@ //! that need to be deserialized from XML request bodies. The deserialization follows the //! 
AWS S3 RestXml protocol conventions. -use quick_xml::Reader; -use quick_xml::events::Event; +use quick_xml::{Reader, events::Event}; use crate::error::XmlError; @@ -147,26 +146,23 @@ fn parse_timestamp(s: &str) -> Result, XmlError> { use ruststack_s3_model::types::{ AbortIncompleteMultipartUpload, AccelerateConfiguration, AccessControlPolicy, - BucketAccelerateStatus, BucketInfo, BucketLifecycleConfiguration, BucketLoggingStatus, - BucketLogsPermission, BucketVersioningStatus, CORSConfiguration, CORSRule, - CompletedMultipartUpload, CompletedPart, Condition, CreateBucketConfiguration, - DefaultRetention, Delete, ErrorDocument, EventBridgeConfiguration, ExpirationStatus, - FilterRule, FilterRuleName, Grant, Grantee, IndexDocument, LambdaFunctionConfiguration, - LifecycleExpiration, LifecycleRule, LifecycleRuleAndOperator, LifecycleRuleFilter, - LocationInfo, LoggingEnabled, MFADelete, NoncurrentVersionExpiration, - NoncurrentVersionTransition, NotificationConfiguration, NotificationConfigurationFilter, - ObjectIdentifier, ObjectLockConfiguration, ObjectLockEnabled, ObjectLockLegalHold, - ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode, ObjectLockRule, - ObjectOwnership, Owner, OwnershipControls, OwnershipControlsRule, PartitionedPrefix, Payer, - Permission, Protocol, PublicAccessBlockConfiguration, QueueConfiguration, Redirect, - RedirectAllRequestsTo, RequestPaymentConfiguration, RoutingRule, S3KeyFilter, - ServerSideEncryptionByDefault, ServerSideEncryptionConfiguration, ServerSideEncryptionRule, - SimplePrefix, Tag, Tagging, TargetGrant, TargetObjectKeyFormat, TopicConfiguration, Transition, - TransitionStorageClass, VersioningConfiguration, WebsiteConfiguration, -}; -use ruststack_s3_model::types::{ - BucketLocationConstraint, BucketType, DataRedundancy, Event as S3Event, LocationType, - ServerSideEncryption, + BucketAccelerateStatus, BucketInfo, BucketLifecycleConfiguration, BucketLocationConstraint, + BucketLoggingStatus, 
BucketLogsPermission, BucketType, BucketVersioningStatus, + CORSConfiguration, CORSRule, CompletedMultipartUpload, CompletedPart, Condition, + CreateBucketConfiguration, DataRedundancy, DefaultRetention, Delete, ErrorDocument, + Event as S3Event, EventBridgeConfiguration, ExpirationStatus, FilterRule, FilterRuleName, + Grant, Grantee, IndexDocument, LambdaFunctionConfiguration, LifecycleExpiration, LifecycleRule, + LifecycleRuleAndOperator, LifecycleRuleFilter, LocationInfo, LocationType, LoggingEnabled, + MFADelete, NoncurrentVersionExpiration, NoncurrentVersionTransition, NotificationConfiguration, + NotificationConfigurationFilter, ObjectIdentifier, ObjectLockConfiguration, ObjectLockEnabled, + ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode, + ObjectLockRule, ObjectOwnership, Owner, OwnershipControls, OwnershipControlsRule, + PartitionedPrefix, Payer, Permission, Protocol, PublicAccessBlockConfiguration, + QueueConfiguration, Redirect, RedirectAllRequestsTo, RequestPaymentConfiguration, RoutingRule, + S3KeyFilter, ServerSideEncryption, ServerSideEncryptionByDefault, + ServerSideEncryptionConfiguration, ServerSideEncryptionRule, SimplePrefix, Tag, Tagging, + TargetGrant, TargetObjectKeyFormat, TopicConfiguration, Transition, TransitionStorageClass, + VersioningConfiguration, WebsiteConfiguration, }; impl S3Deserialize for Tag { diff --git a/crates/ruststack-s3-xml/src/error.rs b/crates/ruststack-s3-xml/src/error.rs index 45198f6..78e9c3a 100644 --- a/crates/ruststack-s3-xml/src/error.rs +++ b/crates/ruststack-s3-xml/src/error.rs @@ -5,8 +5,10 @@ use std::io; -use quick_xml::Writer; -use quick_xml::events::{BytesDecl, BytesText, Event}; +use quick_xml::{ + Writer, + events::{BytesDecl, BytesText, Event}, +}; /// Errors that can occur during S3 XML serialization or deserialization. 
#[derive(Debug, thiserror::Error)] diff --git a/crates/ruststack-s3-xml/src/serialize.rs b/crates/ruststack-s3-xml/src/serialize.rs index b5596c5..aae1ed2 100644 --- a/crates/ruststack-s3-xml/src/serialize.rs +++ b/crates/ruststack-s3-xml/src/serialize.rs @@ -11,8 +11,10 @@ use std::io::{self, Write}; -use quick_xml::Writer; -use quick_xml::events::{BytesText, Event}; +use quick_xml::{ + Writer, + events::{BytesText, Event}, +}; use crate::error::XmlError; @@ -208,36 +210,39 @@ impl_as_str!( // S3Serialize implementations for shared types // --------------------------------------------------------------------------- -use ruststack_s3_model::output::{ - CompleteMultipartUploadOutput, CopyObjectOutput, CreateMultipartUploadOutput, - DeleteObjectsOutput, GetBucketAccelerateConfigurationOutput, GetBucketAclOutput, - GetBucketCorsOutput, GetBucketEncryptionOutput, GetBucketLifecycleConfigurationOutput, - GetBucketLoggingOutput, GetBucketNotificationConfigurationOutput, - GetBucketOwnershipControlsOutput, GetBucketPolicyStatusOutput, GetBucketRequestPaymentOutput, - GetBucketTaggingOutput, GetBucketVersioningOutput, GetBucketWebsiteOutput, GetObjectAclOutput, - GetObjectAttributesOutput, GetObjectLegalHoldOutput, GetObjectLockConfigurationOutput, - GetObjectRetentionOutput, GetObjectTaggingOutput, GetPublicAccessBlockOutput, - ListBucketsOutput, ListMultipartUploadsOutput, ListObjectVersionsOutput, ListObjectsOutput, - ListObjectsV2Output, ListPartsOutput, UploadPartCopyOutput, -}; -use ruststack_s3_model::types::{ - AbortIncompleteMultipartUpload, AccelerateConfiguration, AccessControlPolicy, Bucket, - BucketInfo, BucketLifecycleConfiguration, BucketLoggingStatus, CORSConfiguration, CORSRule, - Checksum, CommonPrefix, CompletedMultipartUpload, CompletedPart, Condition, CopyObjectResult, - CopyPartResult, CreateBucketConfiguration, DefaultRetention, Delete, DeleteMarkerEntry, - DeletedObject, Error, ErrorDocument, EventBridgeConfiguration, FilterRule, - 
GetObjectAttributesParts, Grant, Grantee, IndexDocument, Initiator, - LambdaFunctionConfiguration, LifecycleExpiration, LifecycleRule, LifecycleRuleAndOperator, - LifecycleRuleFilter, LocationInfo, LoggingEnabled, MultipartUpload, - NoncurrentVersionExpiration, NoncurrentVersionTransition, NotificationConfiguration, - NotificationConfigurationFilter, Object, ObjectIdentifier, ObjectLockConfiguration, - ObjectLockLegalHold, ObjectLockRetention, ObjectLockRule, ObjectPart, ObjectVersion, Owner, - OwnershipControls, OwnershipControlsRule, Part, PartitionedPrefix, PolicyStatus, - PublicAccessBlockConfiguration, QueueConfiguration, Redirect, RedirectAllRequestsTo, - RequestPaymentConfiguration, RoutingRule, S3KeyFilter, ServerSideEncryptionByDefault, - ServerSideEncryptionConfiguration, ServerSideEncryptionRule, SimplePrefix, Tag, Tagging, - TargetGrant, TargetObjectKeyFormat, TopicConfiguration, Transition, VersioningConfiguration, - WebsiteConfiguration, +use ruststack_s3_model::{ + output::{ + CompleteMultipartUploadOutput, CopyObjectOutput, CreateMultipartUploadOutput, + DeleteObjectsOutput, GetBucketAccelerateConfigurationOutput, GetBucketAclOutput, + GetBucketCorsOutput, GetBucketEncryptionOutput, GetBucketLifecycleConfigurationOutput, + GetBucketLoggingOutput, GetBucketNotificationConfigurationOutput, + GetBucketOwnershipControlsOutput, GetBucketPolicyStatusOutput, + GetBucketRequestPaymentOutput, GetBucketTaggingOutput, GetBucketVersioningOutput, + GetBucketWebsiteOutput, GetObjectAclOutput, GetObjectAttributesOutput, + GetObjectLegalHoldOutput, GetObjectLockConfigurationOutput, GetObjectRetentionOutput, + GetObjectTaggingOutput, GetPublicAccessBlockOutput, ListBucketsOutput, + ListMultipartUploadsOutput, ListObjectVersionsOutput, ListObjectsOutput, + ListObjectsV2Output, ListPartsOutput, UploadPartCopyOutput, + }, + types::{ + AbortIncompleteMultipartUpload, AccelerateConfiguration, AccessControlPolicy, Bucket, + BucketInfo, BucketLifecycleConfiguration, 
BucketLoggingStatus, CORSConfiguration, CORSRule, + Checksum, CommonPrefix, CompletedMultipartUpload, CompletedPart, Condition, + CopyObjectResult, CopyPartResult, CreateBucketConfiguration, DefaultRetention, Delete, + DeleteMarkerEntry, DeletedObject, Error, ErrorDocument, EventBridgeConfiguration, + FilterRule, GetObjectAttributesParts, Grant, Grantee, IndexDocument, Initiator, + LambdaFunctionConfiguration, LifecycleExpiration, LifecycleRule, LifecycleRuleAndOperator, + LifecycleRuleFilter, LocationInfo, LoggingEnabled, MultipartUpload, + NoncurrentVersionExpiration, NoncurrentVersionTransition, NotificationConfiguration, + NotificationConfigurationFilter, Object, ObjectIdentifier, ObjectLockConfiguration, + ObjectLockLegalHold, ObjectLockRetention, ObjectLockRule, ObjectPart, ObjectVersion, Owner, + OwnershipControls, OwnershipControlsRule, Part, PartitionedPrefix, PolicyStatus, + PublicAccessBlockConfiguration, QueueConfiguration, Redirect, RedirectAllRequestsTo, + RequestPaymentConfiguration, RoutingRule, S3KeyFilter, ServerSideEncryptionByDefault, + ServerSideEncryptionConfiguration, ServerSideEncryptionRule, SimplePrefix, Tag, Tagging, + TargetGrant, TargetObjectKeyFormat, TopicConfiguration, Transition, + VersioningConfiguration, WebsiteConfiguration, + }, }; impl S3Serialize for Tag { diff --git a/crates/ruststack-secretsmanager-core/src/handler.rs b/crates/ruststack-secretsmanager-core/src/handler.rs index e38e40a..bbc826c 100644 --- a/crates/ruststack-secretsmanager-core/src/handler.rs +++ b/crates/ruststack-secretsmanager-core/src/handler.rs @@ -1,16 +1,14 @@ //! Secrets Manager handler implementation bridging HTTP to business logic. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_secretsmanager_http::body::SecretsManagerResponseBody; -use ruststack_secretsmanager_http::dispatch::SecretsManagerHandler; -use ruststack_secretsmanager_http::response::json_response; -use ruststack_secretsmanager_model::error::SecretsManagerError; -use ruststack_secretsmanager_model::operations::SecretsManagerOperation; +use ruststack_secretsmanager_http::{ + body::SecretsManagerResponseBody, dispatch::SecretsManagerHandler, response::json_response, +}; +use ruststack_secretsmanager_model::{ + error::SecretsManagerError, operations::SecretsManagerOperation, +}; use crate::provider::RustStackSecretsManager; diff --git a/crates/ruststack-secretsmanager-core/src/password.rs b/crates/ruststack-secretsmanager-core/src/password.rs index 5b03e5f..0b2678b 100644 --- a/crates/ruststack-secretsmanager-core/src/password.rs +++ b/crates/ruststack-secretsmanager-core/src/password.rs @@ -1,7 +1,6 @@ //! Random password generation with configurable constraints. use rand::Rng; - use ruststack_secretsmanager_model::error::{SecretsManagerError, SecretsManagerErrorCode}; /// Default password length. 
diff --git a/crates/ruststack-secretsmanager-core/src/provider.rs b/crates/ruststack-secretsmanager-core/src/provider.rs index 789d2c7..e539ad8 100644 --- a/crates/ruststack-secretsmanager-core/src/provider.rs +++ b/crates/ruststack-secretsmanager-core/src/provider.rs @@ -3,40 +3,44 @@ use std::collections::HashMap; use chrono::Utc; - -use ruststack_secretsmanager_model::error::{SecretsManagerError, SecretsManagerErrorCode}; -use ruststack_secretsmanager_model::input::{ - BatchGetSecretValueInput, CancelRotateSecretInput, CreateSecretInput, - DeleteResourcePolicyInput, DeleteSecretInput, DescribeSecretInput, GetRandomPasswordInput, - GetResourcePolicyInput, GetSecretValueInput, ListSecretVersionIdsInput, ListSecretsInput, - PutResourcePolicyInput, PutSecretValueInput, RemoveRegionsFromReplicationInput, - ReplicateSecretToRegionsInput, RestoreSecretInput, RotateSecretInput, - StopReplicationToReplicaInput, TagResourceInput, UntagResourceInput, UpdateSecretInput, - UpdateSecretVersionStageInput, ValidateResourcePolicyInput, -}; -use ruststack_secretsmanager_model::output::{ - BatchGetSecretValueResponse, CancelRotateSecretResponse, CreateSecretResponse, - DeleteResourcePolicyResponse, DeleteSecretResponse, DescribeSecretResponse, - GetRandomPasswordResponse, GetResourcePolicyResponse, GetSecretValueResponse, - ListSecretVersionIdsResponse, ListSecretsResponse, PutResourcePolicyResponse, - PutSecretValueResponse, RemoveRegionsFromReplicationResponse, ReplicateSecretToRegionsResponse, - RestoreSecretResponse, RotateSecretResponse, StopReplicationToReplicaResponse, - UpdateSecretResponse, UpdateSecretVersionStageResponse, ValidateResourcePolicyResponse, -}; -use ruststack_secretsmanager_model::types::{ - APIErrorType, SecretListEntry, SecretValueEntry, SecretVersionsListEntry, SortByType, - SortOrderType, +use ruststack_secretsmanager_model::{ + error::{SecretsManagerError, SecretsManagerErrorCode}, + input::{ + BatchGetSecretValueInput, CancelRotateSecretInput, 
CreateSecretInput, + DeleteResourcePolicyInput, DeleteSecretInput, DescribeSecretInput, GetRandomPasswordInput, + GetResourcePolicyInput, GetSecretValueInput, ListSecretVersionIdsInput, ListSecretsInput, + PutResourcePolicyInput, PutSecretValueInput, RemoveRegionsFromReplicationInput, + ReplicateSecretToRegionsInput, RestoreSecretInput, RotateSecretInput, + StopReplicationToReplicaInput, TagResourceInput, UntagResourceInput, UpdateSecretInput, + UpdateSecretVersionStageInput, ValidateResourcePolicyInput, + }, + output::{ + BatchGetSecretValueResponse, CancelRotateSecretResponse, CreateSecretResponse, + DeleteResourcePolicyResponse, DeleteSecretResponse, DescribeSecretResponse, + GetRandomPasswordResponse, GetResourcePolicyResponse, GetSecretValueResponse, + ListSecretVersionIdsResponse, ListSecretsResponse, PutResourcePolicyResponse, + PutSecretValueResponse, RemoveRegionsFromReplicationResponse, + ReplicateSecretToRegionsResponse, RestoreSecretResponse, RotateSecretResponse, + StopReplicationToReplicaResponse, UpdateSecretResponse, UpdateSecretVersionStageResponse, + ValidateResourcePolicyResponse, + }, + types::{ + APIErrorType, SecretListEntry, SecretValueEntry, SecretVersionsListEntry, SortByType, + SortOrderType, + }, }; -use crate::config::SecretsManagerConfig; -use crate::filter::matches_filters; -use crate::password::generate_random_password; -use crate::storage::{SecretRecord, SecretStore, SecretVersion}; -use crate::validation::{ - MAX_TAGS, validate_client_request_token, validate_description, validate_recovery_window, - validate_secret_name, validate_secret_value, validate_tags, +use crate::{ + config::SecretsManagerConfig, + filter::matches_filters, + password::generate_random_password, + storage::{SecretRecord, SecretStore, SecretVersion}, + validation::{ + MAX_TAGS, validate_client_request_token, validate_description, validate_recovery_window, + validate_secret_name, validate_secret_value, validate_tags, + }, + version::AWSCURRENT, }; -use 
crate::version::AWSCURRENT; /// Default max results for `ListSecrets`. const DEFAULT_LIST_MAX_RESULTS: i32 = 100; @@ -176,7 +180,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -231,7 +236,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -305,7 +311,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() && !force { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was already marked for deletion.", + "You can't perform this operation on the secret because it was already marked \ + for deletion.", )); } } @@ -314,7 +321,8 @@ impl RustStackSecretsManager { if force && recovery_days.is_some() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidParameterException, - "You can't use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays.", + "You can't use ForceDeleteWithoutRecovery in conjunction with \ + RecoveryWindowInDays.", )); } @@ -393,7 +401,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -615,7 +624,8 
@@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -675,7 +685,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -745,7 +756,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -896,7 +908,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -925,7 +938,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -1029,7 +1043,8 @@ impl RustStackSecretsManager { if record.is_pending_deletion() { return Err(SecretsManagerError::with_message( 
SecretsManagerErrorCode::InvalidRequestException, - "You can't perform this operation on the secret because it was marked for deletion.", + "You can't perform this operation on the secret because it was marked for \ + deletion.", )); } @@ -1078,7 +1093,8 @@ fn resolve_version_from_record<'a>( return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::ResourceNotFoundException, format!( - "Secrets Manager can't find the secret value for VersionId: {vid} and staging label: {stage}" + "Secrets Manager can't find the secret value for VersionId: {vid} and \ + staging label: {stage}" ), )); } diff --git a/crates/ruststack-secretsmanager-core/src/storage.rs b/crates/ruststack-secretsmanager-core/src/storage.rs index 50ba7a4..5690da9 100644 --- a/crates/ruststack-secretsmanager-core/src/storage.rs +++ b/crates/ruststack-secretsmanager-core/src/storage.rs @@ -8,9 +8,10 @@ use std::collections::HashMap; use dashmap::DashMap; use rand::Rng; - -use ruststack_secretsmanager_model::error::{SecretsManagerError, SecretsManagerErrorCode}; -use ruststack_secretsmanager_model::types::{RotationRulesType, Tag}; +use ruststack_secretsmanager_model::{ + error::{SecretsManagerError, SecretsManagerErrorCode}, + types::{RotationRulesType, Tag}, +}; use crate::version::{AWSCURRENT, AWSPENDING, AWSPREVIOUS, MAX_VERSIONS}; diff --git a/crates/ruststack-secretsmanager-core/src/validation.rs b/crates/ruststack-secretsmanager-core/src/validation.rs index 216e806..ddf2d81 100644 --- a/crates/ruststack-secretsmanager-core/src/validation.rs +++ b/crates/ruststack-secretsmanager-core/src/validation.rs @@ -1,7 +1,9 @@ //! Validation rules for Secrets Manager resources. -use ruststack_secretsmanager_model::error::{SecretsManagerError, SecretsManagerErrorCode}; -use ruststack_secretsmanager_model::types::Tag; +use ruststack_secretsmanager_model::{ + error::{SecretsManagerError, SecretsManagerErrorCode}, + types::Tag, +}; /// Maximum secret name length. 
const MAX_NAME_LENGTH: usize = 512; @@ -40,8 +42,8 @@ pub fn validate_secret_name(name: &str) -> Result<(), SecretsManagerError> { return Err(SecretsManagerError::with_message( SecretsManagerErrorCode::InvalidParameterException, format!( - "The parameter Name \"{name}\" contains invalid characters. \ - Only ASCII letters, digits, and /_+=.@- are allowed." + "The parameter Name \"{name}\" contains invalid characters. Only ASCII letters, \ + digits, and /_+=.@- are allowed." ), )); } diff --git a/crates/ruststack-secretsmanager-http/src/body.rs b/crates/ruststack-secretsmanager-http/src/body.rs index 47f7159..3952c10 100644 --- a/crates/ruststack-secretsmanager-http/src/body.rs +++ b/crates/ruststack-secretsmanager-http/src/body.rs @@ -1,7 +1,9 @@ //! Secrets Manager HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-secretsmanager-http/src/dispatch.rs b/crates/ruststack-secretsmanager-http/src/dispatch.rs index 6484113..be884bc 100644 --- a/crates/ruststack-secretsmanager-http/src/dispatch.rs +++ b/crates/ruststack-secretsmanager-http/src/dispatch.rs @@ -1,12 +1,11 @@ //! Secrets Manager handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_secretsmanager_model::error::SecretsManagerError; -use ruststack_secretsmanager_model::operations::SecretsManagerOperation; +use ruststack_secretsmanager_model::{ + error::SecretsManagerError, operations::SecretsManagerOperation, +}; use crate::body::SecretsManagerResponseBody; diff --git a/crates/ruststack-secretsmanager-http/src/response.rs b/crates/ruststack-secretsmanager-http/src/response.rs index 74353cd..d751e51 100644 --- a/crates/ruststack-secretsmanager-http/src/response.rs +++ b/crates/ruststack-secretsmanager-http/src/response.rs @@ -90,9 +90,10 @@ pub fn json_response( #[cfg(test)] mod tests { - use super::*; use ruststack_secretsmanager_model::error::SecretsManagerErrorCode; + use super::*; + #[test] fn test_should_format_error_json_with_capital_message() { let err = SecretsManagerError::with_message( diff --git a/crates/ruststack-secretsmanager-http/src/router.rs b/crates/ruststack-secretsmanager-http/src/router.rs index e15b340..b5c4dff 100644 --- a/crates/ruststack-secretsmanager-http/src/router.rs +++ b/crates/ruststack-secretsmanager-http/src/router.rs @@ -7,8 +7,9 @@ //! X-Amz-Target: secretsmanager.CreateSecret //! ``` -use ruststack_secretsmanager_model::error::SecretsManagerError; -use ruststack_secretsmanager_model::operations::SecretsManagerOperation; +use ruststack_secretsmanager_model::{ + error::SecretsManagerError, operations::SecretsManagerOperation, +}; /// The expected prefix for the `X-Amz-Target` header value. const TARGET_PREFIX: &str = "secretsmanager."; diff --git a/crates/ruststack-secretsmanager-http/src/service.rs b/crates/ruststack-secretsmanager-http/src/service.rs index 28b2744..39dfd24 100644 --- a/crates/ruststack-secretsmanager-http/src/service.rs +++ b/crates/ruststack-secretsmanager-http/src/service.rs @@ -1,20 +1,18 @@ //! Secrets Manager HTTP service implementing the hyper `Service` trait. 
-use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_secretsmanager_model::error::{SecretsManagerError, SecretsManagerErrorCode}; -use crate::body::SecretsManagerResponseBody; -use crate::dispatch::{SecretsManagerHandler, dispatch_operation}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::SecretsManagerResponseBody, + dispatch::{SecretsManagerHandler, dispatch_operation}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the Secrets Manager HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-secretsmanager-model/src/blob.rs b/crates/ruststack-secretsmanager-model/src/blob.rs index 6d654e5..046cf13 100644 --- a/crates/ruststack-secretsmanager-model/src/blob.rs +++ b/crates/ruststack-secretsmanager-model/src/blob.rs @@ -4,8 +4,7 @@ //! as base64-encoded strings in JSON. This module provides custom serde //! serializers/deserializers for `bytes::Bytes` that handle the base64 encoding. -use base64::Engine; -use base64::engine::general_purpose::STANDARD; +use base64::{Engine, engine::general_purpose::STANDARD}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Serialize `bytes::Bytes` as a base64 string. diff --git a/crates/ruststack-ses-core/src/config_set.rs b/crates/ruststack-ses-core/src/config_set.rs index 927a019..00368d9 100644 --- a/crates/ruststack-ses-core/src/config_set.rs +++ b/crates/ruststack-ses-core/src/config_set.rs @@ -3,10 +3,11 @@ //! Stores configuration sets and their associated event destinations //! using `DashMap` for concurrent access. 
-use dashmap::DashMap; -use dashmap::mapref::entry::Entry; -use ruststack_ses_model::error::{SesError, SesErrorCode}; -use ruststack_ses_model::types::EventDestination; +use dashmap::{DashMap, mapref::entry::Entry}; +use ruststack_ses_model::{ + error::{SesError, SesErrorCode}, + types::EventDestination, +}; /// Internal configuration set record with event destinations. #[derive(Debug, Clone)] diff --git a/crates/ruststack-ses-core/src/handler.rs b/crates/ruststack-ses-core/src/handler.rs index 5b2323d..20461a3 100644 --- a/crates/ruststack-ses-core/src/handler.rs +++ b/crates/ruststack-ses-core/src/handler.rs @@ -7,42 +7,44 @@ //! configuration sets, event destinations, receipt rules, identity //! notification/DKIM/mail-from configuration, and sending authorization. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use base64::Engine; use bytes::Bytes; - -use ruststack_ses_http::body::SesResponseBody; -use ruststack_ses_http::dispatch::SesHandler; -use ruststack_ses_http::request::{ - get_optional_bool, get_optional_param, get_required_param, parse_form_params, - parse_member_list, parse_tag_list, -}; -use ruststack_ses_http::response::{XmlWriter, xml_response}; -use ruststack_ses_model::error::SesError; -use ruststack_ses_model::input::{ - CloneReceiptRuleSetInput, CreateConfigurationSetEventDestinationInput, - CreateConfigurationSetInput, CreateReceiptRuleInput, CreateReceiptRuleSetInput, - CreateTemplateInput, DeleteConfigurationSetEventDestinationInput, DeleteConfigurationSetInput, - DeleteIdentityInput, DeleteIdentityPolicyInput, DeleteReceiptRuleInput, - DeleteReceiptRuleSetInput, DeleteTemplateInput, DeleteVerifiedEmailAddressInput, - DescribeActiveReceiptRuleSetInput, DescribeConfigurationSetInput, DescribeReceiptRuleSetInput, - GetIdentityDkimAttributesInput, GetIdentityMailFromDomainAttributesInput, - GetIdentityNotificationAttributesInput, GetIdentityPoliciesInput, - 
GetIdentityVerificationAttributesInput, GetTemplateInput, ListConfigurationSetsInput, - ListIdentitiesInput, ListIdentityPoliciesInput, ListTemplatesInput, PutIdentityPolicyInput, - SendEmailInput, SendRawEmailInput, SendTemplatedEmailInput, SetActiveReceiptRuleSetInput, - SetIdentityFeedbackForwardingEnabledInput, SetIdentityMailFromDomainInput, - SetIdentityNotificationTopicInput, UpdateConfigurationSetEventDestinationInput, - UpdateTemplateInput, VerifyDomainDkimInput, VerifyDomainIdentityInput, VerifyEmailAddressInput, - VerifyEmailIdentityInput, +use ruststack_ses_http::{ + body::SesResponseBody, + dispatch::SesHandler, + request::{ + get_optional_bool, get_optional_param, get_required_param, parse_form_params, + parse_member_list, parse_tag_list, + }, + response::{XmlWriter, xml_response}, }; -use ruststack_ses_model::operations::SesOperation; -use ruststack_ses_model::types::{ - BehaviorOnMXFailure, Body, ConfigurationSet, Content, Destination, EventDestination, - IdentityType, Message, MessageTag, NotificationType, RawMessage, ReceiptRule, Template, +use ruststack_ses_model::{ + error::SesError, + input::{ + CloneReceiptRuleSetInput, CreateConfigurationSetEventDestinationInput, + CreateConfigurationSetInput, CreateReceiptRuleInput, CreateReceiptRuleSetInput, + CreateTemplateInput, DeleteConfigurationSetEventDestinationInput, + DeleteConfigurationSetInput, DeleteIdentityInput, DeleteIdentityPolicyInput, + DeleteReceiptRuleInput, DeleteReceiptRuleSetInput, DeleteTemplateInput, + DeleteVerifiedEmailAddressInput, DescribeActiveReceiptRuleSetInput, + DescribeConfigurationSetInput, DescribeReceiptRuleSetInput, GetIdentityDkimAttributesInput, + GetIdentityMailFromDomainAttributesInput, GetIdentityNotificationAttributesInput, + GetIdentityPoliciesInput, GetIdentityVerificationAttributesInput, GetTemplateInput, + ListConfigurationSetsInput, ListIdentitiesInput, ListIdentityPoliciesInput, + ListTemplatesInput, PutIdentityPolicyInput, SendEmailInput, 
SendRawEmailInput, + SendTemplatedEmailInput, SetActiveReceiptRuleSetInput, + SetIdentityFeedbackForwardingEnabledInput, SetIdentityMailFromDomainInput, + SetIdentityNotificationTopicInput, UpdateConfigurationSetEventDestinationInput, + UpdateTemplateInput, VerifyDomainDkimInput, VerifyDomainIdentityInput, + VerifyEmailAddressInput, VerifyEmailIdentityInput, + }, + operations::SesOperation, + types::{ + BehaviorOnMXFailure, Body, ConfigurationSet, Content, Destination, EventDestination, + IdentityType, Message, MessageTag, NotificationType, RawMessage, ReceiptRule, Template, + }, }; use crate::provider::RustStackSes; diff --git a/crates/ruststack-ses-core/src/provider.rs b/crates/ruststack-ses-core/src/provider.rs index 4826189..7aabd4f 100644 --- a/crates/ruststack-ses-core/src/provider.rs +++ b/crates/ruststack-ses-core/src/provider.rs @@ -9,61 +9,59 @@ use std::sync::Arc; -use tracing::debug; - -use ruststack_ses_model::error::{SesError, SesErrorCode}; -use ruststack_ses_model::input::{ - CloneReceiptRuleSetInput, CreateConfigurationSetEventDestinationInput, - CreateConfigurationSetInput, CreateReceiptRuleInput, CreateReceiptRuleSetInput, - CreateTemplateInput, DeleteConfigurationSetEventDestinationInput, DeleteConfigurationSetInput, - DeleteIdentityInput, DeleteIdentityPolicyInput, DeleteReceiptRuleInput, - DeleteReceiptRuleSetInput, DeleteTemplateInput, DeleteVerifiedEmailAddressInput, - DescribeActiveReceiptRuleSetInput, DescribeConfigurationSetInput, DescribeReceiptRuleSetInput, - GetIdentityDkimAttributesInput, GetIdentityMailFromDomainAttributesInput, - GetIdentityNotificationAttributesInput, GetIdentityPoliciesInput, - GetIdentityVerificationAttributesInput, GetTemplateInput, ListConfigurationSetsInput, - ListIdentitiesInput, ListIdentityPoliciesInput, ListTemplatesInput, PutIdentityPolicyInput, - SendEmailInput, SendRawEmailInput, SendTemplatedEmailInput, SetActiveReceiptRuleSetInput, - SetIdentityFeedbackForwardingEnabledInput, 
SetIdentityMailFromDomainInput, - SetIdentityNotificationTopicInput, UpdateConfigurationSetEventDestinationInput, - UpdateTemplateInput, VerifyDomainDkimInput, VerifyDomainIdentityInput, VerifyEmailAddressInput, - VerifyEmailIdentityInput, -}; -use ruststack_ses_model::output::{ - CloneReceiptRuleSetResponse, CreateConfigurationSetEventDestinationResponse, - CreateConfigurationSetResponse, CreateReceiptRuleResponse, CreateReceiptRuleSetResponse, - CreateTemplateResponse, DeleteConfigurationSetEventDestinationResponse, - DeleteConfigurationSetResponse, DeleteIdentityPolicyResponse, DeleteIdentityResponse, - DeleteReceiptRuleResponse, DeleteReceiptRuleSetResponse, DeleteTemplateResponse, - DescribeActiveReceiptRuleSetResponse, DescribeConfigurationSetResponse, - DescribeReceiptRuleSetResponse, GetIdentityDkimAttributesResponse, - GetIdentityMailFromDomainAttributesResponse, GetIdentityNotificationAttributesResponse, - GetIdentityPoliciesResponse, GetIdentityVerificationAttributesResponse, GetSendQuotaResponse, - GetSendStatisticsResponse, GetTemplateResponse, ListConfigurationSetsResponse, - ListIdentitiesResponse, ListIdentityPoliciesResponse, ListTemplatesResponse, - ListVerifiedEmailAddressesResponse, PutIdentityPolicyResponse, SendEmailResponse, - SendRawEmailResponse, SendTemplatedEmailResponse, SetActiveReceiptRuleSetResponse, - SetIdentityFeedbackForwardingEnabledResponse, SetIdentityMailFromDomainResponse, - SetIdentityNotificationTopicResponse, UpdateConfigurationSetEventDestinationResponse, - UpdateTemplateResponse, VerifyDomainDkimResponse, VerifyDomainIdentityResponse, - VerifyEmailIdentityResponse, +use ruststack_ses_model::{ + error::{SesError, SesErrorCode}, + input::{ + CloneReceiptRuleSetInput, CreateConfigurationSetEventDestinationInput, + CreateConfigurationSetInput, CreateReceiptRuleInput, CreateReceiptRuleSetInput, + CreateTemplateInput, DeleteConfigurationSetEventDestinationInput, + DeleteConfigurationSetInput, DeleteIdentityInput, 
DeleteIdentityPolicyInput, + DeleteReceiptRuleInput, DeleteReceiptRuleSetInput, DeleteTemplateInput, + DeleteVerifiedEmailAddressInput, DescribeActiveReceiptRuleSetInput, + DescribeConfigurationSetInput, DescribeReceiptRuleSetInput, GetIdentityDkimAttributesInput, + GetIdentityMailFromDomainAttributesInput, GetIdentityNotificationAttributesInput, + GetIdentityPoliciesInput, GetIdentityVerificationAttributesInput, GetTemplateInput, + ListConfigurationSetsInput, ListIdentitiesInput, ListIdentityPoliciesInput, + ListTemplatesInput, PutIdentityPolicyInput, SendEmailInput, SendRawEmailInput, + SendTemplatedEmailInput, SetActiveReceiptRuleSetInput, + SetIdentityFeedbackForwardingEnabledInput, SetIdentityMailFromDomainInput, + SetIdentityNotificationTopicInput, UpdateConfigurationSetEventDestinationInput, + UpdateTemplateInput, VerifyDomainDkimInput, VerifyDomainIdentityInput, + VerifyEmailAddressInput, VerifyEmailIdentityInput, + }, + output::{ + CloneReceiptRuleSetResponse, CreateConfigurationSetEventDestinationResponse, + CreateConfigurationSetResponse, CreateReceiptRuleResponse, CreateReceiptRuleSetResponse, + CreateTemplateResponse, DeleteConfigurationSetEventDestinationResponse, + DeleteConfigurationSetResponse, DeleteIdentityPolicyResponse, DeleteIdentityResponse, + DeleteReceiptRuleResponse, DeleteReceiptRuleSetResponse, DeleteTemplateResponse, + DescribeActiveReceiptRuleSetResponse, DescribeConfigurationSetResponse, + DescribeReceiptRuleSetResponse, GetIdentityDkimAttributesResponse, + GetIdentityMailFromDomainAttributesResponse, GetIdentityNotificationAttributesResponse, + GetIdentityPoliciesResponse, GetIdentityVerificationAttributesResponse, + GetSendQuotaResponse, GetSendStatisticsResponse, GetTemplateResponse, + ListConfigurationSetsResponse, ListIdentitiesResponse, ListIdentityPoliciesResponse, + ListTemplatesResponse, ListVerifiedEmailAddressesResponse, PutIdentityPolicyResponse, + SendEmailResponse, SendRawEmailResponse, SendTemplatedEmailResponse, + 
SetActiveReceiptRuleSetResponse, SetIdentityFeedbackForwardingEnabledResponse, + SetIdentityMailFromDomainResponse, SetIdentityNotificationTopicResponse, + UpdateConfigurationSetEventDestinationResponse, UpdateTemplateResponse, + VerifyDomainDkimResponse, VerifyDomainIdentityResponse, VerifyEmailIdentityResponse, + }, + types::{ConfigurationSet, IdentityType, MessageTag, ReceiptRuleSetMetadata, SendDataPoint}, }; -use ruststack_ses_model::types::{ - ConfigurationSet, IdentityType, ReceiptRuleSetMetadata, SendDataPoint, -}; - -use ruststack_ses_model::types::MessageTag; +use tracing::debug; -use crate::config::SesConfig; -use crate::config_set::ConfigurationSetStore; -use crate::identity::IdentityStore; -use crate::receipt_rule::ReceiptRuleSetStore; -use crate::retrospection::{ - EmailStore, SentEmail, SentEmailBody, SentEmailDestination, SentEmailTag, +use crate::{ + config::SesConfig, + config_set::ConfigurationSetStore, + identity::IdentityStore, + receipt_rule::ReceiptRuleSetStore, + retrospection::{EmailStore, SentEmail, SentEmailBody, SentEmailDestination, SentEmailTag}, + statistics::SendStatistics, + template::{TemplateStore, render_template}, + validation::validate_tags, }; -use crate::statistics::SendStatistics; -use crate::template::{TemplateStore, render_template}; -use crate::validation::validate_tags; /// Validate a slice of `MessageTag` values. /// @@ -231,8 +229,8 @@ impl RustStackSes { return Err(SesError::with_message( SesErrorCode::MessageRejected, format!( - "Email address is not verified. The following identities failed \ - the check in region {}: {}", + "Email address is not verified. The following identities failed the check in \ + region {}: {}", self.config.default_region, input.source ), )); @@ -288,8 +286,8 @@ impl RustStackSes { return Err(SesError::with_message( SesErrorCode::MessageRejected, format!( - "Email address is not verified. The following identities failed \ - the check in region {}: {source}", + "Email address is not verified. 
The following identities failed the check in \ + region {}: {source}", self.config.default_region ), )); @@ -411,8 +409,8 @@ impl RustStackSes { return Err(SesError::with_message( SesErrorCode::MessageRejected, format!( - "Email address is not verified. The following identities failed \ - the check in region {}: {}", + "Email address is not verified. The following identities failed the check in \ + region {}: {}", self.config.default_region, input.source ), )); diff --git a/crates/ruststack-ses-core/src/receipt_rule.rs b/crates/ruststack-ses-core/src/receipt_rule.rs index f7a300e..56e8295 100644 --- a/crates/ruststack-ses-core/src/receipt_rule.rs +++ b/crates/ruststack-ses-core/src/receipt_rule.rs @@ -4,11 +4,12 @@ //! incoming email in the local development emulator. This is for //! API compatibility only. -use dashmap::DashMap; -use dashmap::mapref::entry::Entry; +use dashmap::{DashMap, mapref::entry::Entry}; use parking_lot::RwLock; -use ruststack_ses_model::error::{SesError, SesErrorCode}; -use ruststack_ses_model::types::{ReceiptRule, ReceiptRuleSetMetadata}; +use ruststack_ses_model::{ + error::{SesError, SesErrorCode}, + types::{ReceiptRule, ReceiptRuleSetMetadata}, +}; /// Internal receipt rule set record. #[derive(Debug, Clone)] diff --git a/crates/ruststack-ses-core/src/template.rs b/crates/ruststack-ses-core/src/template.rs index c989ad7..f252290 100644 --- a/crates/ruststack-ses-core/src/template.rs +++ b/crates/ruststack-ses-core/src/template.rs @@ -3,10 +3,11 @@ //! Templates contain `{{variable}}` placeholders that are substituted //! with values from a JSON data object during rendering. -use dashmap::DashMap; -use dashmap::mapref::entry::Entry; -use ruststack_ses_model::error::{SesError, SesErrorCode}; -use ruststack_ses_model::types::{Template, TemplateMetadata}; +use dashmap::{DashMap, mapref::entry::Entry}; +use ruststack_ses_model::{ + error::{SesError, SesErrorCode}, + types::{Template, TemplateMetadata}, +}; /// Store for email templates. 
/// diff --git a/crates/ruststack-ses-http/src/body.rs b/crates/ruststack-ses-http/src/body.rs index 0be1326..c46b4d6 100644 --- a/crates/ruststack-ses-http/src/body.rs +++ b/crates/ruststack-ses-http/src/body.rs @@ -1,7 +1,9 @@ //! SES HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-ses-http/src/dispatch.rs b/crates/ruststack-ses-http/src/dispatch.rs index 93d2831..57e5b77 100644 --- a/crates/ruststack-ses-http/src/dispatch.rs +++ b/crates/ruststack-ses-http/src/dispatch.rs @@ -1,13 +1,9 @@ //! SES handler trait and operation dispatch. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_ses_model::error::SesError; -use ruststack_ses_model::operations::SesOperation; +use ruststack_ses_model::{error::SesError, operations::SesOperation}; use crate::body::SesResponseBody; diff --git a/crates/ruststack-ses-http/src/response.rs b/crates/ruststack-ses-http/src/response.rs index 567f768..1895e07 100644 --- a/crates/ruststack-ses-http/src/response.rs +++ b/crates/ruststack-ses-http/src/response.rs @@ -56,14 +56,9 @@ pub fn json_response(json: String, status: http::StatusCode) -> http::Response String { format!( - "\ - \ - {}\ - {}\ - {}\ - \ - {}\ - ", + "{}{}{}{}", error.code.fault(), error.code.code(), xml_escape(&error.message), diff --git a/crates/ruststack-ses-http/src/router.rs b/crates/ruststack-ses-http/src/router.rs index 4b23df9..5339a48 100644 --- a/crates/ruststack-ses-http/src/router.rs +++ b/crates/ruststack-ses-http/src/router.rs @@ -4,8 +4,7 @@ //! `Content-Type: application/x-www-form-urlencoded`. The operation is //! specified by the `Action=` form parameter. 
-use ruststack_ses_model::error::SesError; -use ruststack_ses_model::operations::SesOperation; +use ruststack_ses_model::{error::SesError, operations::SesOperation}; /// Resolve an SES v1 action string to an `SesOperation`. /// diff --git a/crates/ruststack-ses-http/src/service.rs b/crates/ruststack-ses-http/src/service.rs index baef2bc..19e948d 100644 --- a/crates/ruststack-ses-http/src/service.rs +++ b/crates/ruststack-ses-http/src/service.rs @@ -3,22 +3,20 @@ //! SES v1 uses the `awsQuery` protocol (form-urlencoded request, XML response). //! SES v2 uses `restJson1` (JSON request/response, path-based routing). -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_ses_model::error::SesError; -use crate::body::SesResponseBody; -use crate::dispatch::{SesHandler, dispatch_operation}; -use crate::request::parse_form_params; -use crate::response::{XML_CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::SesResponseBody, + dispatch::{SesHandler, dispatch_operation}, + request::parse_form_params, + response::{XML_CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the SES HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-ses-http/src/v2/mod.rs b/crates/ruststack-ses-http/src/v2/mod.rs index ad78d84..e49dc69 100644 --- a/crates/ruststack-ses-http/src/v2/mod.rs +++ b/crates/ruststack-ses-http/src/v2/mod.rs @@ -2,20 +2,18 @@ //! //! SES v2 uses path-based routing under `/v2/email/` with JSON request/response bodies. 
-use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_ses_model::error::SesError; -use crate::body::SesResponseBody; -use crate::dispatch::SesHandler; -use crate::request::parse_query_params; -use crate::response::{JSON_CONTENT_TYPE, error_to_json_response, json_response}; +use crate::{ + body::SesResponseBody, + dispatch::SesHandler, + request::parse_query_params, + response::{JSON_CONTENT_TYPE, error_to_json_response, json_response}, +}; /// Hyper `Service` implementation for SES v2 (restJson1). #[derive(Debug)] diff --git a/crates/ruststack-sns-core/src/delivery.rs b/crates/ruststack-sns-core/src/delivery.rs index 27f7511..f32a691 100644 --- a/crates/ruststack-sns-core/src/delivery.rs +++ b/crates/ruststack-sns-core/src/delivery.rs @@ -5,9 +5,8 @@ use std::collections::HashMap; -use serde::Serialize; - use ruststack_sns_model::types::MessageAttributeValue; +use serde::Serialize; /// SNS message envelope sent to SQS queues and HTTP endpoints. 
/// diff --git a/crates/ruststack-sns-core/src/filter.rs b/crates/ruststack-sns-core/src/filter.rs index e0ca96a..c5b7d72 100644 --- a/crates/ruststack-sns-core/src/filter.rs +++ b/crates/ruststack-sns-core/src/filter.rs @@ -13,8 +13,7 @@ use std::collections::HashMap; -use ruststack_sns_model::error::SnsError; -use ruststack_sns_model::types::MessageAttributeValue; +use ruststack_sns_model::{error::SnsError, types::MessageAttributeValue}; use crate::subscription::FilterPolicyScope; @@ -389,15 +388,15 @@ fn evaluate_anything_but( pub fn resolve_protocol_message(message: &str, protocol: &str) -> Result { let parsed: serde_json::Value = serde_json::from_str(message).map_err(|_| { SnsError::invalid_parameter( - "Invalid parameter: Message Reason: \ - When MessageStructure is 'json', the message must be valid JSON", + "Invalid parameter: Message Reason: When MessageStructure is 'json', the message must \ + be valid JSON", ) })?; let obj = parsed.as_object().ok_or_else(|| { SnsError::invalid_parameter( - "Invalid parameter: Message Reason: \ - When MessageStructure is 'json', the message must be a JSON object", + "Invalid parameter: Message Reason: When MessageStructure is 'json', the message must \ + be a JSON object", ) })?; @@ -411,8 +410,8 @@ pub fn resolve_protocol_message(message: &str, protocol: &str) -> Result Result<&str, SnsError> { fn validate_publish_message(input: &PublishInput) -> Result<(), SnsError> { if input.message.len() > MAX_MESSAGE_SIZE { return Err(SnsError::invalid_parameter(format!( - "Invalid parameter: Message Reason: \ - Message must be shorter than {MAX_MESSAGE_SIZE} bytes" + "Invalid parameter: Message Reason: Message must be shorter than {MAX_MESSAGE_SIZE} \ + bytes" ))); } @@ -1734,15 +1740,15 @@ fn validate_publish_message(input: &PublishInput) -> Result<(), SnsError> { Ok(val) => { if val.get("default").is_none() { return Err(SnsError::invalid_parameter( - "Invalid parameter: Message Reason: \ - When MessageStructure is 'json', the 
message must contain a 'default' key", + "Invalid parameter: Message Reason: When MessageStructure is 'json', the \ + message must contain a 'default' key", )); } } Err(_) => { return Err(SnsError::invalid_parameter( - "Invalid parameter: Message Reason: \ - When MessageStructure is 'json', the message must be valid JSON", + "Invalid parameter: Message Reason: When MessageStructure is 'json', the \ + message must be valid JSON", )); } } @@ -1759,16 +1765,15 @@ fn validate_fifo_publish( if topic.is_fifo { if input.message_group_id.is_none() { return Err(SnsError::invalid_parameter( - "Invalid parameter: MessageGroupId Reason: \ - The MessageGroupId parameter is required for FIFO topics", + "Invalid parameter: MessageGroupId Reason: The MessageGroupId parameter is \ + required for FIFO topics", )); } if input.message_deduplication_id.is_none() && !topic.attributes.content_based_deduplication { return Err(SnsError::invalid_parameter( - "Invalid parameter: MessageDeduplicationId Reason: \ - The topic does not have ContentBasedDeduplication enabled, \ - so MessageDeduplicationId is required", + "Invalid parameter: MessageDeduplicationId Reason: The topic does not have \ + ContentBasedDeduplication enabled, so MessageDeduplicationId is required", )); } } @@ -1782,8 +1787,7 @@ fn validate_fifo_publish( fn validate_topic_name(name: &str) -> Result<(), SnsError> { if name.is_empty() || name.len() > 256 { return Err(SnsError::invalid_parameter( - "Invalid parameter: Name Reason: \ - Topic name must be between 1 and 256 characters", + "Invalid parameter: Name Reason: Topic name must be between 1 and 256 characters", )); } @@ -1797,8 +1801,7 @@ fn validate_topic_name(name: &str) -> Result<(), SnsError> { .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_'); if !valid { return Err(SnsError::invalid_parameter( - "Invalid parameter: Name Reason: \ - Topic name can only contain alphanumeric characters, \ + "Invalid parameter: Name Reason: Topic name can only contain 
alphanumeric characters, \ hyphens (-), and underscores (_)", )); } diff --git a/crates/ruststack-sns-core/src/subscription.rs b/crates/ruststack-sns-core/src/subscription.rs index 34d4d2c..873d3d4 100644 --- a/crates/ruststack-sns-core/src/subscription.rs +++ b/crates/ruststack-sns-core/src/subscription.rs @@ -1,7 +1,6 @@ //! Subscription record and attributes. -use std::collections::HashMap; -use std::fmt; +use std::{collections::HashMap, fmt}; use ruststack_sns_model::error::SnsError; diff --git a/crates/ruststack-sns-core/src/topic.rs b/crates/ruststack-sns-core/src/topic.rs index c5eefc5..ad0331f 100644 --- a/crates/ruststack-sns-core/src/topic.rs +++ b/crates/ruststack-sns-core/src/topic.rs @@ -1,8 +1,10 @@ //! Topic record and attributes. -use std::collections::HashMap; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::time::Instant; +use std::{ + collections::HashMap, + sync::atomic::{AtomicU64, Ordering}, + time::Instant, +}; use crate::subscription::SubscriptionRecord; diff --git a/crates/ruststack-sns-http/src/body.rs b/crates/ruststack-sns-http/src/body.rs index c52cf6a..fddfe17 100644 --- a/crates/ruststack-sns-http/src/body.rs +++ b/crates/ruststack-sns-http/src/body.rs @@ -1,7 +1,9 @@ //! SNS HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-sns-http/src/dispatch.rs b/crates/ruststack-sns-http/src/dispatch.rs index 1a8c72a..41e9bb2 100644 --- a/crates/ruststack-sns-http/src/dispatch.rs +++ b/crates/ruststack-sns-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! SNS handler trait and operation dispatch. 
-use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_sns_model::error::SnsError; -use ruststack_sns_model::operations::SnsOperation; +use ruststack_sns_model::{error::SnsError, operations::SnsOperation}; use crate::body::SnsResponseBody; diff --git a/crates/ruststack-sns-http/src/request.rs b/crates/ruststack-sns-http/src/request.rs index ebf15f6..33c225d 100644 --- a/crates/ruststack-sns-http/src/request.rs +++ b/crates/ruststack-sns-http/src/request.rs @@ -5,8 +5,10 @@ use std::collections::HashMap; -use ruststack_sns_model::error::SnsError; -use ruststack_sns_model::types::{MessageAttributeValue, PublishBatchRequestEntry, Tag}; +use ruststack_sns_model::{ + error::SnsError, + types::{MessageAttributeValue, PublishBatchRequestEntry, Tag}, +}; /// Parse a URL-encoded body into a list of key-value pairs. #[must_use] diff --git a/crates/ruststack-sns-http/src/response.rs b/crates/ruststack-sns-http/src/response.rs index a49a539..29b37c5 100644 --- a/crates/ruststack-sns-http/src/response.rs +++ b/crates/ruststack-sns-http/src/response.rs @@ -53,14 +53,9 @@ pub fn xml_response(xml: String, request_id: &str) -> http::Response String { format!( - "\ - \ - {}\ - {}\ - {}\ - \ - {}\ - ", + "{}{}{}{}", error.code.fault(), error.code.code(), xml_escape(&error.message), diff --git a/crates/ruststack-sns-http/src/router.rs b/crates/ruststack-sns-http/src/router.rs index abe5e83..2c8ab4c 100644 --- a/crates/ruststack-sns-http/src/router.rs +++ b/crates/ruststack-sns-http/src/router.rs @@ -4,8 +4,7 @@ //! `Content-Type: application/x-www-form-urlencoded`. The operation is //! specified by the `Action=` form parameter. -use ruststack_sns_model::error::SnsError; -use ruststack_sns_model::operations::SnsOperation; +use ruststack_sns_model::{error::SnsError, operations::SnsOperation}; /// Resolve an SNS operation from parsed form parameters. 
/// diff --git a/crates/ruststack-sns-http/src/service.rs b/crates/ruststack-sns-http/src/service.rs index 2472d00..830a614 100644 --- a/crates/ruststack-sns-http/src/service.rs +++ b/crates/ruststack-sns-http/src/service.rs @@ -5,22 +5,20 @@ //! Unlike SQS/SSM (which use `X-Amz-Target` header), SNS uses the //! `Action=` form parameter for operation routing. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_sns_model::error::SnsError; -use crate::body::SnsResponseBody; -use crate::dispatch::{SnsHandler, dispatch_operation}; -use crate::request::parse_form_params; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::SnsResponseBody, + dispatch::{SnsHandler, dispatch_operation}, + request::parse_form_params, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the SNS HTTP service. #[derive(Clone)] diff --git a/crates/ruststack-sqs-core/src/handler.rs b/crates/ruststack-sqs-core/src/handler.rs index 1bda320..8cc304d 100644 --- a/crates/ruststack-sqs-core/src/handler.rs +++ b/crates/ruststack-sqs-core/src/handler.rs @@ -1,16 +1,10 @@ //! SQS handler implementation bridging HTTP to business logic. 
-use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_sqs_http::body::SqsResponseBody; -use ruststack_sqs_http::dispatch::SqsHandler; -use ruststack_sqs_http::response::json_response; -use ruststack_sqs_model::error::SqsError; -use ruststack_sqs_model::operations::SqsOperation; +use ruststack_sqs_http::{body::SqsResponseBody, dispatch::SqsHandler, response::json_response}; +use ruststack_sqs_model::{error::SqsError, operations::SqsOperation}; use crate::provider::RustStackSqs; diff --git a/crates/ruststack-sqs-core/src/provider.rs b/crates/ruststack-sqs-core/src/provider.rs index 65900b7..0647377 100644 --- a/crates/ruststack-sqs-core/src/provider.rs +++ b/crates/ruststack-sqs-core/src/provider.rs @@ -3,38 +3,44 @@ //! Acts as the queue manager that owns all queue actors, creating and //! destroying them as queues are created and deleted. -use std::collections::HashMap; -use std::sync::Arc; -use std::sync::atomic::AtomicBool; +use std::{ + collections::HashMap, + sync::{Arc, atomic::AtomicBool}, +}; -use dashmap::DashMap; -use dashmap::mapref::multiple::RefMulti; +use dashmap::{DashMap, mapref::multiple::RefMulti}; +use ruststack_sqs_model::{ + error::SqsError, + input::{ + AddPermissionInput, CancelMessageMoveTaskInput, ChangeMessageVisibilityBatchInput, + ChangeMessageVisibilityInput, CreateQueueInput, DeleteMessageBatchInput, + DeleteMessageInput, DeleteQueueInput, GetQueueAttributesInput, GetQueueUrlInput, + ListDeadLetterSourceQueuesInput, ListMessageMoveTasksInput, ListQueueTagsInput, + ListQueuesInput, PurgeQueueInput, ReceiveMessageInput, RemovePermissionInput, + SendMessageBatchInput, SendMessageInput, SetQueueAttributesInput, + StartMessageMoveTaskInput, TagQueueInput, UntagQueueInput, + }, + output::{ + AddPermissionOutput, CancelMessageMoveTaskOutput, ChangeMessageVisibilityBatchOutput, + ChangeMessageVisibilityOutput, CreateQueueOutput, 
DeleteMessageBatchOutput, + DeleteMessageOutput, DeleteQueueOutput, GetQueueAttributesOutput, GetQueueUrlOutput, + ListDeadLetterSourceQueuesOutput, ListMessageMoveTasksOutput, ListQueueTagsOutput, + ListQueuesOutput, PurgeQueueOutput, ReceiveMessageOutput, RemovePermissionOutput, + SendMessageBatchOutput, SendMessageOutput, SetQueueAttributesOutput, + StartMessageMoveTaskOutput, TagQueueOutput, UntagQueueOutput, + }, +}; use tokio::sync::{Notify, mpsc}; -use ruststack_sqs_model::error::SqsError; -use ruststack_sqs_model::input::{ - AddPermissionInput, CancelMessageMoveTaskInput, ChangeMessageVisibilityBatchInput, - ChangeMessageVisibilityInput, CreateQueueInput, DeleteMessageBatchInput, DeleteMessageInput, - DeleteQueueInput, GetQueueAttributesInput, GetQueueUrlInput, ListDeadLetterSourceQueuesInput, - ListMessageMoveTasksInput, ListQueueTagsInput, ListQueuesInput, PurgeQueueInput, - ReceiveMessageInput, RemovePermissionInput, SendMessageBatchInput, SendMessageInput, - SetQueueAttributesInput, StartMessageMoveTaskInput, TagQueueInput, UntagQueueInput, +use crate::{ + config::SqsConfig, + message::now_epoch_seconds, + queue::{ + actor::{QueueActor, QueueHandle, QueueMetadata}, + attributes::QueueAttributes, + url::{extract_queue_name, queue_arn, queue_url}, + }, }; -use ruststack_sqs_model::output::{ - AddPermissionOutput, CancelMessageMoveTaskOutput, ChangeMessageVisibilityBatchOutput, - ChangeMessageVisibilityOutput, CreateQueueOutput, DeleteMessageBatchOutput, - DeleteMessageOutput, DeleteQueueOutput, GetQueueAttributesOutput, GetQueueUrlOutput, - ListDeadLetterSourceQueuesOutput, ListMessageMoveTasksOutput, ListQueueTagsOutput, - ListQueuesOutput, PurgeQueueOutput, ReceiveMessageOutput, RemovePermissionOutput, - SendMessageBatchOutput, SendMessageOutput, SetQueueAttributesOutput, - StartMessageMoveTaskOutput, TagQueueOutput, UntagQueueOutput, -}; - -use crate::config::SqsConfig; -use crate::message::now_epoch_seconds; -use crate::queue::actor::{QueueActor, 
QueueHandle, QueueMetadata}; -use crate::queue::attributes::QueueAttributes; -use crate::queue::url::{extract_queue_name, queue_arn, queue_url}; /// Main SQS provider. Acts as the queue manager that owns all queue actors. #[derive(Debug)] @@ -61,7 +67,8 @@ impl RustStackSqs { .map(String::from) .ok_or_else(|| { SqsError::non_existent_queue(format!( - "The specified queue does not exist for this wsdl version. QueueUrl: {queue_url_str}" + "The specified queue does not exist for this wsdl version. QueueUrl: \ + {queue_url_str}" )) }) } diff --git a/crates/ruststack-sqs-core/src/queue/actor.rs b/crates/ruststack-sqs-core/src/queue/actor.rs index 1d0ca95..354be09 100644 --- a/crates/ruststack-sqs-core/src/queue/actor.rs +++ b/crates/ruststack-sqs-core/src/queue/actor.rs @@ -4,27 +4,35 @@ //! communicates via a `tokio::sync::mpsc` channel. The actor supports both //! standard and FIFO queue types. -use std::collections::HashMap; -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::time::Duration; - -use tokio::sync::{Notify, mpsc, oneshot}; -use tokio::time::Instant; +use std::{ + collections::HashMap, + sync::{ + Arc, + atomic::{AtomicBool, Ordering}, + }, + time::Duration, +}; -use ruststack_sqs_model::error::SqsError; -use ruststack_sqs_model::input::{ReceiveMessageInput, SendMessageInput}; -use ruststack_sqs_model::output::{ReceiveMessageOutput, SendMessageOutput}; -use ruststack_sqs_model::types::Message; +use ruststack_sqs_model::{ + error::SqsError, + input::{ReceiveMessageInput, SendMessageInput}, + output::{ReceiveMessageOutput, SendMessageOutput}, + types::Message, +}; +use tokio::{ + sync::{Notify, mpsc, oneshot}, + time::Instant, +}; +use super::{ + attributes::QueueAttributes, + storage::{EnqueueResult, FifoQueueStorage, StandardQueueStorage}, +}; use crate::message::{ InFlightMessage, QueueMessage, generate_receipt_handle, md5_of_body, md5_of_message_attributes, now_epoch_millis, }; -use super::attributes::QueueAttributes; -use 
super::storage::{EnqueueResult, FifoQueueStorage, StandardQueueStorage}; - /// Commands sent to a queue actor via its channel. pub enum QueueCommand { /// Send a message to the queue. @@ -349,7 +357,8 @@ impl QueueActor { let body_bytes = input.message_body.len(); if body_bytes > self.attributes.maximum_message_size as usize { return Err(SqsError::invalid_parameter_value(format!( - "One or more parameters are invalid. Reason: Message must be shorter than {} bytes.", + "One or more parameters are invalid. Reason: Message must be shorter than {} \ + bytes.", self.attributes.maximum_message_size ))); } @@ -377,14 +386,14 @@ impl QueueActor { // Reject FIFO-only fields on standard queues. if input.message_group_id.is_some() { return Err(SqsError::invalid_parameter_value( - "Value for parameter MessageGroupId is invalid. \ - Reason: The request includes a parameter that is not valid for this queue type.", + "Value for parameter MessageGroupId is invalid. Reason: The request includes a \ + parameter that is not valid for this queue type.", )); } if input.message_deduplication_id.is_some() { return Err(SqsError::invalid_parameter_value( - "Value for parameter MessageDeduplicationId is invalid. \ - Reason: The request includes a parameter that is not valid for this queue type.", + "Value for parameter MessageDeduplicationId is invalid. Reason: The request \ + includes a parameter that is not valid for this queue type.", )); } @@ -448,8 +457,8 @@ impl QueueActor { // FIFO queues do not support per-message delay. if input.delay_seconds.is_some_and(|d| d > 0) { return Err(SqsError::invalid_parameter_value( - "Value 0 for parameter DelaySeconds is invalid. Reason: \ - The request includes a parameter that is not valid for this queue type.", + "Value 0 for parameter DelaySeconds is invalid. 
Reason: The request includes a \ + parameter that is not valid for this queue type.", )); } @@ -468,8 +477,8 @@ impl QueueActor { hex::encode(hash) } else { return Err(SqsError::invalid_parameter_value( - "The queue should either have ContentBasedDeduplication enabled \ - or MessageDeduplicationId provided explicitly.", + "The queue should either have ContentBasedDeduplication enabled or \ + MessageDeduplicationId provided explicitly.", )); }; diff --git a/crates/ruststack-sqs-core/src/queue/attributes.rs b/crates/ruststack-sqs-core/src/queue/attributes.rs index fd1fd06..cdfc445 100644 --- a/crates/ruststack-sqs-core/src/queue/attributes.rs +++ b/crates/ruststack-sqs-core/src/queue/attributes.rs @@ -2,8 +2,7 @@ use std::collections::HashMap; -use ruststack_sqs_model::error::SqsError; -use ruststack_sqs_model::types::RedrivePolicy; +use ruststack_sqs_model::{error::SqsError, types::RedrivePolicy}; /// Queue attributes with validated values and defaults. #[derive(Debug, Clone)] @@ -90,7 +89,8 @@ impl QueueAttributes { })?; if policy.max_receive_count < 1 { return Err(SqsError::invalid_parameter_value( - "Value for parameter RedrivePolicy is invalid. Reason: maxReceiveCount must be between 1 and 1000.", + "Value for parameter RedrivePolicy is invalid. Reason: \ + maxReceiveCount must be between 1 and 1000.", )); } result.redrive_policy = Some(policy); diff --git a/crates/ruststack-sqs-core/src/queue/storage.rs b/crates/ruststack-sqs-core/src/queue/storage.rs index 0642ae9..d5b483a 100644 --- a/crates/ruststack-sqs-core/src/queue/storage.rs +++ b/crates/ruststack-sqs-core/src/queue/storage.rs @@ -1,7 +1,9 @@ //! Queue message storage for both standard and FIFO queues. 
-use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::{ + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + sync::atomic::{AtomicU64, Ordering}, +}; use tokio::time::Instant; diff --git a/crates/ruststack-sqs-http/src/body.rs b/crates/ruststack-sqs-http/src/body.rs index 690b707..af503ed 100644 --- a/crates/ruststack-sqs-http/src/body.rs +++ b/crates/ruststack-sqs-http/src/body.rs @@ -1,7 +1,9 @@ //! SQS HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-sqs-http/src/dispatch.rs b/crates/ruststack-sqs-http/src/dispatch.rs index dc85752..d5d6454 100644 --- a/crates/ruststack-sqs-http/src/dispatch.rs +++ b/crates/ruststack-sqs-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! SQS handler trait and operation dispatch. -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_sqs_model::error::SqsError; -use ruststack_sqs_model::operations::SqsOperation; +use ruststack_sqs_model::{error::SqsError, operations::SqsOperation}; use crate::body::SqsResponseBody; diff --git a/crates/ruststack-sqs-http/src/response.rs b/crates/ruststack-sqs-http/src/response.rs index f5683ba..2e2dc96 100644 --- a/crates/ruststack-sqs-http/src/response.rs +++ b/crates/ruststack-sqs-http/src/response.rs @@ -72,9 +72,10 @@ pub fn json_response(json: Vec, request_id: &str) -> http::Response Result<(), SsmError return Err(SsmError::with_message( SsmErrorCode::InvalidFilterOption, format!( - "The filter option '{opt}' is not valid for key '{key}'. \ - Valid options: Equals." + "The filter option '{opt}' is not valid for key '{key}'. Valid options: \ + Equals." 
), )); } @@ -72,8 +74,8 @@ fn validate_single_filter(filter: &ParameterStringFilter) -> Result<(), SsmError return Err(SsmError::with_message( SsmErrorCode::InvalidFilterKey, format!( - "The filter key '{key}' is not valid. \ - Valid filter keys: Name, Type, KeyId, Path, Tier, DataType, Label, tag:." + "The filter key '{key}' is not valid. Valid filter keys: Name, Type, KeyId, Path, \ + Tier, DataType, Label, tag:." ), )); } @@ -90,8 +92,8 @@ fn validate_single_filter(filter: &ParameterStringFilter) -> Result<(), SsmError return Err(SsmError::with_message( SsmErrorCode::InvalidFilterOption, format!( - "The filter option '{opt}' is not valid for key '{key}'. \ - Valid options: {valid_options:?}." + "The filter option '{opt}' is not valid for key '{key}'. Valid options: \ + {valid_options:?}." ), )); } diff --git a/crates/ruststack-ssm-core/src/handler.rs b/crates/ruststack-ssm-core/src/handler.rs index 9cb7353..11f5e2d 100644 --- a/crates/ruststack-ssm-core/src/handler.rs +++ b/crates/ruststack-ssm-core/src/handler.rs @@ -1,16 +1,10 @@ //! SSM handler implementation bridging HTTP to business logic. -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; - -use ruststack_ssm_http::body::SsmResponseBody; -use ruststack_ssm_http::dispatch::SsmHandler; -use ruststack_ssm_http::response::json_response; -use ruststack_ssm_model::error::SsmError; -use ruststack_ssm_model::operations::SsmOperation; +use ruststack_ssm_http::{body::SsmResponseBody, dispatch::SsmHandler, response::json_response}; +use ruststack_ssm_model::{error::SsmError, operations::SsmOperation}; use crate::provider::RustStackSsm; diff --git a/crates/ruststack-ssm-core/src/provider.rs b/crates/ruststack-ssm-core/src/provider.rs index 5d08b95..ad85aea 100644 --- a/crates/ruststack-ssm-core/src/provider.rs +++ b/crates/ruststack-ssm-core/src/provider.rs @@ -1,28 +1,33 @@ //! 
SSM provider implementing Phase 0, Phase 1, and Phase 2 operations. -use ruststack_ssm_model::error::{SsmError, SsmErrorCode}; -use ruststack_ssm_model::input::{ - AddTagsToResourceInput, DeleteParameterInput, DeleteParametersInput, DescribeParametersInput, - GetParameterHistoryInput, GetParameterInput, GetParametersByPathInput, GetParametersInput, - LabelParameterVersionInput, ListTagsForResourceInput, PutParameterInput, - RemoveTagsFromResourceInput, UnlabelParameterVersionInput, +use ruststack_ssm_model::{ + error::{SsmError, SsmErrorCode}, + input::{ + AddTagsToResourceInput, DeleteParameterInput, DeleteParametersInput, + DescribeParametersInput, GetParameterHistoryInput, GetParameterInput, + GetParametersByPathInput, GetParametersInput, LabelParameterVersionInput, + ListTagsForResourceInput, PutParameterInput, RemoveTagsFromResourceInput, + UnlabelParameterVersionInput, + }, + output::{ + AddTagsToResourceOutput, DeleteParameterOutput, DeleteParametersOutput, + DescribeParametersOutput, GetParameterHistoryOutput, GetParameterOutput, + GetParametersByPathOutput, GetParametersOutput, LabelParameterVersionOutput, + ListTagsForResourceOutput, PutParameterOutput, RemoveTagsFromResourceOutput, + UnlabelParameterVersionOutput, + }, + types::ParameterTier, }; -use ruststack_ssm_model::output::{ - AddTagsToResourceOutput, DeleteParameterOutput, DeleteParametersOutput, - DescribeParametersOutput, GetParameterHistoryOutput, GetParameterOutput, - GetParametersByPathOutput, GetParametersOutput, LabelParameterVersionOutput, - ListTagsForResourceOutput, PutParameterOutput, RemoveTagsFromResourceOutput, - UnlabelParameterVersionOutput, -}; -use ruststack_ssm_model::types::ParameterTier; - -use crate::config::SsmConfig; -use crate::filter::validate_filters; -use crate::selector::parse_name_with_selector; -use crate::storage::ParameterStore; -use crate::validation::{ - MAX_BATCH_SIZE, parse_parameter_type, parse_tier, validate_allowed_pattern, - validate_description, 
validate_name, validate_tags, validate_value, + +use crate::{ + config::SsmConfig, + filter::validate_filters, + selector::parse_name_with_selector, + storage::ParameterStore, + validation::{ + MAX_BATCH_SIZE, parse_parameter_type, parse_tier, validate_allowed_pattern, + validate_description, validate_name, validate_tags, validate_value, + }, }; /// Default max results for `GetParametersByPath`. @@ -359,8 +364,8 @@ fn validate_resource_type(resource_type: &str) -> Result<(), SsmError> { return Err(SsmError::with_message( SsmErrorCode::InvalidResourceType, format!( - "The resource type '{resource_type}' is not valid. \ - Valid resource types: Parameter." + "The resource type '{resource_type}' is not valid. Valid resource types: \ + Parameter." ), )); } diff --git a/crates/ruststack-ssm-core/src/storage.rs b/crates/ruststack-ssm-core/src/storage.rs index a73e0df..a8338ac 100644 --- a/crates/ruststack-ssm-core/src/storage.rs +++ b/crates/ruststack-ssm-core/src/storage.rs @@ -7,17 +7,19 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use dashmap::DashMap; - -use ruststack_ssm_model::error::{SsmError, SsmErrorCode}; -use ruststack_ssm_model::types::{ - Parameter, ParameterHistory, ParameterInlinePolicy, ParameterMetadata, ParameterTier, - ParameterType, Tag, +use ruststack_ssm_model::{ + error::{SsmError, SsmErrorCode}, + types::{ + Parameter, ParameterHistory, ParameterInlinePolicy, ParameterMetadata, ParameterTier, + ParameterType, Tag, + }, }; -use crate::filter::matches_filters; - -use crate::selector::ParameterSelector; -use crate::validation::{MAX_LABELS_PER_VERSION, MAX_VERSIONS, is_valid_label}; +use crate::{ + filter::matches_filters, + selector::ParameterSelector, + validation::{MAX_LABELS_PER_VERSION, MAX_VERSIONS, is_valid_label}, +}; /// A snapshot of a single parameter version. 
#[derive(Debug, Clone)] @@ -133,8 +135,8 @@ impl ParameterStore { return Err(SsmError::with_message( ruststack_ssm_model::error::SsmErrorCode::ParameterMaxVersionLimitExceeded, format!( - "Parameter {name} has reached the maximum number of \ - {MAX_VERSIONS} versions." + "Parameter {name} has reached the maximum number of {MAX_VERSIONS} \ + versions." ), )); } @@ -545,8 +547,8 @@ impl ParameterStore { return Err(SsmError::with_message( SsmErrorCode::ParameterVersionNotFound, format!( - "Systems Manager could not find version {target_version} of {name}. \ - Verify the version and try again." + "Systems Manager could not find version {target_version} of {name}. Verify \ + the version and try again." ), )); } @@ -577,8 +579,8 @@ impl ParameterStore { return Err(SsmError::with_message( SsmErrorCode::ParameterVersionLabelLimitExceeded, format!( - "A parameter version can have a maximum of {MAX_LABELS_PER_VERSION} labels. \ - Move one or more labels to a different version and try again." + "A parameter version can have a maximum of {MAX_LABELS_PER_VERSION} \ + labels. Move one or more labels to a different version and try again." ), )); } @@ -627,8 +629,8 @@ impl ParameterStore { SsmError::with_message( SsmErrorCode::ParameterVersionNotFound, format!( - "Systems Manager could not find version {version} of {name}. \ - Verify the version and try again." + "Systems Manager could not find version {version} of {name}. Verify the \ + version and try again." ), ) })?; diff --git a/crates/ruststack-ssm-core/src/validation.rs b/crates/ruststack-ssm-core/src/validation.rs index b99c83f..ced904a 100644 --- a/crates/ruststack-ssm-core/src/validation.rs +++ b/crates/ruststack-ssm-core/src/validation.rs @@ -3,8 +3,10 @@ //! Implements AWS SSM validation constraints for parameter names, values, //! descriptions, hierarchy depth, and allowed patterns. 
-use ruststack_ssm_model::error::{SsmError, SsmErrorCode}; -use ruststack_ssm_model::types::ParameterTier; +use ruststack_ssm_model::{ + error::{SsmError, SsmErrorCode}, + types::ParameterTier, +}; /// Maximum parameter name length. const MAX_NAME_LENGTH: usize = 2048; @@ -50,8 +52,8 @@ pub fn validate_name(name: &str) -> Result<(), SsmError> { .all(|c| c.is_ascii_alphanumeric() || "_./-".contains(c)) { return Err(SsmError::validation(format!( - "Parameter name '{name}' contains invalid characters. \ - Only [a-zA-Z0-9_./-] are allowed." + "Parameter name '{name}' contains invalid characters. Only [a-zA-Z0-9_./-] are \ + allowed." ))); } @@ -61,8 +63,8 @@ pub fn validate_name(name: &str) -> Result<(), SsmError> { let check_name = lower.trim_start_matches('/'); if check_name.starts_with("aws") || check_name.starts_with("ssm") { return Err(SsmError::validation(format!( - "Parameter name '{name}' is not allowed. \ - Names beginning with 'aws' or 'ssm' (case-insensitive) are reserved." + "Parameter name '{name}' is not allowed. Names beginning with 'aws' or 'ssm' \ + (case-insensitive) are reserved." ))); } @@ -72,8 +74,8 @@ pub fn validate_name(name: &str) -> Result<(), SsmError> { return Err(SsmError::with_message( SsmErrorCode::HierarchyLevelLimitExceeded, format!( - "Parameter name '{name}' exceeds the maximum hierarchy depth \ - of {MAX_HIERARCHY_DEPTH} levels." + "Parameter name '{name}' exceeds the maximum hierarchy depth of \ + {MAX_HIERARCHY_DEPTH} levels." ), )); } @@ -90,8 +92,7 @@ pub fn validate_value(value: &str, tier: &ParameterTier) -> Result<(), SsmError> if value.len() > max_size { return Err(SsmError::validation(format!( - "Parameter value exceeds the maximum size of {max_size} bytes for \ - {tier} tier." + "Parameter value exceeds the maximum size of {max_size} bytes for {tier} tier." 
))); } diff --git a/crates/ruststack-ssm-http/src/body.rs b/crates/ruststack-ssm-http/src/body.rs index 0d6414c..bf21654 100644 --- a/crates/ruststack-ssm-http/src/body.rs +++ b/crates/ruststack-ssm-http/src/body.rs @@ -1,7 +1,9 @@ //! SSM HTTP response body type. -use std::pin::Pin; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; use bytes::Bytes; use http_body_util::Full; diff --git a/crates/ruststack-ssm-http/src/dispatch.rs b/crates/ruststack-ssm-http/src/dispatch.rs index d300273..9fbdef1 100644 --- a/crates/ruststack-ssm-http/src/dispatch.rs +++ b/crates/ruststack-ssm-http/src/dispatch.rs @@ -1,12 +1,9 @@ //! SSM handler trait and operation dispatch. -use std::future::Future; -use std::pin::Pin; +use std::{future::Future, pin::Pin}; use bytes::Bytes; - -use ruststack_ssm_model::error::SsmError; -use ruststack_ssm_model::operations::SsmOperation; +use ruststack_ssm_model::{error::SsmError, operations::SsmOperation}; use crate::body::SsmResponseBody; diff --git a/crates/ruststack-ssm-http/src/response.rs b/crates/ruststack-ssm-http/src/response.rs index c35a68a..23c4177 100644 --- a/crates/ruststack-ssm-http/src/response.rs +++ b/crates/ruststack-ssm-http/src/response.rs @@ -72,9 +72,10 @@ pub fn json_response(json: Vec, request_id: &str) -> http::Response http::Response String { format!( - "\ - \ - {}\ - {}\ - {}\ - \ - {}\ - ", + "{}{}{}{}", error.code.fault(), error.code.code(), xml_escape(&error.message), diff --git a/crates/ruststack-sts-http/src/router.rs b/crates/ruststack-sts-http/src/router.rs index 7f17138..7039927 100644 --- a/crates/ruststack-sts-http/src/router.rs +++ b/crates/ruststack-sts-http/src/router.rs @@ -4,8 +4,7 @@ //! `Content-Type: application/x-www-form-urlencoded`. The operation is //! specified by the `Action=` form parameter. 
-use ruststack_sts_model::error::StsError; -use ruststack_sts_model::operations::StsOperation; +use ruststack_sts_model::{error::StsError, operations::StsOperation}; /// Resolve an STS operation from parsed form parameters. /// diff --git a/crates/ruststack-sts-http/src/service.rs b/crates/ruststack-sts-http/src/service.rs index a1adeea..93e8685 100644 --- a/crates/ruststack-sts-http/src/service.rs +++ b/crates/ruststack-sts-http/src/service.rs @@ -4,22 +4,20 @@ //! `application/x-www-form-urlencoded` and the response is `text/xml`. //! The `Action=` form parameter routes to the appropriate operation. -use std::convert::Infallible; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; +use std::{convert::Infallible, future::Future, pin::Pin, sync::Arc}; use bytes::Bytes; use http_body_util::BodyExt; use hyper::body::Incoming; - use ruststack_sts_model::error::StsError; -use crate::body::StsResponseBody; -use crate::dispatch::{StsHandler, dispatch_operation}; -use crate::request::{extract_access_key_from_auth, parse_form_params}; -use crate::response::{CONTENT_TYPE, error_to_response}; -use crate::router::resolve_operation; +use crate::{ + body::StsResponseBody, + dispatch::{StsHandler, dispatch_operation}, + request::{extract_access_key_from_auth, parse_form_params}, + response::{CONTENT_TYPE, error_to_response}, + router::resolve_operation, +}; /// Configuration for the STS HTTP service. #[derive(Clone)] diff --git a/specs/README.md b/specs/README.md index 882747c..52d9f5d 100644 --- a/specs/README.md +++ b/specs/README.md @@ -23,3 +23,4 @@ All specs that for AI to generate code. 
| [s3-checksum-parity-design](./s3-checksum-parity-design.md) | Design | Draft | S3 checksum parity -- CRC64NVME, ChecksumMode gating, aws-chunked trailing headers, multipart checksum combination, upload validation | | [ruststack-ses-design](./ruststack-ses-design.md) | Design | Draft | Native Rust SES implementation -- ~30 v1 operations (awsQuery) + ~10 v2 operations (restJson1), email retrospection endpoint, template rendering, identity management | | [smithy-codegen-all-services-design](./smithy-codegen-all-services-design.md) | Design | Draft | Universal Smithy codegen -- extend S3-only codegen to all 7+ services with TOML configs, protocol-aware serde, error extraction, overlay system | +| [service-operations-gap-impl-plan](./service-operations-gap-impl-plan.md) | Design | Draft | Operations gap analysis and implementation plan -- 44 Tier 1 ops (DynamoDB transactions/TTL/tagging, Lambda layers/ESM, IAM OIDC), 56 Tier 2 ops, phased delivery across 6 phases | diff --git a/specs/service-operations-gap-impl-plan.md b/specs/service-operations-gap-impl-plan.md new file mode 100644 index 0000000..cc7c0ed --- /dev/null +++ b/specs/service-operations-gap-impl-plan.md @@ -0,0 +1,1061 @@ +# RustStack Service Operations Gap: Implementation Plan + +**Date:** 2026-03-26 +**Status:** Draft +**Depends on:** All existing service design specs +**Scope:** Concrete plan to close the operations gap between RustStack (17 services, ~586 ops) and LocalStack (34+ services, ~2,300+ ops) for shared services, based on analysis of which missing operations matter for local development. + +--- + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. [Methodology](#2-methodology) +3. [Gap Analysis by Service](#3-gap-analysis-by-service) +4. [Priority Tiers](#4-priority-tiers) +5. [Tier 1: Must-Have Operations](#5-tier-1-must-have-operations) +6. [Tier 2: Should-Have Operations](#6-tier-2-should-have-operations) +7. 
[Tier 3: Nice-to-Have Operations](#7-tier-3-nice-to-have-operations) +8. [Tier 4: Skip](#8-tier-4-skip) +9. [Phased Implementation Plan](#9-phased-implementation-plan) +10. [Detailed Design: DynamoDB Transactions](#10-detailed-design-dynamodb-transactions) +11. [Detailed Design: Lambda Layers & Event Source Mappings](#11-detailed-design-lambda-layers--event-source-mappings) +12. [Detailed Design: DynamoDB TTL & Tagging](#12-detailed-design-dynamodb-ttl--tagging) +13. [Cross-Cutting Concerns](#13-cross-cutting-concerns) +14. [Testing Strategy](#14-testing-strategy) +15. [Risk Analysis](#15-risk-analysis) +16. [Success Metrics](#16-success-metrics) + +--- + +## 1. Executive Summary + +RustStack implements 17 AWS services with ~586 operations. LocalStack implements 34+ services with ~2,300+ operations. For the 17 shared services, RustStack covers ~586 of LocalStack's ~1,100 operations (53%). + +This spec proposes closing the gap strategically — not by implementing every operation, but by categorizing the ~514 missing operations into priority tiers based on real-world local development usage. The core finding: + +- **Tier 1 (Must-Have):** 44 operations across 4 services. These are operations that real applications call frequently, and their absence forces users back to LocalStack or AWS. Primarily: DynamoDB transactions + TTL + tagging, Lambda layers + event source mappings, and IAM OIDC providers. +- **Tier 2 (Should-Have):** 56 operations across 7 services. Important for completeness when testing IaC workflows (Terraform/CDK), but applications can work without them. Primarily: DynamoDB PartiQL, Lambda concurrency configs, S3 replication + analytics, KMS key import, SSM documents. +- **Tier 3 (Nice-to-Have):** ~80 operations. Advanced features with niche usage — IAM MFA/SAML, SES bounce handling, CloudWatch Logs anomaly detection. +- **Tier 4 (Skip):** ~334 operations. 
Production infrastructure concerns with zero value in local emulation — SSM patch management, IAM Organizations, DynamoDB global tables/backup, CloudWatch Logs delivery pipelines. + +Implementing Tiers 1 + 2 (100 operations) would bring RustStack to ~686 operations and cover effectively 100% of common local-development API calls across all 17 services. This spec provides concrete designs for the highest-impact additions. + +--- + +## 2. Methodology + +Each missing operation was evaluated against three criteria: + +1. **Application-level usage frequency** — Does typical application code call this operation? (e.g., `TransactWriteItems` is called by application code; `CreateBackup` is called by infrastructure automation.) +2. **IaC workflow impact** — Does Terraform/CDK/Pulumi require this operation during `plan`/`apply`/`destroy`? (e.g., `TagResource` is emitted by every Terraform resource with tags; `DescribeLimits` is never called by IaC.) +3. **Blocking behavior** — Does the absence cause a hard failure (SDK throws, IaC aborts) vs. a soft degradation (feature unavailable but app works)? + +Operations that score high on all three criteria are Tier 1. Operations that score high on one are Tier 2-3. Operations that score zero on all three are Tier 4. + +--- + +## 3. 
Gap Analysis by Service + +### 3.1 DynamoDB (13 → target 28, gap: 44 in LocalStack) + +| Missing Operation | Tier | Justification | +|---|---|---| +| TransactGetItems | 1 | Core application pattern for consistent reads across items | +| TransactWriteItems | 1 | Core application pattern for atomic multi-item writes | +| UpdateTimeToLive | 1 | Common for session stores, caches, TTL-based cleanup | +| DescribeTimeToLive | 1 | Read companion to UpdateTimeToLive | +| TagResource | 1 | Required by every Terraform/CDK resource with tags | +| UntagResource | 1 | Required by Terraform tag lifecycle | +| ListTagsOfResource | 1 | Required by Terraform plan/refresh | +| ExecuteStatement (PartiQL) | 2 | Growing adoption, AWS Console uses it | +| BatchExecuteStatement | 2 | Batch companion to ExecuteStatement | +| ExecuteTransaction | 2 | Transaction companion to ExecuteStatement | +| DescribeLimits | 2 | Some SDKs call this during initialization | +| DescribeEndpoints | 2 | SDK endpoint discovery | +| DescribeContinuousBackups | 3 | Terraform reads this for PITR config | +| UpdateContinuousBackups | 3 | Terraform sets PITR config | +| DescribeContributorInsights | 3 | Monitoring feature, rare in local dev | +| UpdateContributorInsights | 3 | Monitoring feature, rare in local dev | +| ListContributorInsights | 3 | Monitoring feature, rare in local dev | +| CreateBackup | 4 | Production DR, no value locally | +| DeleteBackup | 4 | Production DR | +| DescribeBackup | 4 | Production DR | +| ListBackups | 4 | Production DR | +| RestoreTableFromBackup | 4 | Production DR | +| RestoreTableToPointInTime | 4 | Production DR | +| CreateGlobalTable | 4 | Multi-region replication | +| DescribeGlobalTable | 4 | Multi-region replication | +| DescribeGlobalTableSettings | 4 | Multi-region replication | +| ListGlobalTables | 4 | Multi-region replication | +| UpdateGlobalTable | 4 | Multi-region replication | +| UpdateGlobalTableSettings | 4 | Multi-region replication | +| 
EnableKinesisStreamingDestination | 4 | Cross-service integration | +| DisableKinesisStreamingDestination | 4 | Cross-service integration | +| DescribeKinesisStreamingDestination | 4 | Cross-service integration | +| UpdateKinesisStreamingDestination | 4 | Cross-service integration | +| ExportTableToPointInTime | 4 | Bulk export to S3 | +| ImportTable | 4 | Bulk import from S3 | +| DescribeExport | 4 | Bulk export status | +| DescribeImport | 4 | Bulk import status | +| ListExports | 4 | Bulk export listing | +| ListImports | 4 | Bulk import listing | +| GetResourcePolicy | 4 | Resource-based policies, rare | +| PutResourcePolicy | 4 | Resource-based policies, rare | +| DeleteResourcePolicy | 4 | Resource-based policies, rare | +| DescribeTableReplicaAutoScaling | 4 | Global tables auto-scaling | +| UpdateTableReplicaAutoScaling | 4 | Global tables auto-scaling | + +### 3.2 Lambda (29 → target 46, gap: 72 in LocalStack) + +| Missing Operation | Tier | Justification | +|---|---|---| +| PublishLayerVersion | 1 | Lambda Layers are widely used for shared dependencies | +| GetLayerVersion | 1 | Required to resolve layers during function create/update | +| GetLayerVersionByArn | 1 | ARN-based layer resolution | +| ListLayerVersions | 1 | Layer management | +| ListLayers | 1 | Layer discovery | +| DeleteLayerVersion | 1 | Layer lifecycle | +| AddLayerVersionPermission | 1 | Cross-account layer sharing | +| GetLayerVersionPolicy | 1 | Read companion to AddLayerVersionPermission | +| RemoveLayerVersionPermission | 1 | Layer permission lifecycle | +| CreateEventSourceMapping | 1 | Required for SQS/Kinesis/DDB trigger-based Lambdas | +| GetEventSourceMapping | 1 | Read companion | +| UpdateEventSourceMapping | 1 | Event source configuration | +| DeleteEventSourceMapping | 1 | Event source lifecycle | +| ListEventSourceMappings | 1 | Event source discovery | +| PutFunctionConcurrency | 2 | Concurrency limits, used in production configs | +| GetFunctionConcurrency | 2 | Read 
companion | +| DeleteFunctionConcurrency | 2 | Lifecycle | +| PutFunctionEventInvokeConfig | 2 | DLQ/destination config for async invocations | +| GetFunctionEventInvokeConfig | 2 | Read companion | +| UpdateFunctionEventInvokeConfig | 2 | Update companion | +| DeleteFunctionEventInvokeConfig | 2 | Lifecycle | +| ListFunctionEventInvokeConfigs | 2 | Listing | +| InvokeAsync | 2 | Deprecated but some legacy code uses it | +| GetProvisionedConcurrencyConfig | 3 | Production scaling, Terraform reads it | +| PutProvisionedConcurrencyConfig | 3 | Production scaling | +| DeleteProvisionedConcurrencyConfig | 3 | Lifecycle | +| ListProvisionedConcurrencyConfigs | 3 | Listing | +| CreateCodeSigningConfig | 3 | Security feature, rare in local dev | +| GetCodeSigningConfig | 3 | Read companion | +| UpdateCodeSigningConfig | 3 | Update companion | +| DeleteCodeSigningConfig | 3 | Lifecycle | +| ListCodeSigningConfigs | 3 | Listing | +| ListFunctionsByCodeSigningConfig | 3 | Listing | +| PutFunctionCodeSigningConfig | 3 | Binding | +| GetFunctionCodeSigningConfig | 3 | Read companion | +| DeleteFunctionCodeSigningConfig | 3 | Lifecycle | +| GetRuntimeManagementConfig | 3 | Runtime version pinning | +| PutRuntimeManagementConfig | 3 | Runtime version pinning | +| InvokeWithResponseStream | 3 | Streaming responses, newer feature | +| GetFunctionRecursionConfig | 4 | Recursive invocation guard | +| PutFunctionRecursionConfig | 4 | Recursive invocation guard | +| GetFunctionScalingConfig | 4 | Auto-scaling config | +| PutFunctionScalingConfig | 4 | Auto-scaling config | +| CreateCapacityProvider | 4 | Managed instance pools | +| DeleteCapacityProvider | 4 | Managed instance pools | +| GetCapacityProvider | 4 | Managed instance pools | +| ListCapacityProviders | 4 | Managed instance pools | +| UpdateCapacityProvider | 4 | Managed instance pools | +| ListFunctionVersionsByCapacityProvider | 4 | Managed instance pools | +| All Durable Execution ops (9) | 4 | Preview feature, not GA | 
+ +### 3.3 IAM (76 → target 86, gap: 100 in LocalStack) + +| Missing Operation | Tier | Justification | +|---|---|---| +| CreateOpenIDConnectProvider | 1 | Required for EKS IRSA, Cognito federation | +| GetOpenIDConnectProvider | 1 | Read companion | +| DeleteOpenIDConnectProvider | 1 | Lifecycle | +| ListOpenIDConnectProviders | 1 | Discovery | +| TagPolicy | 1 | Terraform tags on managed policies | +| UntagPolicy | 1 | Terraform tag lifecycle | +| ListPolicyTags | 1 | Terraform plan/refresh | +| TagInstanceProfile | 1 | Terraform tags on instance profiles | +| UntagInstanceProfile | 1 | Terraform tag lifecycle | +| ListInstanceProfileTags | 1 | Terraform plan/refresh | +| CreateSAMLProvider | 2 | Federated auth testing | +| GetSAMLProvider | 2 | Read companion | +| DeleteSAMLProvider | 2 | Lifecycle | +| ListSAMLProviders | 2 | Discovery | +| UpdateSAMLProvider | 2 | Update companion | +| CreateAccountAlias | 2 | Account identification | +| DeleteAccountAlias | 2 | Lifecycle | +| ListAccountAliases | 2 | Discovery | +| GetAccountSummary | 2 | Account resource counts | +| PutRolePermissionsBoundary | 2 | Used in production IAM configs | +| DeleteRolePermissionsBoundary | 2 | Lifecycle | +| PutUserPermissionsBoundary | 2 | Used in production IAM configs | +| DeleteUserPermissionsBoundary | 2 | Lifecycle | +| SetSecurityTokenServicePreferences | 2 | STS endpoint config | +| AddClientIDToOpenIDConnectProvider | 3 | OIDC provider management | +| RemoveClientIDFromOpenIDConnectProvider | 3 | OIDC provider management | +| UpdateOpenIDConnectProviderThumbprint | 3 | OIDC provider maintenance | +| TagOpenIDConnectProvider | 3 | Tagging | +| UntagOpenIDConnectProvider | 3 | Tagging | +| ListOpenIDConnectProviderTags | 3 | Tagging | +| TagSAMLProvider | 3 | Tagging | +| UntagSAMLProvider | 3 | Tagging | +| ListSAMLProviderTags | 3 | Tagging | +| CreateLoginProfile | 4 | Console password, irrelevant locally | +| GetLoginProfile | 4 | Console password | +| UpdateLoginProfile 
| 4 | Console password | +| DeleteLoginProfile | 4 | Console password | +| ChangePassword | 4 | Console password | +| CreateVirtualMFADevice | 4 | MFA device management | +| DeleteVirtualMFADevice | 4 | MFA device management | +| EnableMFADevice | 4 | MFA device management | +| DeactivateMFADevice | 4 | MFA device management | +| ListMFADevices | 4 | MFA device management | +| ResyncMFADevice | 4 | MFA device management | +| All SSH/ServerCert ops (13) | 4 | Legacy cert management | +| All signing cert ops (4) | 4 | Legacy cert management | +| All service-specific credential ops (5) | 4 | Niche use case | +| GenerateCredentialReport | 4 | Audit feature | +| GetCredentialReport | 4 | Audit feature | +| All Organizations ops (9+) | 4 | Multi-account governance | +| All Access Advisor ops (5) | 4 | Audit/compliance | +| GetContextKeysForCustomPolicy | 4 | Policy simulation | +| GetContextKeysForPrincipalPolicy | 4 | Policy simulation | +| GetAccountPasswordPolicy | 4 | Console password policy | +| All password policy ops (3) | 4 | Console password policy | +| All delegation request ops (9) | 4 | Organizations feature | + +### 3.4 SSM (13 → target 18, gap: 133 in LocalStack) + +| Missing Operation | Tier | Justification | +|---|---|---| +| CreateDocument | 2 | SSM documents used in Ansible-like workflows | +| GetDocument | 2 | Read companion | +| DeleteDocument | 2 | Lifecycle | +| ListDocuments | 2 | Discovery | +| DescribeDocument | 2 | Read companion | +| SendCommand | 3 | Remote execution | +| All maintenance window ops (23) | 4 | EC2 fleet management | +| All patch management ops (19) | 4 | EC2 fleet management | +| All automation ops (9) | 4 | EC2 fleet management | +| All association ops (15) | 4 | EC2 fleet management | +| All inventory ops (6) | 4 | EC2 fleet management | +| All session manager ops (5) | 4 | Interactive shell, irrelevant locally | +| All OpsItems/OpsMetadata ops (15) | 4 | Operational management | +| All compliance ops (3) | 4 | Compliance 
reporting | +| All activation/instance ops (10) | 4 | Hybrid instance registration | + +### 3.5 S3 (70 → target 78, gap: 41 in LocalStack) + +| Missing Operation | Tier | Justification | +|---|---|---| +| RestoreObject | 2 | Glacier restore, some apps test this flow | +| SelectObjectContent | 2 | S3 Select (SQL queries on objects), growing usage | +| WriteGetObjectResponse | 2 | Lambda@S3 Object Lambda responses | +| PutBucketReplication | 2 | Terraform configures replication | +| GetBucketReplication | 2 | Terraform plan reads replication | +| DeleteBucketReplication | 2 | Terraform lifecycle | +| ListDirectoryBuckets | 2 | S3 Express One Zone | +| PutBucketAnalyticsConfiguration | 3 | Cost optimization feature | +| GetBucketAnalyticsConfiguration | 3 | Read companion | +| DeleteBucketAnalyticsConfiguration | 3 | Lifecycle | +| ListBucketAnalyticsConfigurations | 3 | Listing | +| PutBucketInventoryConfiguration | 3 | Audit feature | +| GetBucketInventoryConfiguration | 3 | Read companion | +| DeleteBucketInventoryConfiguration | 3 | Lifecycle | +| ListBucketInventoryConfigurations | 3 | Listing | +| PutBucketMetricsConfiguration | 3 | CloudWatch integration | +| GetBucketMetricsConfiguration | 3 | Read companion | +| DeleteBucketMetricsConfiguration | 3 | Lifecycle | +| ListBucketMetricsConfigurations | 3 | Listing | +| PutBucketIntelligentTieringConfiguration | 3 | Storage class automation | +| GetBucketIntelligentTieringConfiguration | 3 | Read companion | +| DeleteBucketIntelligentTieringConfiguration | 3 | Lifecycle | +| ListBucketIntelligentTieringConfigurations | 3 | Listing | +| RenameObject | 4 | Not standard AWS API | +| GetObjectTorrent | 4 | Deprecated feature | +| All metadata table ops (8) | 4 | LocalStack internal, not AWS API | + +### 3.6 CloudWatch Logs (42 → target 42, gap: 62 in LocalStack) + +All missing operations are Tier 3-4: + +| Category | Count | Tier | Justification | +|---|---|---|---| +| Delivery/Integration ops | 16 | 4 | Log routing 
pipelines | +| Anomaly detection ops | 7 | 4 | ML-based monitoring | +| Scheduled query ops | 6 | 4 | Recurring queries | +| Transformer ops | 4 | 4 | Log parsing | +| S3 table integration ops | 7 | 4 | Storage integration | +| Data protection ops | 3 | 3 | Sensitive data masking | +| Account policy ops | 3 | 3 | Account-level settings | +| Index policy ops | 3 | 3 | Search indexing | +| Import task ops | 4 | 4 | Bulk import | +| Live tail | 1 | 3 | Real-time streaming | +| Other | 8 | 4 | Misc. | + +CloudWatch Logs is already well-covered. No Tier 1 gaps. + +### 3.7 Remaining Services (KMS, Kinesis, SES, SNS, SQS, EventBridge, CloudWatch Metrics, STS, DynamoDB Streams) + +| Service | Gap | Tier 1 | Tier 2 | Notes | +|---|---|---|---|---| +| KMS (38 → 53) | 15 | 0 | 3 (ImportKeyMaterial, GetParametersForImport, DeleteImportedKeyMaterial) | Custom key stores and multi-region are Tier 4 | +| Kinesis (27 → 39) | 12 | 0 | 2 (UpdateStreamMode, UpdateShardCount are already implemented; TagResource/UntagResource) | Enhanced monitoring and account settings are Tier 4 | +| SES (44 → 71) | 27 | 0 | 3 (SendBulkTemplatedEmail, TestRenderTemplate, PutConfigurationSetDeliveryOptions) | Custom verification and bounce ops are Tier 3-4 | +| SNS (47 → 42) | -5 | 0 | 0 | RustStack already has MORE ops than LocalStack | +| SQS (23 → 23) | 0 | 0 | 0 | Full parity | +| EventBridge (43 → 57) | 14 | 0 | 0 | All 43 operation types are defined; 22 are implemented. Remaining 21 are already in the model, just need handler code | +| CloudWatch Metrics (31 → 39) | 8 | 0 | 0 | Missing ops are managed insight rules and metric streams, Tier 3-4 | +| STS (8 → 11) | 3 | 0 | 0 | Missing: AssumeRoot, GetDelegatedAccessToken, GetWebIdentityToken — niche ops | +| DynamoDB Streams (4 → 4) | 0 | 0 | 0 | Full parity | + +--- + +## 4. 
Priority Tiers + +### Summary + +| Tier | Operations | Services Affected | Effort Estimate | +|---|---|---|---| +| **Tier 1: Must-Have** | 44 | DynamoDB (7), Lambda (14), IAM (10), EventBridge (13 existing model ops to implement) | ~3,000-4,000 LoC | +| **Tier 2: Should-Have** | 56 | DynamoDB (6), Lambda (8), IAM (14), S3 (7), SSM (5), KMS (3), SES (3), Kinesis (2), other (8) | ~4,000-5,000 LoC | +| **Tier 3: Nice-to-Have** | ~80 | Spread across all services | ~5,000-6,000 LoC | +| **Tier 4: Skip** | ~334 | SSM (93), IAM (66), CloudWatch Logs (62), DynamoDB (35), Lambda (22), other | N/A | + +### Decision Framework + +**Tier 1** operations meet ALL of: +- Called by application code in >30% of projects using the service +- Absence causes hard SDK failure or IaC abort +- Implementation complexity is bounded (no new subsystem needed) + +**Tier 2** operations meet AT LEAST ONE of: +- Called by IaC tools during plan/apply +- Used by >10% of projects +- Enables testing a complete feature (e.g., PartiQL as alternative query syntax) + +**Tier 3** operations meet AT LEAST ONE of: +- Used by <10% of projects but has passionate users +- Terraform reads it during refresh (will fail without it) +- Implementation is trivial (metadata CRUD) + +**Tier 4** operations meet ALL of: +- Never called by application code in local dev +- Purely production infrastructure management +- Complex to implement correctly with no local dev value + +--- + +## 5. Tier 1: Must-Have Operations + +### 5.1 DynamoDB — Transactions + TTL + Tagging (7 ops) + +**Operations:** TransactGetItems, TransactWriteItems, UpdateTimeToLive, DescribeTimeToLive, TagResource, UntagResource, ListTagsOfResource + +**Why Must-Have:** +- DynamoDB transactions are used by ~40% of DynamoDB applications. Common patterns: creating a user + updating a counter atomically, transferring balance between accounts, idempotent writes with condition checks. 
Without transactions, any app using `TransactWriteItems` fails with `UnknownOperationException`. +- TTL is used by ~35% of DynamoDB applications. Common patterns: session expiration, cache entries, temporary records. Terraform/CDK configures TTL on nearly every table that stores ephemeral data. Without `UpdateTimeToLive`, Terraform apply fails. +- Tagging is used by every Terraform/CDK resource. Without `TagResource`, every DynamoDB table created by IaC fails if tags are specified. + +### 5.2 Lambda — Layers + Event Source Mappings (14 ops) + +**Operations:** +- Layers: PublishLayerVersion, GetLayerVersion, GetLayerVersionByArn, ListLayerVersions, ListLayers, DeleteLayerVersion, AddLayerVersionPermission, GetLayerVersionPolicy, RemoveLayerVersionPermission +- Event Source Mappings: CreateEventSourceMapping, GetEventSourceMapping, UpdateEventSourceMapping, DeleteEventSourceMapping, ListEventSourceMappings + +**Why Must-Have:** +- Lambda Layers are used by ~50% of Lambda deployments. Layers provide shared dependencies (e.g., AWS SDK, database drivers) across multiple functions. Without layers, any function that references a layer ARN in its configuration fails to create. +- Event Source Mappings are the mechanism by which Lambda polls SQS queues, Kinesis streams, and DynamoDB Streams. Without ESM, it's impossible to test the most common serverless pattern: "SQS queue triggers Lambda." ~60% of Lambda functions use an event source mapping. Without `CreateEventSourceMapping`, CDK/Terraform/SAM deployments that define triggers fail. 
+ +### 5.3 IAM — OIDC Providers + Resource Tagging (10 ops) + +**Operations:** +- OIDC: CreateOpenIDConnectProvider, GetOpenIDConnectProvider, DeleteOpenIDConnectProvider, ListOpenIDConnectProviders +- Policy tags: TagPolicy, UntagPolicy, ListPolicyTags +- Instance profile tags: TagInstanceProfile, UntagInstanceProfile, ListInstanceProfileTags + +**Why Must-Have:** +- OIDC providers are required for EKS IRSA (IAM Roles for Service Accounts), which is the standard way to give Kubernetes pods AWS credentials. Any project testing EKS infrastructure locally needs OIDC providers. Also required for Cognito user pool federation testing. +- Resource tagging on policies and instance profiles is emitted by Terraform for any IAM resource with tags. Without these ops, Terraform apply succeeds but subsequent plan/refresh fails trying to read tags. + +### 5.4 EventBridge — Remaining Implemented Operations (13 ops) + +**Operations:** CreateArchive, DeleteArchive, DescribeArchive, ListArchives, UpdateArchive, StartReplay, CancelReplay, DescribeReplay, ListReplays, CreateApiDestination, DeleteApiDestination, DescribeApiDestination, ListApiDestinations, UpdateApiDestination, CreateConnection, DeleteConnection, DescribeConnection, ListConnections, UpdateConnection, DeauthorizeConnection, CreateEndpoint, DeleteEndpoint, DescribeEndpoint, ListEndpoints, UpdateEndpoint + +**Why Must-Have:** These operations are already defined in the RustStack EventBridge model enum. The types are generated. The HTTP routing is in place. Only the handler dispatch and provider methods need implementation. This is low-effort, high-value work — approximately 50-100 lines per operation since the infrastructure exists. + +--- + +## 6. Tier 2: Should-Have Operations + +### 6.1 DynamoDB — PartiQL (3 ops) + +**Operations:** ExecuteStatement, BatchExecuteStatement, ExecuteTransaction + +**Design:** Requires a PartiQL parser. PartiQL is a SQL-compatible query language. 
For DynamoDB, the supported subset is limited: `SELECT`, `INSERT`, `UPDATE`, `DELETE` with `WHERE` clauses mapping to key conditions. The parser translates PartiQL statements into existing DynamoDB operations internally. + +**Complexity:** Medium. Requires a new parser (~500-800 LoC) but reuses the existing expression evaluator and storage engine. + +### 6.2 DynamoDB — Service Metadata (2 ops) + +**Operations:** DescribeLimits, DescribeEndpoints + +**Design:** Return static/hardcoded responses. `DescribeLimits` returns account limits (table count, GSI count, etc.) — all hardcoded maximums. `DescribeEndpoints` returns the local endpoint URL. + +**Complexity:** Trivial. ~30 lines each. + +### 6.3 Lambda — Concurrency & Event Invoke Config (8 ops) + +**Operations:** PutFunctionConcurrency, GetFunctionConcurrency, DeleteFunctionConcurrency, PutFunctionEventInvokeConfig, GetFunctionEventInvokeConfig, UpdateFunctionEventInvokeConfig, DeleteFunctionEventInvokeConfig, ListFunctionEventInvokeConfigs + +**Design:** Metadata CRUD stored alongside function records. `FunctionConcurrency` is a single integer. `EventInvokeConfig` is a struct with max retry attempts, max event age, and destination configs. Store in `FunctionRecord`, return on read. No behavioral enforcement needed — just store and return the configuration. + +**Complexity:** Low. ~200-300 LoC total. All metadata CRUD following existing patterns. + +### 6.4 IAM — SAML Providers + Account Aliases + Permission Boundaries (14 ops) + +**Operations:** CreateSAMLProvider, GetSAMLProvider, DeleteSAMLProvider, ListSAMLProviders, UpdateSAMLProvider, CreateAccountAlias, DeleteAccountAlias, ListAccountAliases, GetAccountSummary, PutRolePermissionsBoundary, DeleteRolePermissionsBoundary, PutUserPermissionsBoundary, DeleteUserPermissionsBoundary, SetSecurityTokenServicePreferences + +**Design:** SAML providers are metadata objects (name, SAML metadata XML document, ARN). Store in a `DashMap`. 
Permission boundaries are an optional ARN field on roles/users — add a field to existing storage structs. Account aliases are a list of strings (max 1 alias per account). + +**Complexity:** Low-Medium. ~600-800 LoC. Standard CRUD patterns. + +### 6.5 S3 — Replication + Select + Restore (7 ops) + +**Operations:** PutBucketReplication, GetBucketReplication, DeleteBucketReplication, SelectObjectContent, RestoreObject, WriteGetObjectResponse, ListDirectoryBuckets + +**Design:** +- Replication: Store configuration as metadata. No actual cross-region replication needed — just accept and return the config. +- SelectObjectContent: Parse SQL-like queries against CSV/JSON objects. This is a significant feature (~1,000-1,500 LoC for the SQL parser + CSV/JSON reader). +- RestoreObject: For Glacier restore emulation, immediately mark the object as restored (no actual delay). + +**Complexity:** Medium (SelectObjectContent) + Low (rest). + +### 6.6 SSM — Documents (5 ops) + +**Operations:** CreateDocument, GetDocument, DeleteDocument, ListDocuments, DescribeDocument + +**Design:** SSM Documents are JSON/YAML configuration documents with name, version, content, and type. Store in a `DashMap` with version tracking. No execution semantics needed — just store and return. + +**Complexity:** Low. ~400-500 LoC. + +### 6.7 KMS — Key Import (3 ops) + +**Operations:** GetParametersForImport, ImportKeyMaterial, DeleteImportedKeyMaterial + +**Design:** Generate import parameters (wrapping key + import token), accept key material import (store raw key bytes), delete imported material. The wrapping key is an RSA key pair generated per import request. The imported key material replaces the auto-generated key material for the target CMK. + +**Complexity:** Medium. Crypto operations require care. ~400-500 LoC. + +--- + +## 7. 
Tier 3: Nice-to-Have Operations + +Summary of ~80 operations across services: + +| Service | Operations | Notes | +|---|---|---| +| Lambda | Provisioned concurrency (4), code signing (9), runtime management (2), response streaming (1) | Production tuning features | +| IAM | OIDC provider extras (3), SAML provider tags (3), MFA tag ops (3) | Niche admin features | +| S3 | Analytics (4), inventory (4), metrics (4), intelligent tiering (4) | Cost optimization metadata | +| CloudWatch Logs | Data protection (3), account policies (3), index policies (3), live tail (1) | Newer observability features | +| SES | Custom verification templates (6), bounce handling (2), receipt filters (3), tracking options (3) | Email operations features | +| DynamoDB | Continuous backups (2), contributor insights (3) | Terraform-optional features | +| KMS | Multi-region (2), DeriveSharedSecret (1), ListKeyRotations (1) | Enterprise crypto | +| EventBridge | Partner event source ops (10) | Marketplace integrations | + +Implementation approach for Tier 3: **stub with accept-and-store semantics**. Accept the API call, store metadata, return success. No behavioral implementation needed. This prevents hard failures in IaC while keeping implementation effort minimal (~20-50 LoC per operation). + +--- + +## 8. 
Tier 4: Skip + +~334 operations that provide zero value in local development: + +| Category | Operations | Services | Reason | +|---|---|---|---| +| EC2 fleet management | ~93 | SSM | Maintenance windows, patching, automation, session manager — requires actual EC2 instances | +| IAM governance | ~66 | IAM | Login profiles, credential reports, Organizations, password policies — console/account management | +| Log pipelines | ~62 | CloudWatch Logs | Delivery, anomaly detection, scheduled queries, transformers — production observability | +| DR/replication | ~35 | DynamoDB | Backups, global tables, import/export — disaster recovery | +| Lambda managed infra | ~22 | Lambda | Capacity providers, durable executions, scaling configs — managed runtime internals | +| Other | ~56 | Various | Scattered low-value operations | + +These operations should return `NotImplementedError` with a clear message indicating the operation is not supported in local emulation. + +--- + +## 9. Phased Implementation Plan + +### Phase 1: DynamoDB Transactions + TTL + Tagging (7 ops) + +**Estimated effort:** ~1,200-1,500 LoC +**Priority:** Highest — unblocks the most users + +#### Step 1.1: Tagging (TagResource, UntagResource, ListTagsOfResource) + +The `DynamoDBTable` struct already has a `tags: parking_lot::RwLock<HashMap<String, String>>` field. Implementation: + +1. Add 3 new variants to `DynamoDBOperation` enum +2. Add 3 match arms in `handler.rs` dispatch +3. Implement `handle_tag_resource()`: + - Validate table ARN exists + - Merge new tags into existing (max 50 tags) + - Validate tag key/value constraints (key: 1-128 chars, value: 0-256 chars) +4. Implement `handle_untag_resource()`: + - Validate table ARN exists + - Remove specified tag keys +5. Implement `handle_list_tags_of_resource()`: + - Validate table ARN exists + - Return current tag map + +**Estimated:** ~150 LoC + +#### Step 1.2: TTL (UpdateTimeToLive, DescribeTimeToLive) + +The `DynamoDBTable` struct already has a `ttl: parking_lot::RwLock<Option<TimeToLiveSpecification>>` field.
Implementation: + +1. Add 2 new variants to `DynamoDBOperation` enum +2. Implement `handle_update_time_to_live()`: + - Validate table exists + - Validate only one TTL attribute allowed per table + - Store `TimeToLiveSpecification { attribute_name, enabled }` in table + - Return the specification +3. Implement `handle_describe_time_to_live()`: + - Return stored specification or default (disabled) + +**Note:** Actual TTL deletion behavior (background task removing expired items) is a Tier 3 enhancement. For now, just store and return the configuration. Applications that rely on TTL deletion timing in local dev are rare — most apps verify items are *written* with a TTL attribute, not that they're *deleted* on time. + +**Estimated:** ~100 LoC + +#### Step 1.3: Transactions (TransactGetItems, TransactWriteItems) + +This is the most complex addition. It requires atomicity across multiple items/tables. + +1. Add 2 new variants to `DynamoDBOperation` enum +2. Add model types: `TransactGetItemsInput/Output`, `TransactWriteItemsInput/Output`, `TransactGetItem`, `TransactWriteItem`, `Get`, `Put`, `Update`, `Delete`, `ConditionCheck` +3. Implement `handle_transact_get_items()`: + - Validate max 100 items + - For each `Get`: resolve table, extract key, fetch item, apply projection + - All reads are atomic (snapshot isolation) — take read locks on all involved partitions + - Return items in order matching input +4. 
Implement `handle_transact_write_items()`: + - Validate max 100 actions + - Validate no two actions target the same item (primary key collision detection) + - **Phase 1 — Validate all conditions:** + - For each `ConditionCheck`: evaluate condition expression, fail entire transaction if any condition fails + - For each `Put` with condition: evaluate condition expression + - For each `Update` with condition: evaluate condition expression + - For each `Delete` with condition: evaluate condition expression + - **Phase 2 — Apply all writes:** + - Acquire write locks on all involved partitions (sorted by partition key to prevent deadlocks) + - For each `Put`: insert/replace item + - For each `Update`: apply update expression + - For each `Delete`: remove item + - Emit stream events for all changes + - Release all locks + - **On any failure:** release all locks, return `TransactionCanceledException` with per-item cancellation reasons + +**Concurrency strategy:** Sort all involved (table, partition_key) pairs lexicographically. Acquire DashMap shard locks in this order to prevent deadlocks. This is the same approach used by real DynamoDB (deterministic lock ordering). + +**Estimated:** ~800-1,000 LoC + +### Phase 2: Lambda Layers (9 ops) + +**Estimated effort:** ~800-1,000 LoC +**Priority:** High — unblocks Lambda deployments that use layers + +#### Step 2.1: Layer Storage + +Add to Lambda storage: + +```rust +pub struct LayerStore { + /// layer_name -> versions + layers: DashMap<String, LayerRecord>, +} + +pub struct LayerRecord { + name: String, + versions: BTreeMap<u64, LayerVersionRecord>, + next_version: u64, +} + +pub struct LayerVersionRecord { + version: u64, + description: String, + compatible_runtimes: Vec<String>, + compatible_architectures: Vec<String>, + license_info: Option<String>, + code_sha256: String, + code_size: u64, + code_path: PathBuf, + created_date: String, + layer_arn: String, + layer_version_arn: String, + policy: PolicyDocument, +} +``` + +#### Step 2.2: Operations + +1. 
**PublishLayerVersion:** Accept zip/S3 code, store on disk, create version record, return ARN +2. **GetLayerVersion:** Lookup by name + version number, return metadata + code location +3. **GetLayerVersionByArn:** Parse ARN → (name, version), delegate to GetLayerVersion +4. **ListLayerVersions:** Return all versions for a layer, sorted descending +5. **ListLayers:** Return latest version of each layer +6. **DeleteLayerVersion:** Remove specific version, cleanup code on disk +7. **AddLayerVersionPermission:** Add statement to layer version's policy +8. **GetLayerVersionPolicy:** Return layer version's policy document +9. **RemoveLayerVersionPermission:** Remove statement from policy by SID + +#### Step 2.3: Integration with CreateFunction/UpdateFunctionConfiguration + +Modify existing function creation to validate layer ARNs: +- Each layer ARN must reference an existing layer version +- Max 5 layers per function +- Store resolved layer ARNs in version record (already a `Vec<String>` field) + +### Phase 3: Lambda Event Source Mappings (5 ops) + +**Estimated effort:** ~1,000-1,200 LoC +**Priority:** High — unblocks SQS/Kinesis/DDB trigger-based Lambdas + +#### Step 3.1: Event Source Mapping Storage + +```rust +pub struct EventSourceMappingStore { + mappings: DashMap<String, EventSourceMappingRecord>, +} + +pub struct EventSourceMappingRecord { + uuid: String, + event_source_arn: String, + function_arn: String, + state: String, // Enabled, Disabled, Creating, Updating, Deleting + batch_size: u32, + maximum_batching_window_in_seconds: u32, + starting_position: Option<String>, // TRIM_HORIZON, LATEST, AT_TIMESTAMP + starting_position_timestamp: Option<String>, + enabled: bool, + filter_criteria: Option<FilterCriteria>, + maximum_record_age_in_seconds: Option<i32>, + bisect_batch_on_function_error: Option<bool>, + maximum_retry_attempts: Option<i32>, + parallelization_factor: Option<u32>, + destination_config: Option<DestinationConfig>, + function_response_types: Vec<String>, + last_modified: String, + last_processing_result: String, + state_transition_reason: String, +} +``` + +#### Step 
3.2: Operations + +1. **CreateEventSourceMapping:** Validate event source ARN (SQS/Kinesis/DDB Streams), validate function exists, create UUID, store record, return mapping. State is `Creating` → `Enabled`. +2. **GetEventSourceMapping:** Lookup by UUID, return record. +3. **UpdateEventSourceMapping:** Update mutable fields (batch_size, enabled, filter_criteria, etc.) +4. **DeleteEventSourceMapping:** Remove by UUID. State transitions to `Deleting`. +5. **ListEventSourceMappings:** Filter by function name and/or event source ARN. + +#### Step 3.3: Polling Engine (Optional) + +For full behavioral emulation, an optional background polling engine: +- Polls SQS queues, Kinesis shards, or DDB Streams on a timer +- Invokes the mapped Lambda function with the polled records +- Handles success (delete from SQS / advance shard iterator) and failure (retry, DLQ) + +This is a significant feature (~2,000+ LoC) and can be deferred. The metadata CRUD alone (Tier 1) unblocks IaC deployments. The polling engine is a Tier 2 enhancement. + +### Phase 4: IAM OIDC + Resource Tagging (10 ops) + +**Estimated effort:** ~500-600 LoC +**Priority:** High for EKS users + +#### Step 4.1: OIDC Provider Storage + +```rust +pub struct OIDCProviderRecord { + arn: String, + url: String, // issuer URL (https://...) + client_id_list: Vec<String>, + thumbprint_list: Vec<String>, + tags: HashMap<String, String>, + create_date: String, +} +``` + +Add `oidc_providers: DashMap<String, OIDCProviderRecord>` to `IAMServiceState`. + +#### Step 4.2: Operations + +Standard CRUD pattern. ARN format: `arn:aws:iam::{account}:oidc-provider/{url_host_and_path}`. + +#### Step 4.3: Policy/InstanceProfile Tagging + +Add `tags: HashMap<String, String>` to `PolicyRecord` and `InstanceProfileRecord` (if not already present). Implement standard TagResource/UntagResource/ListTags pattern. 
+ +### Phase 5: EventBridge Remaining Handlers (13+ ops) + +**Estimated effort:** ~600-800 LoC +**Priority:** Medium — operations are already in the model + +Implement handler methods for already-defined operation variants: +- Archives: Store event archive metadata, accept replay requests +- API Destinations: Store HTTP endpoint configurations +- Connections: Store authentication configurations (API key, OAuth, Basic) +- Endpoints: Store global endpoint configurations + +All are metadata CRUD with no behavioral side effects needed for local dev. + +### Phase 6: Tier 2 Grab Bag + +Implement remaining Tier 2 operations across services. Each is independent and can be parallelized: +- DynamoDB PartiQL (3 ops, ~800 LoC for parser) +- DynamoDB DescribeLimits/DescribeEndpoints (2 ops, ~60 LoC) +- Lambda concurrency + event invoke configs (8 ops, ~300 LoC) +- IAM SAML + account aliases + permission boundaries (14 ops, ~700 LoC) +- S3 replication config + SelectObjectContent + RestoreObject (7 ops, ~1,500 LoC) +- SSM documents (5 ops, ~500 LoC) +- KMS key import (3 ops, ~500 LoC) + +--- + +## 10. 
Detailed Design: DynamoDB Transactions + +### 10.1 Model Types + +```rust +/// Input for TransactWriteItems +pub struct TransactWriteItemsInput { + pub transact_items: Vec<TransactWriteItem>, + pub return_consumed_capacity: Option<String>, + pub return_item_collection_metrics: Option<String>, + pub client_request_token: Option<String>, // idempotency token +} + +pub struct TransactWriteItem { + pub condition_check: Option<ConditionCheck>, + pub put: Option<Put>, + pub delete: Option<Delete>, + pub update: Option<Update>, +} + +pub struct ConditionCheck { + pub table_name: String, + pub key: HashMap<String, AttributeValue>, + pub condition_expression: String, + pub expression_attribute_names: Option<HashMap<String, String>>, + pub expression_attribute_values: Option<HashMap<String, AttributeValue>>, + pub return_values_on_condition_check_failure: Option<String>, +} + +pub struct Put { + pub table_name: String, + pub item: HashMap<String, AttributeValue>, + pub condition_expression: Option<String>, + pub expression_attribute_names: Option<HashMap<String, String>>, + pub expression_attribute_values: Option<HashMap<String, AttributeValue>>, + pub return_values_on_condition_check_failure: Option<String>, +} + +// Delete and Update follow same pattern as existing operations +// but embedded within a transaction context +``` + +### 10.2 Concurrency Protocol + +``` +TransactWriteItems(items): + 1. Validate: max 100 items, no duplicate (table, key) pairs + 2. Collect all (table_name, partition_key) pairs + 3. Sort pairs lexicographically (prevents deadlock) + 4. For each pair, acquire write access on the DashMap shard: + - DashMap internally hashes the key to a shard + - We use DashMap::get_mut() which holds the shard lock + 5. Check all conditions: + - For each ConditionCheck/Put/Update/Delete with condition_expression: + fetch current item, evaluate condition + - If ANY condition fails: release all locks, return TransactionCanceledException + with per-item cancellation reasons + 6. Apply all writes: + - Put: insert/replace item + - Update: apply update expression + - Delete: remove item + 7. Emit stream events for all changes + 8. Release all locks (automatic via drop) + 9. 
Return success +``` + +### 10.3 Idempotency Token + +If `client_request_token` is provided, cache the token with a 10-minute TTL. If the same token is seen again within the window, return the cached response without re-executing. Use a `DashMap` with periodic cleanup. + +### 10.4 Error Responses + +```rust +pub struct TransactionCanceledException { + pub message: String, + pub cancellation_reasons: Vec<CancellationReason>, +} + +pub struct CancellationReason { + pub code: String, // "None", "ConditionalCheckFailed", "ItemCollectionSizeLimitExceeded", etc. + pub message: Option<String>, + pub item: Option<HashMap<String, AttributeValue>>, // if return_values_on_condition_check_failure = ALL_OLD +} +``` + +--- + +## 11. Detailed Design: Lambda Layers & Event Source Mappings + +### 11.1 Layer ARN Format + +``` +arn:aws:lambda:{region}:{account}:layer:{layer_name}:{version} +``` + +The ARN without version refers to the layer itself. With version, it refers to a specific layer version. + +### 11.2 Layer Code Storage + +Follow the same pattern as function code: + +``` +{code_dir}/layers/{layer_name}/{version}/layer.zip +``` + +Layer zip files are stored on disk, referenced by `PathBuf` in the `LayerVersionRecord`. SHA-256 hash computed on ingestion for integrity. + +### 11.3 Layer Validation During Function Create/Update + +When `CreateFunction` or `UpdateFunctionConfiguration` specifies layers: + +```rust +fn validate_layers(&self, layer_arns: &[String]) -> Result<(), LambdaServiceError> { + if layer_arns.len() > 5 { + return Err(LambdaServiceError::InvalidParameter( + "Layers list exceeds maximum of 5".into() + )); + } + for arn in layer_arns { + let (name, version) = parse_layer_arn(arn)?; + self.layer_store.get_version(&name, version)?; + } + Ok(()) +} +``` + +### 11.4 Event Source Mapping UUID Generation + +Use UUID v4 for mapping identifiers. The UUID is the primary key for all ESM operations. 
+ +### 11.5 Event Source Mapping State Machine + +``` +Creating → Enabled ↔ Disabled +Creating → CreateFailed +Enabled → Updating → Enabled +Enabled → Deleting → Deleted +Disabled → Deleting → Deleted +``` + +For local dev, transitions are instant (no async provisioning). + +--- + +## 12. Detailed Design: DynamoDB TTL & Tagging + +### 12.1 TTL Storage + +Already implemented in `DynamoDBTable`: + +```rust +pub struct DynamoDBTable { + // ... existing fields ... + pub ttl: parking_lot::RwLock<Option<TimeToLiveSpecification>>, +} + +pub struct TimeToLiveSpecification { + pub attribute_name: String, + pub enabled: bool, +} +``` + +### 12.2 TTL Validation Rules + +- Only one TTL attribute per table +- Attribute name must be a top-level attribute (no nested paths) +- Attribute name must be 1-255 characters +- Cannot enable/disable TTL within 1 hour of previous change (enforce with `last_ttl_change: Option<Instant>`) + +### 12.3 Tagging Validation Rules + +- Max 50 tags per resource +- Tag key: 1-128 Unicode characters +- Tag value: 0-256 Unicode characters +- Tag keys cannot start with `aws:` prefix (reserved) +- Resource is identified by ARN, not table name + +### 12.4 Tag ARN Resolution + +The tagging API uses resource ARNs, not table names. Implement ARN → table name resolution: + +```rust +fn resolve_table_from_arn(&self, arn: &str) -> Result<&str, DynamoDBError> { + // arn:aws:dynamodb:{region}:{account}:table/{table_name} + arn.strip_prefix("arn:aws:dynamodb:") + .and_then(|s| s.split('/').nth(1)) + .ok_or_else(|| DynamoDBError::validation("Invalid resource ARN")) +} +``` + +--- + +## 13. Cross-Cutting Concerns + +### 13.1 Operation Registration Pattern + +Every new operation follows the same three-step pattern across all services: + +1. **Model:** Add enum variant to `{Service}Operation`, implement `as_str()` and `from_name()` +2. **HTTP:** Add match arm in `handler.rs` dispatch function +3. 
**Core:** Implement `handle_{operation}()` method on the provider + +For services using `awsJson1_0`/`awsJson1_1` (DynamoDB, KMS, Logs, CloudWatch), the HTTP layer is a single dispatch function keyed on the `X-Amz-Target` header. Adding an operation is adding one match arm. + +For services using `restJson1` (Lambda, API Gateway v2), adding an operation requires adding a route entry to the route table. + +For services using `awsQuery` (IAM, SES, STS, SNS), adding an operation requires adding a match arm keyed on the `Action` query parameter. + +### 13.2 Smithy Model Regeneration + +New model types for added operations should be generated from Smithy models where possible. For DynamoDB transactions, the Smithy model already contains `TransactGetItems`, `TransactWriteItems`, and all associated types. Run `cargo run -p codegen -- --service dynamodb` to regenerate and pick up the new types. + +For operations where Smithy codegen doesn't yet support the service (Lambda, IAM), hand-write the types following existing patterns. The types are straightforward serde structs. + +### 13.3 Error Code Consistency + +All new operations must use the service-specific error types already defined. New error variants needed: + +- DynamoDB: `TransactionCanceledException`, `TransactionConflictException`, `TransactionInProgressException`, `IdempotentParameterMismatchException` +- Lambda: `LayerVersionNotFoundException`, `LayerNotFoundException`, `EventSourceMappingNotFoundException` +- IAM: `EntityAlreadyExistsException` (for OIDC providers), `NoSuchEntityException` + +### 13.4 Pagination + +Several new list operations return paginated results: + +- `ListLayers`, `ListLayerVersions`: Marker-based (same as `ListFunctions`) +- `ListEventSourceMappings`: Marker-based with optional filters +- `ListOpenIDConnectProviders`: Returns all (no pagination, max ~100) + +Follow existing pagination patterns: accept `Marker`/`MaxItems`, return `NextMarker` when more results exist. 
+ +--- + +## 14. Testing Strategy + +### 14.1 Unit Tests + +For each new operation, test: +- Happy path with minimal input +- Happy path with all optional fields +- Validation error cases (missing required fields, invalid values, limit exceeded) +- Idempotency (where applicable) +- Concurrent access (for transactions) + +### 14.2 Integration Tests + +For each phase, write integration tests using `aws-sdk-rust`: + +**Phase 1 (DynamoDB Transactions):** +- Create table → TransactWriteItems (Put 3 items) → TransactGetItems (read all 3) +- TransactWriteItems with condition check failure → verify no items written +- Concurrent TransactWriteItems targeting same item → verify one succeeds +- UpdateTimeToLive → DescribeTimeToLive → verify round-trip +- TagResource → ListTagsOfResource → verify tags present +- UntagResource → ListTagsOfResource → verify tag removed + +**Phase 2 (Lambda Layers):** +- PublishLayerVersion with zip → GetLayerVersion → verify metadata +- CreateFunction with layer ARN → GetFunction → verify layer in config +- DeleteLayerVersion → CreateFunction with deleted layer → verify error +- ListLayers → verify all layers returned + +**Phase 3 (Lambda ESM):** +- CreateEventSourceMapping (SQS) → GetEventSourceMapping → verify +- UpdateEventSourceMapping (disable) → GetEventSourceMapping → verify state +- DeleteEventSourceMapping → ListEventSourceMappings → verify removed +- CreateEventSourceMapping with non-existent function → verify error + +### 14.3 LocalStack Test Suite Compatibility + +Where applicable, port relevant LocalStack test cases: +- `tests/aws/services/dynamodb/test_dynamodb.py` — transaction tests +- `tests/aws/services/lambda_/test_lambda_api.py` — layer and ESM tests +- `tests/aws/services/iam/test_iam.py` — OIDC provider tests + +Run these against RustStack to validate behavioral parity. + +--- + +## 15. 
Risk Analysis + +### 15.1 Technical Risks + +| Risk | Likelihood | Impact | Mitigation | +|---|---|---|---| +| Transaction deadlock under concurrent load | Medium | High | Deterministic lock ordering (sorted partition keys). Extensive concurrent test suite. | +| Transaction condition evaluation semantics diverge from AWS | Medium | Medium | Test against real DynamoDB for edge cases. Port LocalStack transaction tests. | +| PartiQL parser incomplete for complex queries | High | Low | Start with simple SELECT/INSERT/UPDATE/DELETE. Add complexity incrementally. | +| Layer zip extraction interferes with function invoke | Low | Medium | Layers are stored but not extracted during invoke (invoke is still stubbed). | +| Event Source Mapping polling introduces async complexity | Medium | Medium | Defer polling engine. Phase 3 is metadata CRUD only. | + +### 15.2 Scope Risks + +| Risk | Likelihood | Impact | Mitigation | +|---|---|---|---| +| Users demand Tier 4 operations | Medium | Low | Return clear `NotImplementedError` with message. Accept feature requests. | +| Tier 2 operations become blocking for a major user | Medium | Medium | Tier 2 is designed to be independently implementable. Can fast-track individual operations. | +| New AWS operations added to services we support | High | Low | Regenerate from Smithy models quarterly. New ops default to NotImplemented. 
| + +### 15.3 Behavioral Differences + +| Behavior | AWS | RustStack | Justification | +|---|---|---|---| +| TTL deletion timing | Items deleted within 48 hours | Items not deleted (metadata only) | Background deletion adds complexity with minimal local dev value | +| Transaction conflict window | Strict serializable isolation | Partition-level lock ordering | Sufficient for local dev correctness | +| Event Source Mapping polling | AWS manages polling infrastructure | No polling (metadata only, Phase 3) | IaC needs metadata; polling is optional enhancement | +| Layer code extraction | Layers merged into function runtime | Layers stored but not merged into invoke | Invoke is already stubbed; full layer merging deferred to Docker execution engine | +| Idempotency token TTL | 10 minutes | 10 minutes | Match AWS behavior | + +--- + +## 16. Success Metrics + +### 16.1 Coverage Targets + +| Milestone | Operations | % of LocalStack shared ops | Phase | +|---|---|---|---| +| Current | 586 | 53% | — | +| After Phase 1-4 (Tier 1) | 630 | 57% | 1-4 | +| After Phase 5 (EventBridge) | 643 | 58% | 5 | +| After Phase 6 (Tier 2) | 699 | 64% | 6 | +| Tier 3 stubs | 779 | 71% | Future | + +### 16.2 Qualitative Targets + +- **DynamoDB:** Any application using transactions, TTL, or tagging works without modification +- **Lambda:** Any IaC deployment using layers or event source mappings succeeds +- **IAM:** EKS IRSA testing works (OIDC providers) +- **EventBridge:** All defined operations have working handlers +- **Zero regressions:** All existing tests continue to pass + +### 16.3 Compatibility Validation + +For each phase, validate against: +1. `aws-sdk-rust` integration tests +2. `aws-cli` manual smoke tests +3. Terraform `plan`/`apply`/`destroy` cycle for resources using the new operations +4. 
CDK `synth`/`deploy` for representative stacks diff --git a/tests/integration/src/test_dynamodb.rs b/tests/integration/src/test_dynamodb.rs index 35a835d..6c7d5d3 100644 --- a/tests/integration/src/test_dynamodb.rs +++ b/tests/integration/src/test_dynamodb.rs @@ -645,4 +645,341 @@ mod tests { .await .unwrap(); } + + // ----------------------------------------------------------------------- + // Tagging + // ----------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_tag_and_list_tags() { + use aws_sdk_dynamodb::types::Tag; + + let client = dynamodb_client(); + let table_name = test_table_name("tag"); + + create_simple_table(&client, &table_name).await; + + // Get the table ARN for tagging operations. + let desc = client + .describe_table() + .table_name(&table_name) + .send() + .await + .unwrap(); + let table_arn = desc.table().unwrap().table_arn().unwrap().to_owned(); + + // Tag the table with 2 tags. + client + .tag_resource() + .resource_arn(&table_arn) + .tags(Tag::builder().key("env").value("test").build().unwrap()) + .tags(Tag::builder().key("team").value("backend").build().unwrap()) + .send() + .await + .unwrap(); + + // List tags and verify both are present. + let resp = client + .list_tags_of_resource() + .resource_arn(&table_arn) + .send() + .await + .unwrap(); + + let tags = resp.tags(); + assert_eq!(tags.len(), 2); + + let tag_map: std::collections::HashMap<&str, &str> = + tags.iter().map(|t| (t.key(), t.value())).collect(); + assert_eq!(tag_map.get("env"), Some(&"test")); + assert_eq!(tag_map.get("team"), Some(&"backend")); + + // Untag one tag. + client + .untag_resource() + .resource_arn(&table_arn) + .tag_keys("team") + .send() + .await + .unwrap(); + + // Verify only one tag remains. 
+ let resp = client + .list_tags_of_resource() + .resource_arn(&table_arn) + .send() + .await + .unwrap(); + + let tags = resp.tags(); + assert_eq!(tags.len(), 1); + assert_eq!(tags[0].key(), "env"); + assert_eq!(tags[0].value(), "test"); + + // Cleanup. + client + .delete_table() + .table_name(&table_name) + .send() + .await + .unwrap(); + } + + // ----------------------------------------------------------------------- + // TTL + // ----------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_update_and_describe_ttl() { + use aws_sdk_dynamodb::types::TimeToLiveSpecification; + + let client = dynamodb_client(); + let table_name = test_table_name("ttl"); + + create_simple_table(&client, &table_name).await; + + // Enable TTL on the "expires_at" attribute. + client + .update_time_to_live() + .table_name(&table_name) + .time_to_live_specification( + TimeToLiveSpecification::builder() + .enabled(true) + .attribute_name("expires_at") + .build() + .unwrap(), + ) + .send() + .await + .unwrap(); + + // Describe TTL and verify it is enabled. + let resp = client + .describe_time_to_live() + .table_name(&table_name) + .send() + .await + .unwrap(); + + let ttl_desc = resp.time_to_live_description().unwrap(); + assert_eq!(ttl_desc.attribute_name(), Some("expires_at")); + // TTL status should be ENABLED or ENABLING. + let status = ttl_desc.time_to_live_status().unwrap(); + assert!( + matches!( + status, + aws_sdk_dynamodb::types::TimeToLiveStatus::Enabled + | aws_sdk_dynamodb::types::TimeToLiveStatus::Enabling + ), + "expected TTL to be ENABLED or ENABLING, got {status:?}" + ); + + // Cleanup. 
+ client + .delete_table() + .table_name(&table_name) + .send() + .await + .unwrap(); + } + + // ----------------------------------------------------------------------- + // Transactions + // ----------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_transact_write_and_get_items() { + use aws_sdk_dynamodb::types::{Get, Put, TransactGetItem, TransactWriteItem}; + + let client = dynamodb_client(); + let table_name = test_table_name("txwr"); + + create_simple_table(&client, &table_name).await; + + // TransactWriteItems: put 3 items in a single transaction. + let write_items: Vec = (1..=3) + .map(|i| { + TransactWriteItem::builder() + .put( + Put::builder() + .table_name(&table_name) + .item("pk", AttributeValue::S(format!("txn{i}"))) + .item("data", AttributeValue::S(format!("value{i}"))) + .build() + .unwrap(), + ) + .build() + }) + .collect(); + + client + .transact_write_items() + .set_transact_items(Some(write_items)) + .send() + .await + .unwrap(); + + // TransactGetItems: read all 3 items back. + let get_items: Vec = (1..=3) + .map(|i| { + TransactGetItem::builder() + .get( + Get::builder() + .table_name(&table_name) + .key("pk", AttributeValue::S(format!("txn{i}"))) + .build() + .unwrap(), + ) + .build() + }) + .collect(); + + let resp = client + .transact_get_items() + .set_transact_items(Some(get_items)) + .send() + .await + .unwrap(); + + let responses = resp.responses(); + assert_eq!(responses.len(), 3); + + for (i, item_resp) in responses.iter().enumerate() { + let item = item_resp.item().unwrap(); + assert_eq!( + item.get("pk").unwrap().as_s().unwrap(), + &format!("txn{}", i + 1) + ); + assert_eq!( + item.get("data").unwrap().as_s().unwrap(), + &format!("value{}", i + 1) + ); + } + + // Cleanup. 
+ client + .delete_table() + .table_name(&table_name) + .send() + .await + .unwrap(); + } + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_fail_transact_write_on_condition() { + use aws_sdk_dynamodb::types::{ConditionCheck, Put, TransactWriteItem}; + + let client = dynamodb_client(); + let table_name = test_table_name("txcond"); + + create_simple_table(&client, &table_name).await; + + // Put an existing item. + client + .put_item() + .table_name(&table_name) + .item("pk", AttributeValue::S("existing".to_owned())) + .item("status", AttributeValue::S("active".to_owned())) + .send() + .await + .unwrap(); + + // TransactWriteItems with a ConditionCheck that fails: + // check that "existing" item has status = "inactive" (it does not). + let err = client + .transact_write_items() + .transact_items( + TransactWriteItem::builder() + .condition_check( + ConditionCheck::builder() + .table_name(&table_name) + .key("pk", AttributeValue::S("existing".to_owned())) + .condition_expression("#s = :expected") + .expression_attribute_names("#s", "status") + .expression_attribute_values( + ":expected", + AttributeValue::S("inactive".to_owned()), + ) + .build() + .unwrap(), + ) + .build(), + ) + .transact_items( + TransactWriteItem::builder() + .put( + Put::builder() + .table_name(&table_name) + .item("pk", AttributeValue::S("new_item".to_owned())) + .item("data", AttributeValue::S("should_not_exist".to_owned())) + .build() + .unwrap(), + ) + .build(), + ) + .send() + .await; + + // The transaction should fail with TransactionCanceledException. + assert!(err.is_err()); + let err_str = format!("{:?}", err.unwrap_err()); + assert!( + err_str.contains("TransactionCanceled"), + "expected TransactionCanceledException, got: {err_str}" + ); + + // Verify the new item was NOT written (transaction is atomic). 
+ let resp = client + .get_item() + .table_name(&table_name) + .key("pk", AttributeValue::S("new_item".to_owned())) + .send() + .await + .unwrap(); + assert!(resp.item().is_none()); + + // Cleanup. + client + .delete_table() + .table_name(&table_name) + .send() + .await + .unwrap(); + } + + // ----------------------------------------------------------------------- + // Describe Limits & Endpoints + // ----------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_describe_limits() { + let client = dynamodb_client(); + + let resp = client.describe_limits().send().await.unwrap(); + + // DynamoDB default account limits. + assert_eq!(resp.table_max_read_capacity_units(), Some(40_000)); + assert_eq!(resp.table_max_write_capacity_units(), Some(40_000)); + assert_eq!(resp.account_max_read_capacity_units(), Some(80_000)); + assert_eq!(resp.account_max_write_capacity_units(), Some(80_000)); + } + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_describe_endpoints() { + let client = dynamodb_client(); + + let resp = client.describe_endpoints().send().await.unwrap(); + + let endpoints = resp.endpoints(); + assert!( + !endpoints.is_empty(), + "expected at least one endpoint from DescribeEndpoints" + ); + } } diff --git a/tests/integration/src/test_events.rs b/tests/integration/src/test_events.rs index ad8df56..6a8c183 100644 --- a/tests/integration/src/test_events.rs +++ b/tests/integration/src/test_events.rs @@ -533,4 +533,274 @@ mod tests { events.delete_rule().name(&rule_name).send().await.unwrap(); delete_test_queue(&sqs, &queue_url).await; } + + // ----------------------------------------------------------------------- + // Archives + // ----------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_create_and_describe_archive() { + let client = events_client(); 
+ let archive_name = format!( + "test-archive-{}", + uuid::Uuid::new_v4().to_string()[..8].to_owned() + ); + + let create = client + .create_archive() + .archive_name(&archive_name) + .event_source_arn("arn:aws:events:us-east-1:000000000000:event-bus/default") + .send() + .await + .unwrap(); + + assert!(create.archive_arn().is_some()); + assert_eq!( + create.state(), + Some(&eventbridge::types::ArchiveState::Enabled) + ); + + // Describe + let describe = client + .describe_archive() + .archive_name(&archive_name) + .send() + .await + .unwrap(); + + assert_eq!(describe.archive_name(), Some(archive_name.as_str())); + assert_eq!( + describe.state(), + Some(&eventbridge::types::ArchiveState::Enabled) + ); + assert!(describe.archive_arn().is_some()); + + // Cleanup + client + .delete_archive() + .archive_name(&archive_name) + .send() + .await + .unwrap(); + } + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_list_archives() { + let client = events_client(); + let archive_name = format!( + "test-archive-list-{}", + uuid::Uuid::new_v4().to_string()[..8].to_owned() + ); + + client + .create_archive() + .archive_name(&archive_name) + .event_source_arn("arn:aws:events:us-east-1:000000000000:event-bus/default") + .send() + .await + .unwrap(); + + let list = client.list_archives().send().await.unwrap(); + assert!( + list.archives() + .iter() + .any(|a| a.archive_name() == Some(archive_name.as_str())), + "archive should appear in list" + ); + + // Cleanup + client + .delete_archive() + .archive_name(&archive_name) + .send() + .await + .unwrap(); + } + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_delete_archive() { + let client = events_client(); + let archive_name = format!( + "test-archive-del-{}", + uuid::Uuid::new_v4().to_string()[..8].to_owned() + ); + + client + .create_archive() + .archive_name(&archive_name) + .event_source_arn("arn:aws:events:us-east-1:000000000000:event-bus/default") + .send() + .await 
+ .unwrap(); + + // Delete + client + .delete_archive() + .archive_name(&archive_name) + .send() + .await + .unwrap(); + + // Describe should fail + let result = client + .describe_archive() + .archive_name(&archive_name) + .send() + .await; + assert!(result.is_err(), "describe after delete should fail"); + } + + // ----------------------------------------------------------------------- + // Connections + // ----------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_create_and_describe_connection() { + let client = events_client(); + let conn_name = format!( + "test-conn-{}", + uuid::Uuid::new_v4().to_string()[..8].to_owned() + ); + + let auth_params = eventbridge::types::CreateConnectionAuthRequestParameters::builder() + .api_key_auth_parameters( + eventbridge::types::CreateConnectionApiKeyAuthRequestParameters::builder() + .api_key_name("x-api-key") + .api_key_value("secret-key-value") + .build() + .unwrap(), + ) + .build(); + + let create = client + .create_connection() + .name(&conn_name) + .authorization_type(eventbridge::types::ConnectionAuthorizationType::ApiKey) + .auth_parameters(auth_params) + .send() + .await + .unwrap(); + + assert!(create.connection_arn().is_some()); + assert_eq!( + create.connection_state(), + Some(&eventbridge::types::ConnectionState::Authorized) + ); + + // Describe + let describe = client + .describe_connection() + .name(&conn_name) + .send() + .await + .unwrap(); + + assert_eq!(describe.name(), Some(conn_name.as_str())); + assert_eq!( + describe.authorization_type(), + Some(&eventbridge::types::ConnectionAuthorizationType::ApiKey) + ); + + // Cleanup + client + .delete_connection() + .name(&conn_name) + .send() + .await + .unwrap(); + } + + // ----------------------------------------------------------------------- + // API Destinations + // ----------------------------------------------------------------------- + + #[tokio::test] + 
#[ignore = "requires running server"] + async fn test_should_create_and_describe_api_destination() { + let client = events_client(); + let conn_name = format!( + "test-apidest-conn-{}", + uuid::Uuid::new_v4().to_string()[..8].to_owned() + ); + let dest_name = format!( + "test-apidest-{}", + uuid::Uuid::new_v4().to_string()[..8].to_owned() + ); + + // Create a connection first (required for API destinations) + let auth_params = eventbridge::types::CreateConnectionAuthRequestParameters::builder() + .api_key_auth_parameters( + eventbridge::types::CreateConnectionApiKeyAuthRequestParameters::builder() + .api_key_name("x-api-key") + .api_key_value("secret-key-value") + .build() + .unwrap(), + ) + .build(); + + let conn = client + .create_connection() + .name(&conn_name) + .authorization_type(eventbridge::types::ConnectionAuthorizationType::ApiKey) + .auth_parameters(auth_params) + .send() + .await + .unwrap(); + + let connection_arn = conn.connection_arn().unwrap().to_string(); + + // Create API destination + let create = client + .create_api_destination() + .name(&dest_name) + .connection_arn(&connection_arn) + .invocation_endpoint("https://httpbin.org/post") + .http_method(eventbridge::types::ApiDestinationHttpMethod::Post) + .send() + .await + .unwrap(); + + assert!(create.api_destination_arn().is_some()); + assert_eq!( + create.api_destination_state(), + Some(&eventbridge::types::ApiDestinationState::Active) + ); + + // Describe + let describe = client + .describe_api_destination() + .name(&dest_name) + .send() + .await + .unwrap(); + + assert_eq!(describe.name(), Some(dest_name.as_str())); + assert_eq!( + describe.invocation_endpoint(), + Some("https://httpbin.org/post") + ); + assert_eq!( + describe.http_method(), + Some(&eventbridge::types::ApiDestinationHttpMethod::Post) + ); + assert_eq!(describe.connection_arn(), Some(connection_arn.as_str())); + + // Cleanup + client + .delete_api_destination() + .name(&dest_name) + .send() + .await + .unwrap(); + client + 
.delete_connection() + .name(&conn_name) + .send() + .await + .unwrap(); + } } diff --git a/tests/integration/src/test_health.rs b/tests/integration/src/test_health.rs index 0966378..4abb4a4 100644 --- a/tests/integration/src/test_health.rs +++ b/tests/integration/src/test_health.rs @@ -6,8 +6,10 @@ #[cfg(test)] mod tests { - use tokio::io::{AsyncReadExt, AsyncWriteExt}; - use tokio::net::TcpStream; + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, + }; use crate::endpoint_url; diff --git a/tests/integration/src/test_iam.rs b/tests/integration/src/test_iam.rs index bc70c10..23d2fad 100644 --- a/tests/integration/src/test_iam.rs +++ b/tests/integration/src/test_iam.rs @@ -1,7 +1,8 @@ //! Integration tests for the IAM service. //! //! These tests require a running RustStack server at `localhost:4566`. -//! They are marked `#[ignore = "requires running RustStack server"]` so they don't run during normal `cargo test`. +//! They are marked `#[ignore = "requires running RustStack server"]` so they don't run during +//! normal `cargo test`. 
#[allow(unused_imports)] use crate::iam_client; @@ -848,3 +849,278 @@ async fn test_should_get_account_authorization_details() { .await .expect("delete role"); } + +// --------------------------------------------------------------------------- +// Phase 4: OIDC Providers + Policy Tags + Instance Profile Tags +// --------------------------------------------------------------------------- + +#[tokio::test] +#[ignore = "requires running RustStack server"] +async fn test_should_create_and_get_oidc_provider() { + let client = iam_client(); + + let create = client + .create_open_id_connect_provider() + .url("https://token.example.com") + .client_id_list("my-client-id") + .thumbprint_list("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + .send() + .await + .expect("create OIDC provider"); + + let provider_arn = create + .open_id_connect_provider_arn() + .expect("provider ARN") + .to_string(); + + assert!(provider_arn.contains("oidc-provider/token.example.com")); + + // Get and verify + let get = client + .get_open_id_connect_provider() + .open_id_connect_provider_arn(&provider_arn) + .send() + .await + .expect("get OIDC provider"); + + assert_eq!(get.url(), Some("https://token.example.com")); + assert!( + get.client_id_list().iter().any(|c| c == "my-client-id"), + "client ID list should contain my-client-id" + ); + assert!( + get.thumbprint_list() + .iter() + .any(|t| t == "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + "thumbprint list should contain the expected thumbprint" + ); + + // Cleanup + client + .delete_open_id_connect_provider() + .open_id_connect_provider_arn(&provider_arn) + .send() + .await + .expect("delete OIDC provider"); +} + +#[tokio::test] +#[ignore = "requires running RustStack server"] +async fn test_should_list_oidc_providers() { + let client = iam_client(); + + let create = client + .create_open_id_connect_provider() + .url("https://list-test.example.com") + .thumbprint_list("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + .send() + .await + 
.expect("create OIDC provider"); + + let provider_arn = create + .open_id_connect_provider_arn() + .expect("provider ARN") + .to_string(); + + let list = client + .list_open_id_connect_providers() + .send() + .await + .expect("list OIDC providers"); + + assert!( + list.open_id_connect_provider_list() + .iter() + .any(|p| p.arn() == Some(provider_arn.as_str())), + "provider should appear in the list" + ); + + // Cleanup + client + .delete_open_id_connect_provider() + .open_id_connect_provider_arn(&provider_arn) + .send() + .await + .expect("delete OIDC provider"); +} + +#[tokio::test] +#[ignore = "requires running RustStack server"] +async fn test_should_delete_oidc_provider() { + let client = iam_client(); + + let create = client + .create_open_id_connect_provider() + .url("https://delete-test.example.com") + .thumbprint_list("cccccccccccccccccccccccccccccccccccccccc") + .send() + .await + .expect("create OIDC provider"); + + let provider_arn = create + .open_id_connect_provider_arn() + .expect("provider ARN") + .to_string(); + + // Delete + client + .delete_open_id_connect_provider() + .open_id_connect_provider_arn(&provider_arn) + .send() + .await + .expect("delete OIDC provider"); + + // Get should fail with NoSuchEntity + let result = client + .get_open_id_connect_provider() + .open_id_connect_provider_arn(&provider_arn) + .send() + .await; + + assert!(result.is_err(), "get after delete should fail"); + let err = result.unwrap_err(); + let service_err = err.as_service_error().expect("should be a service error"); + assert!( + service_err.is_no_such_entity_exception(), + "error should be NoSuchEntity, got: {service_err:?}" + ); +} + +#[tokio::test] +#[ignore = "requires running RustStack server"] +async fn test_should_tag_and_list_policy_tags() { + let client = iam_client(); + let policy_doc = r#"{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}"#; + + let create = client + .create_policy() + 
.policy_name("tag-test-policy") + .policy_document(policy_doc) + .send() + .await + .expect("create policy"); + + let policy_arn = create + .policy() + .expect("policy") + .arn() + .expect("arn") + .to_string(); + + // Tag the policy + client + .tag_policy() + .policy_arn(&policy_arn) + .tags( + aws_sdk_iam::types::Tag::builder() + .key("CostCenter") + .value("12345") + .build() + .expect("tag"), + ) + .tags( + aws_sdk_iam::types::Tag::builder() + .key("Project") + .value("alpha") + .build() + .expect("tag"), + ) + .send() + .await + .expect("tag policy"); + + // List tags + let tags = client + .list_policy_tags() + .policy_arn(&policy_arn) + .send() + .await + .expect("list policy tags"); + + assert_eq!(tags.tags().len(), 2); + assert!( + tags.tags() + .iter() + .any(|t| t.key() == "CostCenter" && t.value() == "12345"), + "should contain CostCenter tag" + ); + assert!( + tags.tags() + .iter() + .any(|t| t.key() == "Project" && t.value() == "alpha"), + "should contain Project tag" + ); + + // Cleanup + client + .delete_policy() + .policy_arn(&policy_arn) + .send() + .await + .expect("delete policy"); +} + +#[tokio::test] +#[ignore = "requires running RustStack server"] +async fn test_should_tag_and_list_instance_profile_tags() { + let client = iam_client(); + + client + .create_instance_profile() + .instance_profile_name("tag-test-profile") + .send() + .await + .expect("create instance profile"); + + // Tag the instance profile + client + .tag_instance_profile() + .instance_profile_name("tag-test-profile") + .tags( + aws_sdk_iam::types::Tag::builder() + .key("Environment") + .value("staging") + .build() + .expect("tag"), + ) + .tags( + aws_sdk_iam::types::Tag::builder() + .key("Owner") + .value("platform-team") + .build() + .expect("tag"), + ) + .send() + .await + .expect("tag instance profile"); + + // List tags + let tags = client + .list_instance_profile_tags() + .instance_profile_name("tag-test-profile") + .send() + .await + .expect("list instance profile 
tags"); + + assert_eq!(tags.tags().len(), 2); + assert!( + tags.tags() + .iter() + .any(|t| t.key() == "Environment" && t.value() == "staging"), + "should contain Environment tag" + ); + assert!( + tags.tags() + .iter() + .any(|t| t.key() == "Owner" && t.value() == "platform-team"), + "should contain Owner tag" + ); + + // Cleanup + client + .delete_instance_profile() + .instance_profile_name("tag-test-profile") + .send() + .await + .expect("delete instance profile"); +} diff --git a/tests/integration/src/test_lambda.rs b/tests/integration/src/test_lambda.rs index 98a0261..d7e7d24 100644 --- a/tests/integration/src/test_lambda.rs +++ b/tests/integration/src/test_lambda.rs @@ -5,9 +5,9 @@ #[cfg(test)] mod tests { - use aws_sdk_lambda::primitives::Blob; - use aws_sdk_lambda::types::{ - Architecture, Environment, FunctionCode, FunctionUrlAuthType, Runtime, + use aws_sdk_lambda::{ + primitives::Blob, + types::{Architecture, Environment, FunctionCode, FunctionUrlAuthType, Runtime}, }; use crate::lambda_client; @@ -656,4 +656,438 @@ mod tests { cleanup_function(&client, &name).await; } + + // --------------------------------------------------------------------------- + // Layers: PublishLayerVersion + GetLayerVersion + // --------------------------------------------------------------------------- + + /// Minimal valid zip file (22-byte empty zip archive). + fn minimal_zip_blob() -> Blob { + Blob::new(vec![ + 80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]) + } + + /// Helper: generate a unique layer name. + fn layer_name(prefix: &str) -> String { + let id = uuid::Uuid::new_v4().to_string()[..8].to_owned(); + format!("test-layer-{prefix}-{id}") + } + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_publish_and_get_layer_version() { + use aws_sdk_lambda::types::LayerVersionContentInput; + + let client = lambda_client(); + let name = layer_name("pub-get"); + + // Publish a layer version. 
+ let published = client + .publish_layer_version() + .layer_name(&name) + .description("Test layer v1") + .content( + LayerVersionContentInput::builder() + .zip_file(minimal_zip_blob()) + .build(), + ) + .compatible_runtimes(Runtime::Python312) + .send() + .await + .expect("publish layer version should succeed"); + + assert_eq!(published.version(), 1); + assert!( + published.layer_arn().is_some(), + "layer ARN should be present" + ); + assert!( + published.layer_arn().unwrap_or_default().contains(&name), + "layer ARN should contain the layer name" + ); + assert_eq!(published.description(), Some("Test layer v1")); + + // Get the layer version. + let got = client + .get_layer_version() + .layer_name(&name) + .version_number(1) + .send() + .await + .expect("get layer version should succeed"); + + assert_eq!(got.version(), 1); + assert_eq!(got.description(), Some("Test layer v1")); + assert!(got.content().is_some()); + + // Cleanup: delete the layer version. + let _ = client + .delete_layer_version() + .layer_name(&name) + .version_number(1) + .send() + .await; + } + + // --------------------------------------------------------------------------- + // Layers: ListLayers + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_list_layers() { + use aws_sdk_lambda::types::LayerVersionContentInput; + + let client = lambda_client(); + let name = layer_name("list"); + + // Publish a layer version. + client + .publish_layer_version() + .layer_name(&name) + .description("Layer for listing") + .content( + LayerVersionContentInput::builder() + .zip_file(minimal_zip_blob()) + .build(), + ) + .send() + .await + .expect("publish layer should succeed"); + + // List layers and verify ours appears. 
+ let resp = client + .list_layers() + .send() + .await + .expect("list layers should succeed"); + + let found = resp + .layers() + .iter() + .any(|l| l.layer_name().is_some_and(|n| n == name)); + assert!(found, "published layer should appear in list_layers"); + + // Cleanup. + let _ = client + .delete_layer_version() + .layer_name(&name) + .version_number(1) + .send() + .await; + } + + // --------------------------------------------------------------------------- + // Layers: DeleteLayerVersion + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_delete_layer_version() { + use aws_sdk_lambda::types::LayerVersionContentInput; + + let client = lambda_client(); + let name = layer_name("delete"); + + // Publish a layer version. + client + .publish_layer_version() + .layer_name(&name) + .content( + LayerVersionContentInput::builder() + .zip_file(minimal_zip_blob()) + .build(), + ) + .send() + .await + .expect("publish layer should succeed"); + + // Delete the layer version. + client + .delete_layer_version() + .layer_name(&name) + .version_number(1) + .send() + .await + .expect("delete layer version should succeed"); + + // Getting the deleted layer version should fail. 
+ let err = client + .get_layer_version() + .layer_name(&name) + .version_number(1) + .send() + .await; + assert!( + err.is_err(), + "get deleted layer version should return error" + ); + } + + // --------------------------------------------------------------------------- + // Event Source Mappings: Create + List + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_create_and_list_event_source_mappings() { + let client = lambda_client(); + let name = create_test_function(&client, "esm-create").await; + + let fake_sqs_arn = format!( + "arn:aws:sqs:us-east-1:000000000000:test-queue-{}", + &uuid::Uuid::new_v4().to_string()[..8] + ); + + // Create an event source mapping. + let created = client + .create_event_source_mapping() + .function_name(&name) + .event_source_arn(&fake_sqs_arn) + .batch_size(10) + .enabled(true) + .send() + .await + .expect("create event source mapping should succeed"); + + let esm_uuid = created.uuid().expect("should have UUID").to_owned(); + assert_eq!(created.batch_size(), Some(10)); + assert_eq!(created.event_source_arn(), Some(fake_sqs_arn.as_str())); + + // List event source mappings for the function. + let resp = client + .list_event_source_mappings() + .function_name(&name) + .send() + .await + .expect("list event source mappings should succeed"); + + let found = resp + .event_source_mappings() + .iter() + .any(|m| m.uuid() == Some(esm_uuid.as_str())); + assert!(found, "created ESM should appear in list"); + + // Cleanup. 
+ let _ = client + .delete_event_source_mapping() + .uuid(&esm_uuid) + .send() + .await; + cleanup_function(&client, &name).await; + } + + // --------------------------------------------------------------------------- + // Event Source Mappings: Update + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_update_event_source_mapping() { + let client = lambda_client(); + let name = create_test_function(&client, "esm-update").await; + + let fake_sqs_arn = format!( + "arn:aws:sqs:us-east-1:000000000000:test-queue-{}", + &uuid::Uuid::new_v4().to_string()[..8] + ); + + // Create an event source mapping. + let created = client + .create_event_source_mapping() + .function_name(&name) + .event_source_arn(&fake_sqs_arn) + .batch_size(5) + .send() + .await + .expect("create ESM should succeed"); + + let esm_uuid = created.uuid().expect("should have UUID").to_owned(); + + // Update batch size. + let updated = client + .update_event_source_mapping() + .uuid(&esm_uuid) + .batch_size(20) + .send() + .await + .expect("update ESM should succeed"); + + assert_eq!(updated.batch_size(), Some(20)); + + // Get and verify. + let got = client + .get_event_source_mapping() + .uuid(&esm_uuid) + .send() + .await + .expect("get ESM should succeed"); + + assert_eq!(got.batch_size(), Some(20)); + + // Cleanup. 
+ let _ = client + .delete_event_source_mapping() + .uuid(&esm_uuid) + .send() + .await; + cleanup_function(&client, &name).await; + } + + // --------------------------------------------------------------------------- + // Event Source Mappings: Delete + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_delete_event_source_mapping() { + let client = lambda_client(); + let name = create_test_function(&client, "esm-delete").await; + + let fake_sqs_arn = format!( + "arn:aws:sqs:us-east-1:000000000000:test-queue-{}", + &uuid::Uuid::new_v4().to_string()[..8] + ); + + // Create an event source mapping. + let created = client + .create_event_source_mapping() + .function_name(&name) + .event_source_arn(&fake_sqs_arn) + .batch_size(10) + .send() + .await + .expect("create ESM should succeed"); + + let esm_uuid = created.uuid().expect("should have UUID").to_owned(); + + // Delete the event source mapping. + client + .delete_event_source_mapping() + .uuid(&esm_uuid) + .send() + .await + .expect("delete ESM should succeed"); + + // Getting the deleted ESM should fail. + let err = client + .get_event_source_mapping() + .uuid(&esm_uuid) + .send() + .await; + assert!(err.is_err(), "get deleted ESM should return error"); + + cleanup_function(&client, &name).await; + } + + // --------------------------------------------------------------------------- + // Function Concurrency: Put + Get + Delete + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_put_and_get_function_concurrency() { + let client = lambda_client(); + let name = create_test_function(&client, "concurrency").await; + + // Put function concurrency. 
+ let put_resp = client + .put_function_concurrency() + .function_name(&name) + .reserved_concurrent_executions(50) + .send() + .await + .expect("put concurrency should succeed"); + + assert_eq!(put_resp.reserved_concurrent_executions(), Some(50)); + + // Get function concurrency directly. + let get_resp = client + .get_function_concurrency() + .function_name(&name) + .send() + .await + .expect("get concurrency should succeed"); + + assert_eq!(get_resp.reserved_concurrent_executions(), Some(50)); + + // Delete function concurrency. + client + .delete_function_concurrency() + .function_name(&name) + .send() + .await + .expect("delete concurrency should succeed"); + + // Verify concurrency is removed — get_function_concurrency returns None or 0. + let after = client + .get_function_concurrency() + .function_name(&name) + .send() + .await + .expect("get concurrency after delete should succeed"); + + assert!( + after.reserved_concurrent_executions().is_none() + || after.reserved_concurrent_executions() == Some(0), + "concurrency should be cleared after deletion" + ); + + cleanup_function(&client, &name).await; + } + + // --------------------------------------------------------------------------- + // Event Invoke Config: Put + List + // --------------------------------------------------------------------------- + + #[tokio::test] + #[ignore = "requires running server"] + async fn test_should_put_and_list_event_invoke_config() { + let client = lambda_client(); + let name = create_test_function(&client, "invoke-cfg").await; + + // Put function event invoke config. + let put_resp = client + .put_function_event_invoke_config() + .function_name(&name) + .maximum_retry_attempts(1) + .maximum_event_age_in_seconds(300) + .send() + .await + .expect("put event invoke config should succeed"); + + assert_eq!(put_resp.maximum_retry_attempts(), Some(1)); + assert_eq!(put_resp.maximum_event_age_in_seconds(), Some(300)); + + // List function event invoke configs. 
+ let list_resp = client + .list_function_event_invoke_configs() + .function_name(&name) + .send() + .await + .expect("list event invoke configs should succeed"); + + let configs = list_resp.function_event_invoke_configs(); + assert!( + !configs.is_empty(), + "should have at least one event invoke config" + ); + + let our_config = configs + .iter() + .find(|c| c.function_arn().is_some_and(|arn| arn.contains(&name))) + .expect("should find config for our function"); + + assert_eq!(our_config.maximum_retry_attempts(), Some(1)); + assert_eq!(our_config.maximum_event_age_in_seconds(), Some(300)); + + // Cleanup: delete event invoke config, then function. + let _ = client + .delete_function_event_invoke_config() + .function_name(&name) + .send() + .await; + cleanup_function(&client, &name).await; + } } diff --git a/tests/integration/src/test_multipart.rs b/tests/integration/src/test_multipart.rs index 5beb7fc..2f6fe59 100644 --- a/tests/integration/src/test_multipart.rs +++ b/tests/integration/src/test_multipart.rs @@ -2,8 +2,10 @@ #[cfg(test)] mod tests { - use aws_sdk_s3::primitives::ByteStream; - use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart}; + use aws_sdk_s3::{ + primitives::ByteStream, + types::{CompletedMultipartUpload, CompletedPart}, + }; use crate::{cleanup_bucket, create_test_bucket, s3_client}; diff --git a/tests/integration/src/test_sts.rs b/tests/integration/src/test_sts.rs index a017f64..3089635 100644 --- a/tests/integration/src/test_sts.rs +++ b/tests/integration/src/test_sts.rs @@ -260,7 +260,8 @@ mod tests { // A minimal base64-encoded SAML assertion (not cryptographically valid). 
let saml_assertion = base64::Engine::encode( &base64::engine::general_purpose::STANDARD, - "user@example.com", + "user@example.com", ); let result = client diff --git a/tests/integration/src/test_versioning.rs b/tests/integration/src/test_versioning.rs index a4cdaac..8ceb99e 100644 --- a/tests/integration/src/test_versioning.rs +++ b/tests/integration/src/test_versioning.rs @@ -2,8 +2,7 @@ #[cfg(test)] mod tests { - use aws_sdk_s3::primitives::ByteStream; - use aws_sdk_s3::types::BucketVersioningStatus; + use aws_sdk_s3::{primitives::ByteStream, types::BucketVersioningStatus}; use crate::{cleanup_bucket, create_test_bucket, s3_client};