Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,3 +23,4 @@ alloy-signer-aws = { version = "1.0.23", features = ["eip712"] }
aws-config = "1.8.2"
aws-sdk-kms = "1.79.0"
aws-credential-types = "1.2.4"
moka = { version = "0.12", features = ["future"] }
89 changes: 87 additions & 2 deletions core/src/credentials.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,33 @@ use aws_config::{BehaviorVersion, Region};
use aws_credential_types::provider::future::ProvideCredentials as ProvideCredentialsFuture;
use aws_sdk_kms::config::{Credentials, ProvideCredentials};
use serde::{Deserialize, Serialize};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use thirdweb_core::auth::ThirdwebAuth;
use thirdweb_core::iaw::AuthToken;
use vault_types::enclave::auth::Auth as VaultAuth;

use crate::error::EngineError;

/// Cache for AWS KMS clients to avoid recreating connections
pub type KmsClientCache = moka::future::Cache<u64, aws_sdk_kms::Client>;

impl SigningCredential {
    /// Build a throwaway credential backed by a freshly generated private key.
    ///
    /// Intended for tests where a real KMS-backed credential is unnecessary.
    pub fn random_local() -> Self {
        Self::PrivateKey(PrivateKeySigner::random())
    }

    /// Attach a KMS client cache when this credential is the AWS KMS variant.
    ///
    /// The cache field is marked `#[serde(skip)]`, so credentials that were
    /// deserialized (e.g. from a queue payload) come back without one; call
    /// this to re-inject it. Any other variant is returned unchanged.
    pub fn with_aws_kms_cache(self, kms_client_cache: &KmsClientCache) -> Self {
        match self {
            Self::AwsKms(creds) => Self::AwsKms(creds.with_cache(kms_client_cache.clone())),
            other => other,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
Expand All @@ -38,6 +54,18 @@ pub struct AwsKmsCredential {
pub secret_access_key: String,
pub key_id: String,
pub region: String,
#[serde(skip)]
pub kms_client_cache: Option<KmsClientCache>,
}

impl Hash for AwsKmsCredential {
    /// Hash only the identity fields of the credential.
    ///
    /// The client cache is deliberately excluded: it is runtime state, not
    /// part of what makes two credentials "the same".
    fn hash<H: Hasher>(&self, state: &mut H) {
        (
            &self.access_key_id,
            &self.secret_access_key,
            &self.key_id,
            &self.region,
        )
            .hash(state);
    }
}

impl ProvideCredentials for AwsKmsCredential {
Expand All @@ -57,14 +85,71 @@ impl ProvideCredentials for AwsKmsCredential {
}

impl AwsKmsCredential {
pub async fn get_signer(&self, chain_id: Option<ChainId>) -> Result<AwsSigner, EngineError> {
/// Construct an AWS KMS credential with a client cache already attached.
pub fn new(
    access_key_id: String,
    secret_access_key: String,
    key_id: String,
    region: String,
    kms_client_cache: KmsClientCache,
) -> Self {
    // Build the credential first, then attach the cache through the same
    // path used after deserialization, so both routes stay in sync.
    Self {
        access_key_id,
        secret_access_key,
        key_id,
        region,
        kms_client_cache: None,
    }
    .with_cache(kms_client_cache)
}

/// Inject cache into this credential (useful after deserialization)
pub fn with_cache(mut self, kms_client_cache: KmsClientCache) -> Self {
self.kms_client_cache = Some(kms_client_cache);
self
}

/// Derive the cache key for this credential.
///
/// Relies on the manual `Hash` impl, which covers every field except the
/// cache itself, so identical credentials map to the same cached client.
fn cache_key(&self) -> u64 {
    let mut h = DefaultHasher::new();
    Hash::hash(self, &mut h);
    h.finish()
}

/// Create a fresh AWS KMS client, bypassing any cache.
///
/// Builds an AWS SDK config that uses this credential as its credentials
/// provider and the credential's configured region, then constructs a
/// client from it.
async fn create_kms_client(&self) -> Result<aws_sdk_kms::Client, EngineError> {
    let config = aws_config::defaults(BehaviorVersion::latest())
        .credentials_provider(self.clone())
        .region(Region::new(self.region.clone()))
        .load()
        .await;
    // Construct the client exactly once; the previous version built it
    // twice and left the first instance in an unused binding.
    Ok(aws_sdk_kms::Client::new(&config))
}

/// Fetch a KMS client, going through the cache when one is attached.
///
/// On a cache miss, the client is created inside `try_get_with` so that
/// concurrent callers for the same key share a single initialization.
async fn get_cached_kms_client(&self) -> Result<aws_sdk_kms::Client, EngineError> {
    let Some(cache) = &self.kms_client_cache else {
        // Fallback to creating a new client without caching
        tracing::debug!("No cache available, creating new KMS client");
        return self.create_kms_client().await;
    };

    let cache_key = self.cache_key();

    cache
        .try_get_with(cache_key, async {
            tracing::debug!("Creating new KMS client for key: {}", cache_key);
            self.create_kms_client().await
        })
        .await
        // moka wraps the initializer's error in an Arc; clone it back out.
        .map_err(|e| e.deref().clone())
}

/// Build an `AwsSigner` for this credential's KMS key.
///
/// The underlying KMS client is obtained through the cache when one is
/// attached; otherwise a fresh client is created.
pub async fn get_signer(&self, chain_id: Option<ChainId>) -> Result<AwsSigner, EngineError> {
    let client = self.get_cached_kms_client().await?;
    Ok(AwsSigner::new(client, self.key_id.clone(), chain_id).await?)
}
Expand Down
12 changes: 10 additions & 2 deletions executors/src/eoa/worker/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use alloy::primitives::{Address, U256};
use alloy::providers::Provider;
use engine_core::{
chain::{Chain, ChainService},
credentials::SigningCredential,
credentials::{SigningCredential, KmsClientCache},
error::AlloyRpcErrorToEngineError,
signer::EoaSigner,
};
Expand Down Expand Up @@ -127,6 +127,9 @@ where

// EOA metrics abstraction with encapsulated configuration
pub eoa_metrics: EoaMetrics,

// KMS client cache for AWS KMS credentials
pub kms_client_cache: KmsClientCache,
}

impl<CS> DurableExecution for EoaExecutorJobHandler<CS>
Expand Down Expand Up @@ -182,12 +185,15 @@ where

let chain_id = chain.chain_id();

// Inject KMS cache into the noop signing credential (after deserialization from Redis)
let noop_signing_credential = data.noop_signing_credential.clone().with_aws_kms_cache(&self.kms_client_cache);

let worker = EoaExecutorWorker {
store: scoped,
chain,
eoa: data.eoa_address,
chain_id: data.chain_id,
noop_signing_credential: data.noop_signing_credential.clone(),
noop_signing_credential,

max_inflight: if is_minimal_account {
1
Expand All @@ -199,6 +205,7 @@ where
max_recycled_nonces: self.max_recycled_nonces,
webhook_queue: self.webhook_queue.clone(),
signer: self.eoa_signer.clone(),
kms_client_cache: self.kms_client_cache.clone(),
};

let job_start_time = current_timestamp_ms();
Expand Down Expand Up @@ -311,6 +318,7 @@ pub struct EoaExecutorWorker<C: Chain> {

pub webhook_queue: Arc<Queue<WebhookJobHandler>>,
pub signer: Arc<EoaSigner>,
pub kms_client_cache: KmsClientCache,
}

impl<C: Chain> EoaExecutorWorker<C> {
Expand Down
8 changes: 6 additions & 2 deletions executors/src/eoa/worker/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ impl<C: Chain> EoaExecutorWorker<C> {
// Try EIP-1559 fees first, fall back to legacy if unsupported
match self.chain.provider().estimate_eip1559_fees().await {
Ok(eip1559_fees) => {
tracing::debug!(
tracing::trace!(
"Using EIP-1559 fees: max_fee={}, max_priority_fee={}",
eip1559_fees.max_fee_per_gas,
eip1559_fees.max_priority_fee_per_gas
Expand Down Expand Up @@ -397,7 +397,11 @@ impl<C: Chain> EoaExecutorWorker<C> {
nonce: u64,
) -> Result<Signed<TypedTransaction>, EoaExecutorWorkerError> {
let typed_tx = self.build_typed_transaction(request, nonce).await?;
self.sign_transaction(typed_tx, &request.signing_credential)

// Inject KMS cache into the signing credential (after deserialization from Redis)
let credential_with_cache = request.signing_credential.clone().with_aws_kms_cache(&self.kms_client_cache);

self.sign_transaction(typed_tx, &credential_with_cache)
.await
}

Expand Down
42 changes: 42 additions & 0 deletions justfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# Start local chain with anvil + speedbump proxy (300ms variable latency)
local-chain:
#!/usr/bin/env bash
set -e

echo "🧹 Cleaning up existing processes..."
# Kill any existing anvil or speedbump processes
pkill -f "anvil.*8546" || true
pkill -f "speedbump.*8545" || true
lsof -ti:8545 | xargs kill -9 2>/dev/null || true
lsof -ti:8546 | xargs kill -9 2>/dev/null || true
Comment on lines +8 to +11
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Cleanup logic may kill unrelated processes.

The pkill -f patterns and lsof | xargs kill -9 commands may be too broad and could terminate unrelated processes that happen to match the patterns or use the ports.

Consider using more precise process identification or maintaining PID files for safer cleanup.

-    pkill -f "anvil.*8546" || true
-    pkill -f "speedbump.*8545" || true
-    lsof -ti:8545 | xargs kill -9 2>/dev/null || true
-    lsof -ti:8546 | xargs kill -9 2>/dev/null || true
+    # Kill by port more safely
+    fuser -k 8545/tcp 2>/dev/null || true
+    fuser -k 8546/tcp 2>/dev/null || true


echo "🔨 Starting anvil on port 8546 (1s blocktime)..."
anvil --port 8546 --block-time 1 &
ANVIL_PID=$!

# Cleanup function
cleanup() {
echo ""
echo "🛑 Stopping services..."
kill $ANVIL_PID 2>/dev/null || true
pkill -f "speedbump.*8545" || true
exit 0
}

trap cleanup INT TERM EXIT

# Wait a moment for anvil to start
sleep 2

echo "🐌 Starting speedbump proxy on port 8545 (→ localhost:8546)"
echo " Latency: 300ms base + 150ms sine wave (150-450ms variable)"
echo " Connect to: http://localhost:8545"
echo ""
speedbump --port=8545 --latency=300ms --sine-amplitude=150ms --sine-period=1m localhost:8546

# Fund an address with 1 ETH (bypasses speedbump for faster setup)
# Usage: just fund 0x...  — sets the balance via the anvil_setBalance RPC,
# talking to anvil directly on port 8546 rather than the latency proxy on 8545.
# NOTE(review): assumes anvil is already running (see the local-chain recipe) —
# the success message prints regardless of whether the cast call succeeded.
fund address:
@echo "💰 Funding {{address}} with 1 ETH..."
@cast rpc anvil_setBalance {{address}} $(cast to-wei 1) --rpc-url http://localhost:8546
@echo "✅ Done!"
Comment on lines +38 to +41
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion | 🟠 Major

Add validation and error handling for address parameter.

The target doesn't validate that the address parameter is provided and doesn't handle errors from the cast command, which could result in a misleading success message.

 fund address:
+    @test -n "{{address}}" || (echo "❌ Error: address parameter is required" && exit 1)
     @echo "💰 Funding {{address}} with 1 ETH..."
-    @cast rpc anvil_setBalance {{address}} $(cast to-wei 1) --rpc-url http://localhost:8546
-    @echo "✅ Done!"
+    @if cast rpc anvil_setBalance {{address}} $(cast to-wei 1) --rpc-url http://localhost:8546; then \
+        echo "✅ Done!"; \
+    else \
+        echo "❌ Failed to fund address"; \
+        exit 1; \
+    fi
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
fund address:
@echo "💰 Funding {{address}} with 1 ETH..."
@cast rpc anvil_setBalance {{address}} $(cast to-wei 1) --rpc-url http://localhost:8546
@echo "✅ Done!"
fund address:
@test -n "{{address}}" || (echo "❌ Error: address parameter is required" && exit 1)
@echo "💰 Funding {{address}} with 1 ETH..."
@if cast rpc anvil_setBalance {{address}} $(cast to-wei 1) --rpc-url http://localhost:8546; then \
echo "✅ Done!"; \
else \
echo "❌ Failed to fund address"; \
exit 1; \
fi
🤖 Prompt for AI Agents
In justfile around lines 38 to 41, the target doesn't validate the address
parameter or check the result of the cast command; update the recipe to first
verify {{address}} is provided and matches an expected address pattern (or at
minimum non-empty), printing a clear error and exiting non-zero if
missing/invalid, then execute the cast rpc anvil_setBalance command and check
its exit status (do not print the success "✅ Done!" unless the cast command
succeeded); on failure forward the cast error output and exit with a non-zero
code so CI/consumers detect the failure.


11 changes: 0 additions & 11 deletions scripts/benchmarks/eoa.ts
Original file line number Diff line number Diff line change
@@ -1,14 +1,3 @@
/// <reference lib="dom" />

// Bun globals (runtime will provide these)
declare const Bun: any;
declare const process: any;

// Extend ImportMeta for Bun
interface ImportMeta {
dir: string;
}

// Types based on events.rs
interface WebhookEvent {
transactionId: string;
Expand Down
Loading