From 47f5a5511e8b66039045e0910b3e3a4e7f21bad7 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 11:27:07 +0000 Subject: [PATCH 01/23] Validation node works and tests are passing using the new test infra --- Cargo.lock | 171 ++++++- Cargo.toml | 1 + crates/op-rbuilder/Cargo.toml | 24 +- crates/op-rbuilder/src/args/op.rs | 28 +- crates/op-rbuilder/src/builders/context.rs | 4 +- crates/op-rbuilder/src/lib.rs | 6 + .../op-rbuilder/src/tests/framework/apis.rs | 181 ++++--- .../op-rbuilder/src/tests/framework/blocks.rs | 425 ---------------- .../op-rbuilder/src/tests/framework/driver.rs | 265 ++++++++++ .../src/tests/framework/external.rs | 475 ++++++++++++++++++ .../src/tests/framework/harness.rs | 19 +- .../src/tests/framework/instance.rs | 351 +++++++++++++ crates/op-rbuilder/src/tests/framework/mod.rs | 41 +- crates/op-rbuilder/src/tests/framework/txs.rs | 163 +++++- .../op-rbuilder/src/tests/framework/utils.rs | 135 +++++ crates/op-rbuilder/src/tests/mod.rs | 2 +- crates/op-rbuilder/src/tests/vanilla/mod.rs | 4 +- crates/op-rbuilder/src/tests/vanilla/smoke.rs | 181 ++++--- .../op-rbuilder/src/tests/vanilla/txpool.rs | 58 ++- 19 files changed, 1903 insertions(+), 631 deletions(-) delete mode 100644 crates/op-rbuilder/src/tests/framework/blocks.rs create mode 100644 crates/op-rbuilder/src/tests/framework/driver.rs create mode 100644 crates/op-rbuilder/src/tests/framework/external.rs create mode 100644 crates/op-rbuilder/src/tests/framework/instance.rs create mode 100644 crates/op-rbuilder/src/tests/framework/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 23614dfb5..51aa6e1bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1829,6 +1829,51 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "bollard" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af706e9dc793491dd382c99c22fde6e9934433d4cc0d6a4b34eb2cdc57a5c917" +dependencies = [ + "base64", + "bollard-stubs", + "bytes", + "futures-core", + "futures-util", + "hex", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-util", + "hyperlocal", + "log", + "pin-project-lite", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror 2.0.12", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-stubs" +version = "1.48.2-rc.28.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79cdf0fccd5341b38ae0be74b74410bdd5eceeea8876dc149a13edfe57e3b259" +dependencies = [ + "serde", + "serde_json", + "serde_repr", + "serde_with", +] + [[package]] name = "boyer-moore-magiclen" version = "0.2.20" @@ -2428,6 +2473,22 @@ dependencies = [ "subtle", ] +[[package]] +name = "ctor" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4735f265ba6a1188052ca32d461028a7d1125868be18e287e756019da7607b5" +dependencies = [ + "ctor-proc-macro", + "dtor", +] + +[[package]] +name = "ctor-proc-macro" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f211af61d8efdd104f96e57adf5e426ba1bc3ed7a4ead616e15e5881fd79c4d" + [[package]] name = "ctr" version = "0.9.2" @@ -2800,6 +2861,21 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "dtor" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"97cbdf2ad6846025e8e25df05171abfb30e3ababa12ee0a0e44b9bbe570633a8" +dependencies = [ + "dtor-proc-macro", +] + +[[package]] +name = "dtor-proc-macro" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7454e41ff9012c00d53cf7f475c5e3afa3b91b7c90568495495e8d9bf47a1055" + [[package]] name = "dunce" version = "1.0.5" @@ -3775,6 +3851,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + [[package]] name = "hyper-rustls" version = "0.27.5" @@ -3844,6 +3935,21 @@ dependencies = [ "tracing", ] +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "iana-time-zone" version = "0.1.63" @@ -5162,6 +5268,15 @@ dependencies = [ "unsigned-varint", ] +[[package]] +name = "nanoid" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" +dependencies = [ + "rand 0.8.5", +] + [[package]] name = "native-tls" version = "0.2.14" @@ -5504,9 +5619,12 @@ dependencies = [ "alloy-transport", "alloy-transport-http", "async-trait", + "bollard", "chrono", "clap", "clap_builder", + "ctor", + "dashmap 6.1.0", "derive_more", "eyre", "futures", @@ -5516,6 +5634,7 @@ dependencies = [ "jsonrpsee-types 0.25.1 (registry+https://github.com/rust-lang/crates.io-index)", "metrics", "moka", + "nanoid", "op-alloy-consensus", "op-alloy-flz", "op-alloy-network", @@ -5533,6 +5652,7 @@ dependencies = [ "reth-evm", "reth-execution-types", "reth-exex", + "reth-ipc 1.4.1", "reth-metrics", "reth-network-peers", "reth-node-api", @@ -5569,6 +5689,7 @@ dependencies = [ "serde_with", "serde_yaml", "shellexpand", + "tar", "thiserror 1.0.69", "tikv-jemallocator", "time", @@ -7109,12 +7230,17 @@ dependencies = [ "rayon", "reth-config", "reth-consensus", + "reth-db", + "reth-db-api", + "reth-ethereum-primitives", "reth-metrics", "reth-network-p2p", "reth-network-peers", "reth-primitives-traits", "reth-storage-api", "reth-tasks", + "reth-testing-utils", + "tempfile", "thiserror 2.0.12", "tokio", "tokio-stream", @@ -7250,6 +7376,7 @@ dependencies = [ "parking_lot", "rayon", "reth-chain-state", + "reth-chainspec", "reth-consensus", "reth-db", "reth-engine-primitives", @@ -7263,9 +7390,13 @@ dependencies = [ "reth-primitives-traits", "reth-provider", "reth-prune", + "reth-prune-types", "reth-revm", + "reth-stages", "reth-stages-api", + "reth-static-file", "reth-tasks", + "reth-tracing", "reth-trie", "reth-trie-db", "reth-trie-parallel", @@ -7739,6 +7870,26 @@ dependencies = [ "serde_json", ] +[[package]] +name = "reth-ipc" +version = "1.4.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.4.1#e6ce41ebba0d8752cef9ed885aae057e09226d05" +dependencies = [ + "bytes", + "futures", + "futures-util", + "interprocess", + "jsonrpsee 0.25.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-project", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tokio-util", + "tower 0.5.2", + 
"tracing", +] + [[package]] name = "reth-ipc" version = "1.4.7" @@ -7908,6 +8059,7 @@ dependencies = [ "auto_impl", "derive_more", "futures", + "parking_lot", "reth-consensus", "reth-eth-wire-types", "reth-ethereum-primitives", @@ -8538,6 +8690,7 @@ version = "1.4.7" source = "git+https://github.com/paradigmxyz/reth?tag=v1.4.7#dc7cb6e6670b0da294a0e5010e02855f5aaf6b49" dependencies = [ "alloy-consensus", + "alloy-primitives", "alloy-rpc-types", "futures-util", "metrics", @@ -8912,7 +9065,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-evm", - "reth-ipc", + "reth-ipc 1.4.7", "reth-metrics", "reth-network-api", "reth-node-core", @@ -9108,11 +9261,13 @@ dependencies = [ "num-traits", "rayon", "reqwest", + "reth-chainspec", "reth-codecs", "reth-config", "reth-consensus", "reth-db", "reth-db-api", + "reth-ethereum-primitives", "reth-etl", "reth-evm", "reth-execution-types", @@ -9127,9 +9282,11 @@ dependencies = [ "reth-stages-api", "reth-static-file-types", "reth-storage-errors", + "reth-testing-utils", "reth-trie", "reth-trie-db", "serde", + "tempfile", "thiserror 2.0.12", "tokio", "tracing", @@ -9322,6 +9479,7 @@ dependencies = [ "futures-util", "metrics", "parking_lot", + "paste", "rand 0.9.0", "reth-chain-state", "reth-chainspec", @@ -10218,6 +10376,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "serde_spanned" version = "0.6.8" diff --git a/Cargo.toml b/Cargo.toml index 3376461fd..5ad1c242e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.4. 
reth-testing-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.4.7" } reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.4.7" } reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.4.7" } +reth-ipc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.4.1" } # reth optimism reth-optimism-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.4.7" } diff --git a/crates/op-rbuilder/Cargo.toml b/crates/op-rbuilder/Cargo.toml index 1326c6bec..98788ddc1 100644 --- a/crates/op-rbuilder/Cargo.toml +++ b/crates/op-rbuilder/Cargo.toml @@ -106,6 +106,12 @@ http = "1.0" rollup-boost = { git = "http://github.com/flashbots/rollup-boost", branch = "main" } +dashmap = { version = "6.1", optional = true } +nanoid = { version = "0.4", optional = true } +reth-ipc = { workspace = true, optional = true } +bollard = { version = "0.19", optional = true } +tar = { version = "0.4", optional = true } + [target.'cfg(unix)'.dependencies] tikv-jemallocator = { version = "0.6", optional = true } @@ -118,6 +124,14 @@ alloy-provider = { workspace = true, default-features = true, features = [ "txpool-api", ] } +dashmap = { version = "6.1" } +nanoid = { version = "0.4" } +reth-ipc = { workspace = true } +reth-node-builder = { workspace = true, features = ["test-utils"] } +bollard = "0.19" +ctor = "0.4.2" +tar = "0.4" + [features] default = ["jemalloc"] @@ -139,7 +153,15 @@ min-info-logs = ["tracing/release_max_level_info"] min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] -testing = [] + +testing = [ + "dashmap", + "nanoid", + "reth-ipc", + "reth-node-builder/test-utils", + "bollard", +] + interop = [] diff --git a/crates/op-rbuilder/src/args/op.rs b/crates/op-rbuilder/src/args/op.rs index d4b92e589..87f46d691 100644 --- a/crates/op-rbuilder/src/args/op.rs +++ b/crates/op-rbuilder/src/args/op.rs @@ -3,13 +3,15 @@ //! Copied from OptimismNode to allow easy extension. //! clap [Args](clap::Args) for optimism rollup configuration -use std::path::PathBuf; use crate::tx_signer::Signer; +use clap::Parser; +use reth_optimism_cli::commands::Commands; use reth_optimism_node::args::RollupArgs; +use std::path::PathBuf; /// Parameters for rollup configuration -#[derive(Debug, Clone, Default, PartialEq, Eq, clap::Args)] +#[derive(Debug, Clone, PartialEq, Eq, clap::Args)] #[command(next_help_heading = "Rollup")] pub struct OpRbuilderArgs { /// Rollup configuration @@ -51,6 +53,16 @@ pub struct OpRbuilderArgs { pub flashblocks: FlashblocksArgs, } +impl Default for OpRbuilderArgs { + fn default() -> Self { + let args = crate::args::Cli::parse_from(["dummy", "node"]); + let Commands::Node(node_command) = args.command else { + unreachable!() + }; + node_command.ext + } +} + fn expand_path(s: &str) -> Result { shellexpand::full(s) .map_err(|e| format!("expansion error for `{s}`: {e}"))? @@ -63,7 +75,7 @@ fn expand_path(s: &str) -> Result { /// The names in the struct are prefixed with `flashblocks` to avoid conflicts /// with the standard block building configuration since these args are flattened /// into the main `OpRbuilderArgs` struct with the other rollup/node args. -#[derive(Debug, Clone, Default, PartialEq, Eq, clap::Args)] +#[derive(Debug, Clone, PartialEq, Eq, clap::Args)] pub struct FlashblocksArgs { /// When set to true, the builder will build flashblocks /// and will build standard blocks at the chain block time. 
@@ -101,3 +113,13 @@ pub struct FlashblocksArgs { )] pub flashblocks_block_time: u64, } + +impl Default for FlashblocksArgs { + fn default() -> Self { + let args = crate::args::Cli::parse_from(["dummy", "node"]); + let Commands::Node(node_command) = args.command else { + unreachable!() + }; + node_command.ext.flashblocks + } +} diff --git a/crates/op-rbuilder/src/builders/context.rs b/crates/op-rbuilder/src/builders/context.rs index 6cf26cab1..d6eb5466a 100644 --- a/crates/op-rbuilder/src/builders/context.rs +++ b/crates/op-rbuilder/src/builders/context.rs @@ -32,7 +32,7 @@ use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction}; use revm::{context::result::ResultAndState, Database, DatabaseCommit}; use std::{sync::Arc, time::Instant}; use tokio_util::sync::CancellationToken; -use tracing::{info, trace, warn}; +use tracing::{debug, info, trace, warn}; use crate::{ metrics::OpRBuilderMetrics, @@ -350,7 +350,7 @@ impl OpPayloadBuilderCtx { let tx_hash = tx.tx_hash(); let log_txn = |result: TxnExecutionResult| { - info!(target: "payload_builder", tx_hash = ?tx_hash, tx_da_size = ?tx_da_size, exclude_reverting_txs = ?exclude_reverting_txs, result = %result, "Considering transaction"); + debug!(target: "payload_builder", tx_hash = ?tx_hash, tx_da_size = ?tx_da_size, exclude_reverting_txs = ?exclude_reverting_txs, result = %result, "Considering transaction"); }; // TODO: remove this condition and feature once we are comfortable enabling interop for everything diff --git a/crates/op-rbuilder/src/lib.rs b/crates/op-rbuilder/src/lib.rs index f377643f9..b574d1a2c 100644 --- a/crates/op-rbuilder/src/lib.rs +++ b/crates/op-rbuilder/src/lib.rs @@ -1,4 +1,10 @@ +pub mod args; +pub mod builders; +pub mod metrics; pub mod primitives; +pub mod revert_protection; +pub mod traits; +pub mod tx; pub mod tx_signer; #[cfg(any(test, feature = "testing"))] diff --git a/crates/op-rbuilder/src/tests/framework/apis.rs b/crates/op-rbuilder/src/tests/framework/apis.rs index 19b63e876..d93572070 100644 --- a/crates/op-rbuilder/src/tests/framework/apis.rs +++ b/crates/op-rbuilder/src/tests/framework/apis.rs @@ -1,8 +1,9 @@ use super::DEFAULT_JWT_TOKEN; use alloy_eips::{eip7685::Requests, BlockNumberOrTag}; use alloy_primitives::B256; + use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus}; -use http::Uri; +use core::{future::Future, marker::PhantomData}; use jsonrpsee::{ core::{client::SubscriptionClientT, RpcResult}, proc_macros::rpc, @@ -15,84 +16,143 @@ use reth_optimism_rpc::engine::OpEngineApiClient; use reth_payload_builder::PayloadId; use reth_rpc_layer::{AuthClientLayer, JwtSecret}; use serde_json::Value; -use std::str::FromStr; +use tracing::debug; -/// Helper for engine api operations -pub struct EngineApi { - pub url: Uri, - pub jwt_secret: JwtSecret, +#[derive(Clone, Debug)] +pub enum Address { + Ipc(String), + Http(url::Url), +} + +pub trait Protocol { + fn client( + jwt: JwtSecret, + address: Address, + ) -> impl Future; +} + +pub struct Http; +impl Protocol for Http { + async fn client( + jwt: JwtSecret, + address: Address, + ) -> impl SubscriptionClientT + Send + Sync + Unpin + 'static { + let Address::Http(url) = address else { + unreachable!(); + }; + + let secret_layer = AuthClientLayer::new(jwt); + let middleware = tower::ServiceBuilder::default().layer(secret_layer); + jsonrpsee::http_client::HttpClientBuilder::default() + .set_http_middleware(middleware) + .build(url) + .expect("Failed to create http client") + } +} + +pub struct Ipc; +impl Protocol for Ipc { + async fn 
client( + _: JwtSecret, // ipc does not use JWT + address: Address, + ) -> impl SubscriptionClientT + Send + Sync + Unpin + 'static { + let Address::Ipc(path) = address else { + unreachable!(); + }; + reth_ipc::client::IpcClientBuilder::default() + .build(&path) + .await + .expect("Failed to create ipc client") + } } -/// Builder for EngineApi configuration -pub struct EngineApiBuilder { - url: String, - jwt_secret: String, +/// Helper for engine api operations +pub struct EngineApi { + address: Address, + jwt_secret: JwtSecret, + _tag: PhantomData
<P>,
 }
 
-impl Default for EngineApiBuilder {
-    fn default() -> Self {
-        Self::new()
+impl<P: Protocol> EngineApi<P>
{ + async fn client(&self) -> impl SubscriptionClientT + Send + Sync + Unpin + 'static { + P::client(self.jwt_secret, self.address.clone()).await } } -impl EngineApiBuilder { - pub fn new() -> Self { - Self { - url: String::from("http://localhost:8551"), - jwt_secret: String::from(DEFAULT_JWT_TOKEN), +// http specific +impl EngineApi { + pub fn with_http(url: &str) -> EngineApi { + EngineApi:: { + address: Address::Http(url.parse().expect("Invalid URL")), + jwt_secret: DEFAULT_JWT_TOKEN.parse().expect("Invalid JWT"), + _tag: PhantomData, } } - pub fn with_url(mut self, url: &str) -> Self { - self.url = url.to_string(); - self + pub fn with_localhost_port(port: u16) -> EngineApi { + EngineApi:: { + address: Address::Http( + format!("http://localhost:{}", port) + .parse() + .expect("Invalid URL"), + ), + jwt_secret: DEFAULT_JWT_TOKEN.parse().expect("Invalid JWT"), + _tag: PhantomData, + } } - pub fn build(self) -> Result> { - Ok(EngineApi { - url: self.url.parse()?, - jwt_secret: JwtSecret::from_str(&self.jwt_secret)?, - }) + pub fn with_port(mut self, port: u16) -> Self { + let Address::Http(url) = &mut self.address else { + unreachable!(); + }; + + url.set_port(Some(port)).expect("Invalid port"); + self } -} -impl EngineApi { - pub fn builder() -> EngineApiBuilder { - EngineApiBuilder::new() + pub fn with_jwt_secret(mut self, jwt_secret: &str) -> Self { + self.jwt_secret = jwt_secret.parse().expect("Invalid JWT"); + self } - pub fn new(url: &str) -> Result> { - Self::builder().with_url(url).build() + pub fn url(&self) -> &url::Url { + let Address::Http(url) = &self.address else { + unreachable!(); + }; + url } +} - pub fn new_with_port(port: u16) -> Result> { - Self::builder() - .with_url(&format!("http://localhost:{port}")) - .build() +// ipc specific +impl EngineApi { + pub fn with_ipc(path: &str) -> EngineApi { + EngineApi:: { + address: Address::Ipc(path.into()), + jwt_secret: DEFAULT_JWT_TOKEN.parse().expect("Invalid JWT"), + _tag: PhantomData, + } } - pub fn http_client(&self) -> impl SubscriptionClientT + Clone + Send + Sync + Unpin + 'static { - // Create a middleware that adds a new JWT token to every request. - let secret_layer = AuthClientLayer::new(self.jwt_secret); - let middleware = tower::ServiceBuilder::default().layer(secret_layer); - jsonrpsee::http_client::HttpClientBuilder::default() - .set_http_middleware(middleware) - .build(&self.url.to_string()) - .expect("Failed to create http client") + pub fn path(&self) -> &str { + let Address::Ipc(path) = &self.address else { + unreachable!(); + }; + path } +} +impl EngineApi

{ pub async fn get_payload( &self, payload_id: PayloadId, ) -> eyre::Result<::ExecutionPayloadEnvelopeV4> { - println!( + debug!( "Fetching payload with id: {} at {}", payload_id, chrono::Utc::now() ); - Ok( - OpEngineApiClient::::get_payload_v4(&self.http_client(), payload_id) + OpEngineApiClient::::get_payload_v4(&self.client().await, payload_id) .await?, ) } @@ -104,10 +164,9 @@ impl EngineApi { parent_beacon_block_root: B256, execution_requests: Requests, ) -> eyre::Result { - println!("Submitting new payload at {}...", chrono::Utc::now()); - + debug!("Submitting new payload at {}...", chrono::Utc::now()); Ok(OpEngineApiClient::::new_payload_v4( - &self.http_client(), + &self.client().await, payload, versioned_hashes, parent_beacon_block_root, @@ -122,10 +181,9 @@ impl EngineApi { new_head: B256, payload_attributes: Option<::PayloadAttributes>, ) -> eyre::Result { - println!("Updating forkchoice at {}...", chrono::Utc::now()); - + debug!("Updating forkchoice at {}...", chrono::Utc::now()); Ok(OpEngineApiClient::::fork_choice_updated_v3( - &self.http_client(), + &self.client().await, ForkchoiceState { head_block_hash: new_head, safe_block_hash: current_head, @@ -135,19 +193,6 @@ impl EngineApi { ) .await?) } - - pub async fn latest(&self) -> eyre::Result> { - self.get_block_by_number(BlockNumberOrTag::Latest, false) - .await - } - - pub async fn get_block_by_number( - &self, - number: BlockNumberOrTag, - include_txs: bool, - ) -> eyre::Result> { - Ok(BlockApiClient::get_block_by_number(&self.http_client(), number, include_txs).await?) - } } #[rpc(server, client, namespace = "eth")] @@ -177,9 +222,9 @@ pub async fn generate_genesis(output: Option) -> eyre::Result<()> { // Write the result to the output file if let Some(output) = output { std::fs::write(&output, serde_json::to_string_pretty(&genesis)?)?; - println!("Generated genesis file at: {output}"); + debug!("Generated genesis file at: {output}"); } else { - println!("{}", serde_json::to_string_pretty(&genesis)?); + debug!("{}", serde_json::to_string_pretty(&genesis)?); } Ok(()) diff --git a/crates/op-rbuilder/src/tests/framework/blocks.rs b/crates/op-rbuilder/src/tests/framework/blocks.rs deleted file mode 100644 index f6094679f..000000000 --- a/crates/op-rbuilder/src/tests/framework/blocks.rs +++ /dev/null @@ -1,425 +0,0 @@ -use std::{ - net::{IpAddr, SocketAddr}, - str::FromStr, -}; - -use crate::tx_signer::Signer; -use alloy_eips::{eip2718::Encodable2718, eip7685::Requests, BlockNumberOrTag}; -use alloy_primitives::{address, hex, Address, Bytes, TxKind, B256, U256}; -use alloy_rpc_types_engine::{ - ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadAttributes, - PayloadStatusEnum, -}; -use alloy_rpc_types_eth::Block; -use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; -use op_alloy_rpc_types_engine::{OpExecutionPayloadV4, OpPayloadAttributes}; -use rollup_boost::{ - Flashblocks, FlashblocksService, OpExecutionPayloadEnvelope, PayloadSource, PayloadVersion, - RpcClient, -}; -use url::Url; - -use super::apis::EngineApi; - -// L1 block info for OP mainnet block 124665056 (stored in input of tx at index 0) -// -// https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1 -const FJORD_DATA: &[u8] = 
&hex!("440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"); - -/// A system that continuously generates blocks using the engine API -pub struct BlockGenerator { - engine_api: EngineApi, - validation_api: Option, - latest_hash: B256, - no_tx_pool: bool, - block_time_secs: u64, - timestamp: u64, - // flashblocks service - flashblocks_endpoint: Option, - flashblocks_service: Option, -} - -impl BlockGenerator { - pub fn new( - engine_api: EngineApi, - validation_api: Option, - no_tx_pool: bool, - block_time_secs: u64, - flashblocks_endpoint: Option, - ) -> Self { - Self { - engine_api, - validation_api, - latest_hash: B256::ZERO, // temporary value - no_tx_pool, - timestamp: 0, - block_time_secs, - flashblocks_endpoint, - flashblocks_service: None, - } - } - - /// Initialize the block generator by fetching the latest block - pub async fn init(&mut self) -> eyre::Result { - let latest_block = self.engine_api.latest().await?.expect("block not found"); - self.latest_hash = latest_block.header.hash; - self.timestamp = latest_block.header.timestamp; - - // Sync validation node if it exists - if let Some(validation_api) = &self.validation_api { - self.sync_validation_node(validation_api).await?; - } - - // Initialize flashblocks service - if let Some(flashblocks_endpoint) = &self.flashblocks_endpoint { - println!("Initializing flashblocks service at {flashblocks_endpoint}"); - let builder_client = RpcClient::new( - self.engine_api.url.clone(), - self.engine_api.jwt_secret.clone(), - 10, - PayloadSource::Builder, - )?; - - self.flashblocks_service = Some(Flashblocks::run( - builder_client, - Url::from_str(flashblocks_endpoint)?, - SocketAddr::new(IpAddr::from_str("127.0.0.1")?, 1112), // output address for the preconfirmations from rb - )?); - } - - Ok(latest_block) - } - - /// Sync the validation node to the current state - async fn sync_validation_node(&self, validation_api: &EngineApi) -> eyre::Result<()> { - let latest_validation_block = validation_api.latest().await?.expect("block not found"); - let latest_block = self.engine_api.latest().await?.expect("block not found"); - - if latest_validation_block.header.number > latest_block.header.number { - return Err(eyre::eyre!("validation node is ahead of the builder")); - } - - if latest_validation_block.header.number < latest_block.header.number { - println!( - "validation node {} is behind the builder {}, syncing up", - latest_validation_block.header.number, latest_block.header.number - ); - - let mut latest_hash = latest_validation_block.header.hash; - - for i in (latest_validation_block.header.number + 1)..=latest_block.header.number { - println!("syncing block {i}"); - - let block = self - .engine_api - .get_block_by_number(BlockNumberOrTag::Number(i), true) - .await? 
- .expect("block not found"); - - if block.header.parent_hash != latest_hash { - return Err(eyre::eyre!("unexpected parent hash during sync")); - } - - let payload_request = OpExecutionPayloadV4 { - payload_inner: ExecutionPayloadV3 { - payload_inner: ExecutionPayloadV2 { - payload_inner: ExecutionPayloadV1 { - parent_hash: block.header.parent_hash, - fee_recipient: block.header.beneficiary, - state_root: block.header.state_root, - receipts_root: block.header.receipts_root, - logs_bloom: block.header.logs_bloom, - prev_randao: B256::ZERO, - block_number: block.header.number, - gas_limit: block.header.gas_limit, - gas_used: block.header.gas_used, - timestamp: block.header.timestamp, - extra_data: block.header.extra_data.clone(), - base_fee_per_gas: U256::from( - block.header.base_fee_per_gas.unwrap(), - ), - block_hash: block.header.hash, - transactions: vec![], // there are no txns yet - }, - withdrawals: block.withdrawals.unwrap().to_vec(), - }, - blob_gas_used: block.header.inner.blob_gas_used.unwrap(), - excess_blob_gas: block.header.inner.excess_blob_gas.unwrap(), - }, - withdrawals_root: Default::default(), - }; - - let validation_status = validation_api - .new_payload(payload_request, vec![], B256::ZERO, Requests::default()) - .await?; - - if validation_status.status != PayloadStatusEnum::Valid { - return Err(eyre::eyre!("invalid payload status during sync")); - } - - let new_chain_hash = validation_status - .latest_valid_hash - .ok_or_else(|| eyre::eyre!("missing latest valid hash"))?; - - if new_chain_hash != block.header.hash { - return Err(eyre::eyre!("hash mismatch during sync")); - } - - validation_api - .update_forkchoice(latest_hash, new_chain_hash, None) - .await?; - - latest_hash = new_chain_hash; - } - } - - Ok(()) - } - - /// Helper function to submit a payload and update chain state - pub async fn submit_payload( - &mut self, - transactions: Option>, - block_building_delay_secs: u64, - no_sleep: bool, // TODO: Change this, too many parameters we can tweak here to put as a function arguments - ) -> eyre::Result { - let timestamp = self.timestamp + self.block_time_secs; - - // Add L1 block info as the first transaction in every L2 block - // This deposit transaction contains L1 block metadata required by the L2 chain - // Currently using hardcoded data from L1 block 124665056 - // If this info is not provided, Reth cannot decode the receipt for any transaction - // in the block since it also includes this info as part of the result. - // It does not matter if the to address (4200000000000000000000000000000000000015) is - // not deployed on the L2 chain since Reth queries the block to get the info and not the contract. 
- let block_info_tx: Bytes = { - let deposit_tx = TxDeposit { - source_hash: B256::default(), - from: address!("DeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001"), - to: TxKind::Call(address!("4200000000000000000000000000000000000015")), - mint: 0, - value: U256::default(), - gas_limit: 210000, - is_system_transaction: false, - input: FJORD_DATA.into(), - }; - - // Create a temporary signer for the deposit - let signer = Signer::random(); - let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit_tx))?; - signed_tx.encoded_2718().into() - }; - - let transactions = if let Some(transactions) = transactions { - // prepend the block info transaction - let mut all_transactions = vec![block_info_tx]; - all_transactions.extend(transactions.into_iter()); - all_transactions - } else { - vec![block_info_tx] - }; - - let result = self - .engine_api - .update_forkchoice( - self.latest_hash, - self.latest_hash, - Some(OpPayloadAttributes { - payload_attributes: PayloadAttributes { - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Default::default(), - }, - transactions: Some(transactions), - no_tx_pool: Some(self.no_tx_pool), - gas_limit: Some(10000000), - eip_1559_params: None, - }), - ) - .await?; - - if result.payload_status.status != PayloadStatusEnum::Valid { - return Err(eyre::eyre!("Invalid payload status")); - } - - let payload_id = result.payload_id.unwrap(); - - // update the payload id in the flashblocks service if present - if let Some(flashblocks_service) = &self.flashblocks_service { - flashblocks_service.set_current_payload_id(payload_id).await; - } - - if !self.no_tx_pool && !no_sleep { - let sleep_time = self.block_time_secs + block_building_delay_secs; - tokio::time::sleep(tokio::time::Duration::from_secs(sleep_time)).await; - } - - let payload = if let Some(flashblocks_service) = &self.flashblocks_service { - flashblocks_service - .get_best_payload(PayloadVersion::V4) - .await? - .unwrap() - } else { - OpExecutionPayloadEnvelope::V4(self.engine_api.get_payload(payload_id).await?) 
- }; - - let execution_payload = match payload { - OpExecutionPayloadEnvelope::V4(execution_payload) => { - execution_payload.execution_payload - } - _ => { - return Err(eyre::eyre!("execution_payload should be V4")); - } - }; - - // Validate with builder node - let validation_status = self - .engine_api - .new_payload( - execution_payload.clone(), - vec![], - B256::ZERO, - Requests::default(), - ) - .await?; - - if validation_status.status != PayloadStatusEnum::Valid { - return Err(eyre::eyre!("Invalid validation status from builder")); - } - - // Validate with validation node if present - if let Some(validation_api) = &self.validation_api { - let validation_status = validation_api - .new_payload( - execution_payload.clone(), - vec![], - B256::ZERO, - Requests::default(), - ) - .await?; - - if validation_status.status != PayloadStatusEnum::Valid { - return Err(eyre::eyre!("Invalid validation status from validator")); - } - } - - let new_block_hash = execution_payload - .payload_inner - .payload_inner - .payload_inner - .block_hash; - - // Update forkchoice on builder - self.engine_api - .update_forkchoice(self.latest_hash, new_block_hash, None) - .await?; - - // Update forkchoice on validator if present - if let Some(validation_api) = &self.validation_api { - validation_api - .update_forkchoice(self.latest_hash, new_block_hash, None) - .await?; - } - - // Update internal state - self.latest_hash = new_block_hash; - self.timestamp = execution_payload.payload_inner.timestamp(); - - let block = self - .engine_api - .get_block_by_number(BlockNumberOrTag::Latest, false) - .await? - .expect("block not found"); - - assert_eq!(block.header.hash, new_block_hash); - - let generated_block = BlockGenerated { block }; - Ok(generated_block) - } - - /// Generate a single new block and return its hash - pub async fn generate_block(&mut self) -> eyre::Result { - self.submit_payload(None, 0, false).await - } - - pub async fn generate_block_with_delay(&mut self, delay: u64) -> eyre::Result { - self.submit_payload(None, delay, false).await - } - - /// Submit a deposit transaction to seed an account with ETH - pub async fn deposit(&mut self, address: Address, value: u128) -> eyre::Result { - // Create deposit transaction - let deposit_tx = TxDeposit { - source_hash: B256::default(), - from: address, // Set the sender to the address of the account to seed - to: TxKind::Create, - mint: value, // Amount to deposit - value: U256::default(), - gas_limit: 210000, - is_system_transaction: false, - input: Bytes::default(), - }; - - // Create a temporary signer for the deposit - let signer = Signer::random(); - let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit_tx))?; - let signed_tx_rlp = signed_tx.encoded_2718(); - - self.submit_payload(Some(vec![signed_tx_rlp.into()]), 0, false) - .await - } - - pub async fn create_funded_accounts( - &mut self, - count: usize, - amount: u128, - ) -> eyre::Result> { - let mut signers = Vec::with_capacity(count); - - for _ in 0..count { - // Create a new signer - let signer = Signer::random(); - let address = signer.address; - - // Deposit funds to the new account - self.deposit(address, amount).await?; - - signers.push(signer); - } - - Ok(signers) - } -} - -#[derive(Debug)] -pub struct BlockGenerated { - pub block: Block, -} - -impl BlockGenerated { - pub fn block_hash(&self) -> B256 { - self.block.header.hash - } - - pub fn not_includes(&self, tx_hash: B256) -> bool { - !self.includes(tx_hash) - } - - pub fn includes(&self, tx_hash: B256) -> bool { - 
self.block.transactions.hashes().any(|hash| hash == tx_hash) - } - - pub fn includes_vec(&self, tx_hashes: Vec) -> bool { - tx_hashes.iter().all(|hash| self.includes(*hash)) - } - - pub fn not_includes_vec(&self, tx_hashes: Vec) -> bool { - tx_hashes.iter().all(|hash| self.not_includes(*hash)) - } - - pub fn num_transactions(&self) -> usize { - self.block.transactions.len() - } -} diff --git a/crates/op-rbuilder/src/tests/framework/driver.rs b/crates/op-rbuilder/src/tests/framework/driver.rs new file mode 100644 index 000000000..a5e2591a3 --- /dev/null +++ b/crates/op-rbuilder/src/tests/framework/driver.rs @@ -0,0 +1,265 @@ +use core::time::Duration; +use std::time::SystemTime; + +use alloy_eips::{eip7685::Requests, BlockNumberOrTag, Encodable2718}; +use alloy_primitives::{address, hex, Bytes, TxKind, B256, U256}; +use alloy_provider::{Provider, RootProvider}; +use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadAttributes, PayloadStatusEnum}; +use alloy_rpc_types_eth::Block; +use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; +use op_alloy_network::Optimism; +use op_alloy_rpc_types::Transaction; +use reth_optimism_node::OpPayloadAttributes; +use rollup_boost::OpExecutionPayloadEnvelope; + +use super::{EngineApi, Ipc, LocalInstance, TransactionBuilder}; +use crate::{args::OpRbuilderArgs, tests::ExternalNode, tx_signer::Signer}; + +const DEFAULT_GAS_LIMIT: u64 = 10_000_000; + +/// The ChainDriver is a type that allows driving the op builder node to build new blocks manually +/// by calling the `build_new_block` method. It uses the Engine API to interact with the node +/// and the provider to fetch blocks and transactions. +pub struct ChainDriver { + engine_api: EngineApi, + provider: RootProvider, + signer: Option, + gas_limit: Option, + args: OpRbuilderArgs, + validation_nodes: Vec, +} + +// instantiation and configuration +impl ChainDriver { + const MIN_BLOCK_TIME: Duration = Duration::from_secs(1); + + pub async fn new(instance: &LocalInstance) -> eyre::Result { + Ok(Self { + engine_api: instance.engine_api(), + provider: instance.provider().await?, + signer: Default::default(), + gas_limit: None, + args: instance.args().clone(), + validation_nodes: vec![], + }) + } + + /// Specifies the block builder signing key used to sign builder transactions. + /// If not specified, a random signer will be used. + pub fn with_signer(mut self, signer: Signer) -> Self { + self.signer = Some(signer); + self + } + + /// Specifies a custom gas limit for blocks being built, otherwise the limit is + /// set to a default value of 10_000_000. + pub fn with_gas_limit(mut self, gas_limit: u64) -> Self { + self.gas_limit = Some(gas_limit); + self + } + + /// Adds an external Optimism execution client node that will receive all newly built + /// blocks by this driver and ensure that they are valid. This validation process is + /// transparent and happens in the background when building new blocks. + /// + /// If there are validation nodes specified any newly built block will be submitted to + /// the validation EL and the driver will fail if the block is rejected by the + /// validation node. + pub async fn with_validation_node(mut self, node: ExternalNode) -> eyre::Result { + node.catch_up_with(self.provider()).await?; + self.validation_nodes.push(node); + Ok(self) + } +} + +// public test api +impl ChainDriver { + /// Builds a new block using the current state of the chain and the transactions in the pool. 
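+    ///
+    /// A minimal usage sketch (illustrative only; it assumes a running
+    /// `LocalInstance` bound to a variable named `instance`):
+    ///
+    /// ```ignore
+    /// let driver = ChainDriver::new(&instance).await?;
+    /// let block = driver.build_new_block().await?;
+    /// // the first transaction is always the mandatory L1 block-info deposit
+    /// assert!(block.transactions.len() >= 1);
+    /// ```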
+ pub async fn build_new_block(&self) -> eyre::Result> { + self.build_new_block_with_txs(vec![]).await + } + + /// Builds a new block using the current state of the chain and the transactions in the pool with a list + /// of mandatory builder transactions. Those are usually deposit transactions. + pub async fn build_new_block_with_txs( + &self, + txs: Vec, + ) -> eyre::Result> { + let latest = self.latest().await?; + let latest_timestamp = Duration::from_secs(latest.header.timestamp); + let actual_timestamp = SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map_err(|_| eyre::eyre!("Failed to get current system time"))?; + + // block timestamp will be the max of the actual timestamp and the latest block + // timestamp plus the minimum block time. This ensures that blocks don't break any + // assumptions, but also gives the test author the ability to control the block time + // in the test. + let block_timestamp = actual_timestamp.max(latest_timestamp + Self::MIN_BLOCK_TIME); + + // Add L1 block info as the first transaction in every L2 block + // This deposit transaction contains L1 block metadata required by the L2 chain + // Currently using hardcoded data from L1 block 124665056 + // If this info is not provided, Reth cannot decode the receipt for any transaction + // in the block since it also includes this info as part of the result. + // It does not matter if the to address (4200000000000000000000000000000000000015) is + // not deployed on the L2 chain since Reth queries the block to get the info and not the contract. + let block_info_tx: Bytes = { + let deposit_tx = TxDeposit { + source_hash: B256::default(), + from: address!("DeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001"), + to: TxKind::Call(address!("4200000000000000000000000000000000000015")), + mint: 0, + value: U256::default(), + gas_limit: 210000, + is_system_transaction: false, + input: FJORD_DATA.into(), + }; + + // Create a temporary signer for the deposit + let signer = self.signer.unwrap_or_else(Signer::random); + let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit_tx))?; + signed_tx.encoded_2718().into() + }; + + let fcu_result = self + .fcu(OpPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: block_timestamp.as_secs(), + parent_beacon_block_root: Some(B256::ZERO), + withdrawals: Some(vec![]), + ..Default::default() + }, + transactions: Some(vec![block_info_tx].into_iter().chain(txs).collect()), + gas_limit: Some(self.gas_limit.unwrap_or(DEFAULT_GAS_LIMIT)), + ..Default::default() + }) + .await?; + + if fcu_result.payload_status.is_invalid() { + return Err(eyre::eyre!("Forkchoice update failed: {fcu_result:?}")); + } + + let payload_id = fcu_result + .payload_id + .ok_or_else(|| eyre::eyre!("Forkchoice update did not return a payload ID"))?; + + // wait for the block to be built for the specified chain block time + tokio::time::sleep(Duration::from_millis(self.args.chain_block_time)).await; + + let payload = + OpExecutionPayloadEnvelope::V4(self.engine_api.get_payload(payload_id).await?); + let OpExecutionPayloadEnvelope::V4(payload) = payload else { + return Err(eyre::eyre!("Expected V4 payload, got something else")); + }; + let payload = payload.execution_payload; + + if self + .engine_api + .new_payload(payload.clone(), vec![], B256::ZERO, Requests::default()) + .await? 
+ .status + != PayloadStatusEnum::Valid + { + return Err(eyre::eyre!("Invalid validation status from builder")); + } + + let new_block_hash = payload.payload_inner.payload_inner.payload_inner.block_hash; + self.engine_api + .update_forkchoice(latest.header.hash, new_block_hash, None) + .await?; + + let block = self + .provider + .get_block_by_number(alloy_eips::BlockNumberOrTag::Latest) + .full() + .await? + .ok_or_else(|| eyre::eyre!("Failed to get latest block after building new block"))?; + + assert_eq!( + block.header.hash, new_block_hash, + "New block hash does not match expected hash" + ); + + for validation_node in &self.validation_nodes { + // optionally for each external validation node, ensure + // that they consider the block valid before returning it + // to the test author. + validation_node.post_block(&payload).await?; + } + + Ok(block) + } + + /// Retreives the latest built block and returns only a list of transaction + /// hashes from its body. + pub async fn latest(&self) -> eyre::Result> { + self.provider + .get_block_by_number(alloy_eips::BlockNumberOrTag::Latest) + .await? + .ok_or_else(|| eyre::eyre!("Failed to get latest block")) + } + + /// Retreives the latest built block and returns a list of full transaction + /// contents in its body. + pub async fn latest_full(&self) -> eyre::Result> { + self.provider + .get_block_by_number(alloy_eips::BlockNumberOrTag::Latest) + .full() + .await? + .ok_or_else(|| eyre::eyre!("Failed to get latest full block")) + } + + /// retreives a specific block by its number or tag and returns a list of transaction + /// hashes from its body. + pub async fn get_block( + &self, + number: BlockNumberOrTag, + ) -> eyre::Result>> { + Ok(self.provider.get_block_by_number(number).await?) + } + + /// retreives a specific block by its number or tag and returns a list of full transaction + /// contents in its body. + pub async fn get_block_full( + &self, + number: BlockNumberOrTag, + ) -> eyre::Result>> { + Ok(self.provider.get_block_by_number(number).full().await?) + } + + /// Returns a transaction builder that can be used to create and send transactions. + pub fn create_transaction(&self) -> TransactionBuilder { + TransactionBuilder::new(self.provider.clone()) + } + + /// Returns a reference to the underlying alloy provider that is used to + /// interact with the chain. 
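+    ///
+    /// Sketch of a typical call (illustrative only):
+    ///
+    /// ```ignore
+    /// let height = driver.provider().get_block_number().await?;
+    /// ```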
+ pub const fn provider(&self) -> &RootProvider { + &self.provider + } +} + +// internal methods +impl ChainDriver { + async fn fcu(&self, attribs: OpPayloadAttributes) -> eyre::Result { + let latest = self.latest().await?.header.hash; + let response = self + .engine_api + .update_forkchoice(latest, latest, Some(attribs)) + .await?; + + Ok(response) + } +} + +// L1 block info for OP mainnet block 124665056 (stored in input of tx at index 0) +// +// https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1 +const FJORD_DATA: &[u8] = &hex!( + "440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a + 3000000000000000000000000000000000000000000000000000000003ef12787000000 + 00000000000000000000000000000000000000000000000000000000012fdf87b89884a + 61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a5900000000000000000000 + 00006887246668a3b87f54deb3b94ba47a6f63f32985" +); diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs new file mode 100644 index 000000000..f7fe7ad13 --- /dev/null +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -0,0 +1,475 @@ +use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::Encodable2718; +use alloy_eips::{eip7685::Requests, BlockNumberOrTag}; +use alloy_primitives::private::alloy_rlp::Encodable; +use alloy_primitives::{keccak256, B256, U256}; +use alloy_provider::{Identity, Provider, ProviderBuilder, RootProvider}; +use alloy_rpc_types_engine::{ + ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadStatusEnum, +}; +use bollard::{ + exec::{CreateExecOptions, StartExecResults}, + query_parameters::{ + AttachContainerOptions, CreateContainerOptions, RemoveContainerOptions, + StartContainerOptions, StopContainerOptions, + }, + secret::{ContainerCreateBody, HostConfig}, + Docker, +}; +use futures::StreamExt; +use op_alloy_network::Optimism; +use op_alloy_rpc_types_engine::OpExecutionPayloadV4; +use std::path::PathBuf; +use tracing::debug; + +use crate::tests::{EngineApi, Ipc}; + +const AUTH_CONTAINER_IPC_PATH: &str = "/home/op-reth-shared/auth.ipc"; +const RPC_CONTAINER_IPC_PATH: &str = "/home/op-reth-shared/rpc.ipc"; + +/// This type represents an Optimism execution client node that is running inside a +/// docker container. This node is used to validate the correctness of the blocks built +/// by op-rbuilder. +/// +/// When this node is attached to a `ChainDriver`, it will automatically catch up with the +/// provided chain and will transparently ingest all newly built blocks by the driver. +/// +/// If the built payload fails to validate, then the driver block production function will +/// return an error during `ChainDriver::build_new_block`. +pub struct ExternalNode { + engine_api: EngineApi, + provider: RootProvider, + docker: Docker, + tempdir: PathBuf, + container_id: String, +} + +impl ExternalNode { + /// Creates a new instance of `ExternalNode` that runs the `op-reth` client in a Docker container + /// using the specified version tag. 
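+    ///
+    /// Requires a running local Docker daemon. A usage sketch (the tag shown is
+    /// just an example value):
+    ///
+    /// ```ignore
+    /// let validator = ExternalNode::reth_version("latest").await?;
+    /// ```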
+ pub async fn reth_version(version_tag: &str) -> eyre::Result { + let docker = Docker::connect_with_local_defaults()?; + + let tempdir = std::env::temp_dir().join(format!("reth-shared-{}", nanoid::nanoid!())); + let auth_ipc = tempdir.join("auth.ipc").to_string_lossy().to_string(); + let rpc_ipc = tempdir.join("rpc.ipc").to_string_lossy().to_string(); + + std::fs::create_dir_all(&tempdir) + .map_err(|_| eyre::eyre!("Failed to create temporary directory"))?; + + std::fs::write( + tempdir.join("genesis.json"), + include_str!("./artifacts/genesis.json.tmpl"), + ) + .map_err(|_| eyre::eyre!("Failed to write genesis file"))?; + + let host_config = HostConfig { + binds: Some(vec![format!( + "{}:/home/op-reth-shared:rw", + tempdir.display() + )]), + ..Default::default() + }; + + // Don't expose any ports, as we will only use IPC for communication. + let container_config = ContainerCreateBody { + image: Some(format!("ghcr.io/paradigmxyz/op-reth:{version_tag}")), + entrypoint: Some(vec!["op-reth".to_string()]), + cmd: Some( + vec![ + "node", + "--chain=/home/op-reth-shared/genesis.json", + "--auth-ipc", + &format!("--auth-ipc.path={AUTH_CONTAINER_IPC_PATH}"), + &format!("--ipcpath={RPC_CONTAINER_IPC_PATH}"), + "--disable-discovery", + "--no-persist-peers", + "--max-outbound-peers=0", + "--max-inbound-peers=0", + "--trusted-only", + ] + .into_iter() + .map(String::from) + .collect(), + ), + host_config: Some(host_config), + ..Default::default() + }; + + // Create the container + let container = docker + .create_container(Some(CreateContainerOptions::default()), container_config) + .await?; + + docker + .start_container(&container.id, None::) + .await?; + + // Wait for the container to be ready and IPCs to be created + await_ipc_readiness(&docker, &container.id).await?; + + // IPC files created by the container have restrictive permissions, + // so we need to relax them to allow the host to access them. + relax_permissions(&docker, &container.id, AUTH_CONTAINER_IPC_PATH).await?; + relax_permissions(&docker, &container.id, RPC_CONTAINER_IPC_PATH).await?; + + // Connect to the IPCs + let engine_api = EngineApi::with_ipc(&auth_ipc); + let provider = ProviderBuilder::::default() + .connect_ipc(rpc_ipc.into()) + .await?; + + Ok(Self { + engine_api, + provider, + docker, + tempdir, + container_id: container.id, + }) + } + + /// Creates a new instance of `ExternalNode` that runs the `op-reth` client in a Docker container + /// using the latest version. + pub async fn reth() -> eyre::Result { + Self::reth_version("latest").await + } +} + +impl ExternalNode { + /// Access to the RPC API of the validation node. + pub fn provider(&self) -> &RootProvider { + &self.provider + } + + /// Access to the Engine API of the validation node. + pub fn engine_api(&self) -> &EngineApi { + &self.engine_api + } +} + +impl ExternalNode { + /// Catches up this node with another node. + /// + /// This method will fail if this node is ahead of the provided chain or they do not + /// share the same genesis block. 
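+    ///
+    /// Sketch (assumes an existing `ChainDriver` named `driver`); the same call
+    /// is made automatically by `ChainDriver::with_validation_node`:
+    ///
+    /// ```ignore
+    /// validator.catch_up_with(driver.provider()).await?;
+    /// ```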
+ pub async fn catch_up_with(&self, chain: &RootProvider) -> eyre::Result<()> { + // check if we need to catch up + let (latest_hash, latest_number) = chain.latest_block_hash_and_number().await?; + let (our_latest_hash, our_latest_number) = + self.provider.latest_block_hash_and_number().await?; + + // check if we can sync in the first place + match (our_latest_number, latest_number) { + (we, them) if we == them && our_latest_hash == latest_hash => { + // we are already caught up and in sync with the provided chain + return Ok(()); + } + (we, them) if we == them && our_latest_hash != latest_hash => { + // divergent histories, can't sync + return Err(eyre::eyre!( + "External node is at the same height but has a different latest block hash: \ + {we} == {them}, {our_latest_hash} != {latest_hash}", + )); + } + (we, them) if we > them => { + return Err(eyre::eyre!( + "External node is ahead of the provided chain: {we} > {them}", + )); + } + (we, them) if we < them => { + debug!("external node is behind the local chain: {we} < {them}, catching up..."); + + // make sure that we share common history with the provided chain + let hash_at_height = chain.hash_at_height(we).await?; + if hash_at_height != our_latest_hash { + return Err(eyre::eyre!( + "External node does not share the same genesis block or history with \ + the provided chain: {} != {} at height {}", + hash_at_height, + our_latest_hash, + we + )); + } + } + _ => {} + }; + + // we are behind, let's catch up + let mut our_current_height = our_latest_number + 1; + + while our_current_height <= latest_number { + let payload = chain + .execution_payload_for_block(our_current_height) + .await?; + + let (latest_hash, _) = self.provider().latest_block_hash_and_number().await?; + + let status = self + .engine_api() + .new_payload(payload, vec![], B256::ZERO, Requests::default()) + .await?; + + if status.status != PayloadStatusEnum::Valid { + return Err(eyre::eyre!( + "Failed to import block at height {our_current_height} into external validation node: {:?}", + status.status + )); + } + + let new_chain_hash = status.latest_valid_hash.unwrap_or_default(); + self.engine_api() + .update_forkchoice(latest_hash, new_chain_hash, None) + .await?; + + our_current_height += 1; + } + + // sync complete, double check that we are in sync + let (final_hash, final_number) = self.provider().latest_block_hash_and_number().await?; + + if final_hash != latest_hash || final_number != latest_number { + return Err(eyre::eyre!( + "Failed to sync external validation node: {:?} != {:?}, {:?} != {:?}", + final_hash, + latest_hash, + final_number, + latest_number + )); + } + + Ok(()) + } + + /// Posts a block to the external validation node for validation and sets it as the latest block + /// in the fork choice. + pub async fn post_block(&self, payload: &OpExecutionPayloadV4) -> eyre::Result<()> { + let result = self + .engine_api + .new_payload(payload.clone(), vec![], B256::ZERO, Requests::default()) + .await?; + + let new_block_hash = payload.payload_inner.payload_inner.payload_inner.block_hash; + debug!( + "external validation node payload status for block {new_block_hash}: {:?}", + result.status + ); + + if result.status != PayloadStatusEnum::Valid { + return Err(eyre::eyre!( + "Failed to validate block {new_block_hash} with external validation node." 
+ )); + } + + let (latest_hash, _) = self.provider.latest_block_hash_and_number().await?; + + self.engine_api + .update_forkchoice(latest_hash, new_block_hash, None) + .await?; + + Ok(()) + } +} + +impl Drop for ExternalNode { + fn drop(&mut self) { + // Clean up the temporary directory + if self.tempdir.exists() { + std::fs::remove_dir_all(&self.tempdir).expect("Failed to remove temporary directory"); + } + + // Block on cleaning up the container + let docker = self.docker.clone(); + let container_id = self.container_id.clone(); + + if tokio::runtime::Handle::try_current().is_ok() { + let _ = tokio::task::spawn_blocking(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + let _ = docker + .stop_container(&container_id, None::) + .await; + + let _ = docker + .remove_container( + &container_id, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await; + }); + }); + } + } +} + +async fn relax_permissions(docker: &Docker, container: &str, path: &str) -> eyre::Result<()> { + let exec = docker + .create_exec( + container, + CreateExecOptions { + cmd: Some(vec!["chmod", "777", path]), + attach_stdout: Some(true), + attach_stderr: Some(true), + ..Default::default() + }, + ) + .await?; + + let StartExecResults::Attached { mut output, .. } = docker.start_exec(&exec.id, None).await? + else { + return Err(eyre::eyre!("Failed to start exec for relaxing permissions")); + }; + + while let Some(Ok(output)) = output.next().await { + use bollard::container::LogOutput::*; + match output { + StdErr { message } => { + return Err(eyre::eyre!( + "Failed to relax permissions for {path}: {}", + String::from_utf8_lossy(&message) + )) + } + _ => continue, + }; + } + + Ok(()) +} + +async fn await_ipc_readiness(docker: &Docker, container: &str) -> eyre::Result<()> { + let mut attach_stream = docker + .attach_container( + container, + Some(AttachContainerOptions { + stdout: true, + stderr: true, + stream: true, + logs: true, + ..Default::default() + }), + ) + .await?; + + let mut rpc_ipc_started = false; + let mut auth_ipc_started = false; + + // wait for the node to start and signal that IPCs are ready + while let Some(Ok(output)) = attach_stream.output.next().await { + use bollard::container::LogOutput; + match output { + LogOutput::StdOut { message } | LogOutput::StdErr { message } => { + let message = String::from_utf8_lossy(&message); + if message.contains(AUTH_CONTAINER_IPC_PATH) { + auth_ipc_started = true; + } + + if message.contains(RPC_CONTAINER_IPC_PATH) { + rpc_ipc_started = true; + } + + if message.to_lowercase().contains("error") { + return Err(eyre::eyre!("Failed to start op-reth container: {message}")); + } + } + LogOutput::StdIn { .. } | LogOutput::Console { .. } => {} + } + + if auth_ipc_started && rpc_ipc_started { + break; + } + } + + if !auth_ipc_started || !rpc_ipc_started { + return Err(eyre::eyre!( + "Failed to start op-reth container: IPCs not ready" + )); + } + + Ok(()) +} + +trait OptimismProviderExt { + async fn hash_at_height(&self, height: u64) -> eyre::Result; + async fn latest_block_hash_and_number(&self) -> eyre::Result<(B256, u64)>; + async fn execution_payload_for_block(&self, number: u64) -> eyre::Result; + + async fn genesis_hash(&self) -> eyre::Result { + Ok(self.hash_at_height(0).await?) + } +} + +impl OptimismProviderExt for RootProvider { + async fn hash_at_height(&self, height: u64) -> eyre::Result { + let block = self + .get_block_by_number(BlockNumberOrTag::Number(height)) + .await? 
+ .ok_or_else(|| eyre::eyre!("No block found at height {}", height))?; + Ok(block.header.hash) + } + + async fn latest_block_hash_and_number(&self) -> eyre::Result<(B256, u64)> { + let block = self + .get_block_by_number(BlockNumberOrTag::Latest) + .await? + .ok_or_else(|| eyre::eyre!("No latest block found"))?; + Ok((block.header.hash, block.header.number)) + } + + async fn execution_payload_for_block(&self, number: u64) -> eyre::Result { + let block = self + .get_block_by_number(BlockNumberOrTag::Number(number)) + .full() + .await? + .ok_or_else(|| eyre::eyre!("No block found at height {}", number))?; + + let withdrawals = block.withdrawals.clone().unwrap_or_default(); + + // Calculate the withdrawals root properly + let withdrawals_root = if withdrawals.is_empty() { + EMPTY_WITHDRAWALS + } else { + // Calculate the Merkle Patricia Trie root of the withdrawals + let mut buf = Vec::new(); + withdrawals.encode(&mut buf); + keccak256(&buf) + }; + + let payload = OpExecutionPayloadV4 { + payload_inner: ExecutionPayloadV3 { + payload_inner: ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + parent_hash: block.header.parent_hash, + fee_recipient: block.header.beneficiary, + state_root: block.header.state_root, + receipts_root: block.header.receipts_root, + logs_bloom: block.header.logs_bloom, + prev_randao: block.header.mix_hash, + block_number: block.header.number, + gas_limit: block.header.gas_limit, + gas_used: block.header.gas_used, + timestamp: block.header.timestamp, + extra_data: block.header.extra_data.clone(), + base_fee_per_gas: U256::from( + block.header.base_fee_per_gas.unwrap_or_default(), + ), + block_hash: block.header.hash, + transactions: block + .transactions + .into_transactions_vec() + .into_iter() + .map(|tx| tx.as_ref().encoded_2718().into()) + .collect(), + }, + withdrawals: block.withdrawals.unwrap_or_default().to_vec(), + }, + blob_gas_used: block.header.inner.blob_gas_used.unwrap_or_default(), + excess_blob_gas: block.header.inner.excess_blob_gas.unwrap_or_default(), + }, + withdrawals_root, + }; + + Ok(payload) + } +} diff --git a/crates/op-rbuilder/src/tests/framework/harness.rs b/crates/op-rbuilder/src/tests/framework/harness.rs index cd5ee0ee9..b4789c527 100644 --- a/crates/op-rbuilder/src/tests/framework/harness.rs +++ b/crates/op-rbuilder/src/tests/framework/harness.rs @@ -1,6 +1,5 @@ use super::{ apis::EngineApi, - blocks::BlockGenerator, op::{OpRbuilderConfig, OpRethConfig}, service::{self, Service, ServiceInstance}, TransactionBuilder, BUILDER_PRIVATE_KEY, @@ -128,7 +127,7 @@ impl TestHarnessBuilder { let builder_log_path = builder.log_path.clone(); Ok(TestHarness { - framework: framework, + framework, builder_auth_rpc_port, builder_http_port, validator_auth_rpc_port, @@ -171,22 +170,6 @@ impl TestHarness { Ok(provider) } - pub async fn block_generator(&self) -> eyre::Result { - let engine_api = EngineApi::new_with_port(self.builder_auth_rpc_port).unwrap(); - let validation_api = Some(EngineApi::new_with_port(self.validator_auth_rpc_port).unwrap()); - - let mut generator = BlockGenerator::new( - engine_api, - validation_api, - false, - self.chain_block_time.map_or(1, |time| time / 1000), // in seconds - None, - ); - generator.init().await?; - - Ok(generator) - } - pub fn create_transaction(&self) -> TransactionBuilder { TransactionBuilder::new(self.provider().expect("provider not available")) } diff --git a/crates/op-rbuilder/src/tests/framework/instance.rs b/crates/op-rbuilder/src/tests/framework/instance.rs new file mode 100644 index 
000000000..4966103a9 --- /dev/null +++ b/crates/op-rbuilder/src/tests/framework/instance.rs @@ -0,0 +1,351 @@ +use crate::revert_protection::EthApiOverrideServer; +use crate::{ + args::OpRbuilderArgs, + builders::{BuilderConfig, FlashblocksBuilder, PayloadBuilder, StandardBuilder}, + revert_protection::{EthApiExtServer, RevertProtectionExt}, + tests::{ + framework::{driver::ChainDriver, BUILDER_PRIVATE_KEY}, + ChainDriverExt, EngineApi, Ipc, TransactionPoolObserver, + }, + tx::FBPooledTransaction, + tx_signer::Signer, +}; +use alloy_provider::{Identity, ProviderBuilder, RootProvider}; +use clap::Parser; +use core::future::Future; +use core::{ + any::Any, + net::Ipv4Addr, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; +use futures::FutureExt; +use moka::future::Cache; +use nanoid::nanoid; +use op_alloy_network::Optimism; +use reth::{ + args::{DatadirArgs, NetworkArgs, RpcServerArgs}, + core::exit::NodeExitFuture, + tasks::TaskManager, +}; +use reth_node_builder::{NodeBuilder, NodeConfig}; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_cli::commands::Commands; +use reth_optimism_node::{ + node::{OpAddOnsBuilder, OpPoolBuilder}, + OpNode, +}; +use reth_transaction_pool::AllTransactionsEvents; +use reth_transaction_pool::TransactionPool; +use std::sync::{Arc, LazyLock}; +use tokio::sync::oneshot; + +/// Represents a type that emulates a local in-process instance of the OP builder node. +/// This node uses IPC as the communication channel for the RPC server Engine API. +pub struct LocalInstance { + signer: Signer, + config: NodeConfig, + args: OpRbuilderArgs, + task_manager: Option, + exit_future: NodeExitFuture, + _node_handle: Box, + pool_observer: TransactionPoolObserver, +} + +impl LocalInstance { + /// Creates a new local instance of the OP builder node with the given arguments, + /// with the default Reth node configuration. + /// + /// This method does not prefund any accounts, so before sending any transactions + /// make sure that sender accounts are funded. + pub async fn new(args: OpRbuilderArgs) -> eyre::Result { + Self::new_with_config::
<P>
(args, default_node_config()).await + } + + /// Creates a new local instance of the OP builder node with the given arguments, + /// with a given Reth node configuration. + /// + /// This method does not prefund any accounts, so before sending any transactions + /// make sure that sender accounts are funded. + pub async fn new_with_config( + args: OpRbuilderArgs, + config: NodeConfig, + ) -> eyre::Result { + let mut args = args; + let task_manager = task_manager(); + let op_node = OpNode::new(args.rollup_args.clone()); + let reverted_cache = Cache::builder().max_capacity(100).build(); + let reverted_cache_clone = reverted_cache.clone(); + + let (rpc_ready_tx, rpc_ready_rx) = oneshot::channel::<()>(); + let (txpool_ready_tx, txpool_ready_rx) = + oneshot::channel::>(); + + let signer = args.builder_signer.unwrap_or_else(|| { + Signer::try_from_secret( + BUILDER_PRIVATE_KEY + .parse() + .expect("Invalid builder private key"), + ) + .expect("Failed to create signer from private key") + }); + args.builder_signer = Some(signer); + args.rollup_args.enable_tx_conditional = true; + + let builder_config = BuilderConfig::::try_from(args.clone()) + .expect("Failed to convert rollup args to builder config"); + let da_config = builder_config.da_config.clone(); + + let node_builder = NodeBuilder::<_, OpChainSpec>::new(config.clone()) + .testing_node(task_manager.executor()) + .with_types::() + .with_components( + op_node + .components() + .pool(pool_component(&args)) + .payload(P::new_service(builder_config)?), + ) + .with_add_ons( + OpAddOnsBuilder::default() + .with_sequencer(op_node.args.sequencer.clone()) + .with_enable_tx_conditional(op_node.args.enable_tx_conditional) + .with_da_config(da_config) + .build(), + ) + .extend_rpc_modules(move |ctx| { + if args.enable_revert_protection { + tracing::info!("Revert protection enabled"); + + let pool = ctx.pool().clone(); + let provider = ctx.provider().clone(); + let revert_protection_ext: RevertProtectionExt< + _, + _, + _, + op_alloy_network::Optimism, + > = RevertProtectionExt::new(pool, provider, ctx.registry.eth_api().clone()); + + ctx.modules + .merge_configured(revert_protection_ext.bundle_api().into_rpc())?; + + ctx.modules.replace_configured( + revert_protection_ext.eth_api(reverted_cache).into_rpc(), + )?; + } + + Ok(()) + }) + .on_rpc_started(move |_, _| { + let _ = rpc_ready_tx.send(()); + Ok(()) + }) + .on_node_started(move |ctx| { + txpool_ready_tx + .send(ctx.pool.all_transactions_event_listener()) + .expect("Failed to send txpool ready signal"); + + Ok(()) + }); + + let node_handle = node_builder.launch().await?; + let exit_future = node_handle.node_exit_future; + let boxed_handle = Box::new(node_handle.node); + let node_handle: Box = boxed_handle; + + // Wait for all required components to be ready + rpc_ready_rx.await.expect("Failed to receive ready signal"); + let pool_monitor = txpool_ready_rx + .await + .expect("Failed to receive txpool ready signal"); + + Ok(Self { + args, + signer, + config, + exit_future, + _node_handle: node_handle, + task_manager: Some(task_manager), + pool_observer: TransactionPoolObserver::new(pool_monitor, reverted_cache_clone), + }) + } + + /// Creates new local instance of the OP builder node with the standard builder configuration. + /// This method prefunds the default accounts with 1 ETH each. 
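    // A minimal usage sketch of the flow these constructors enable, using only
    // helpers introduced in this patch; variable names are illustrative:
    //
    //     let rbuilder = LocalInstance::standard().await?; // default accounts prefunded
    //     let driver = rbuilder.driver().await?;
    //     let block = driver.build_new_block().await?;     // deposit + builder tx only
    //     assert_eq!(block.transactions.len(), 2);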
+ pub async fn standard() -> eyre::Result { + let args = crate::args::Cli::parse_from(["dummy", "node"]); + let Commands::Node(ref node_command) = args.command else { + unreachable!() + }; + let instance = Self::new::(node_command.ext.clone()).await?; + let driver = ChainDriver::new(&instance).await?; + driver.fund_default_accounts().await?; + Ok(instance) + } + + /// Creates new local instance of the OP builder node with the flashblocks builder configuration. + /// This method prefunds the default accounts with 1 ETH each. + pub async fn flashblocks() -> eyre::Result { + let mut args = crate::args::Cli::parse_from(["dummy", "node"]); + let Commands::Node(ref mut node_command) = args.command else { + unreachable!() + }; + node_command.ext.flashblocks.enabled = true; + node_command.ext.flashblocks.flashblocks_port = 0; // use random os assigned port + let instance = Self::new::(node_command.ext.clone()).await?; + let driver = ChainDriver::new(&instance).await?; + driver.fund_default_accounts().await?; + Ok(instance) + } + + pub const fn config(&self) -> &NodeConfig { + &self.config + } + + pub const fn args(&self) -> &OpRbuilderArgs { + &self.args + } + + pub const fn signer(&self) -> &Signer { + &self.signer + } + + pub fn flashblocks_ws_url(&self) -> String { + let ipaddr: Ipv4Addr = self + .args + .flashblocks + .flashblocks_addr + .parse() + .expect("Failed to parse flashblocks IP address"); + + let ipaddr = if ipaddr.is_unspecified() { + Ipv4Addr::LOCALHOST + } else { + ipaddr + }; + + let port = self.args.flashblocks.flashblocks_port; + + format!("ws://{ipaddr}:{port}/") + } + + pub fn rpc_ipc(&self) -> &str { + &self.config.rpc.ipcpath + } + + pub fn auth_ipc(&self) -> &str { + &self.config.rpc.auth_ipc_path + } + + pub fn engine_api(&self) -> EngineApi { + EngineApi::::with_ipc(self.auth_ipc()) + } + + pub const fn pool(&self) -> &TransactionPoolObserver { + &self.pool_observer + } + + pub async fn driver(&self) -> eyre::Result { + ChainDriver::new(self).await + } + + pub async fn provider(&self) -> eyre::Result> { + ProviderBuilder::::default() + .connect_ipc(self.rpc_ipc().to_string().into()) + .await + .map_err(|e| eyre::eyre!("Failed to connect to provider: {e}")) + } +} + +impl Drop for LocalInstance { + fn drop(&mut self) { + if let Some(task_manager) = self.task_manager.take() { + task_manager.graceful_shutdown_with_timeout(Duration::from_secs(3)); + std::fs::remove_dir_all(self.config().datadir().to_string()).unwrap_or_else(|e| { + panic!( + "Failed to remove temporary data directory {}: {e}", + self.config().datadir() + ) + }); + } + } +} + +impl Future for LocalInstance { + type Output = eyre::Result<()>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.get_mut().exit_future.poll_unpin(cx) + } +} + +pub fn default_node_config() -> NodeConfig { + let tempdir = std::env::temp_dir(); + let random_id = nanoid!(); + + let data_path = tempdir + .join(format!("rbuilder.{random_id}.datadir")) + .to_path_buf(); + + std::fs::create_dir_all(&data_path).expect("Failed to create temporary data directory"); + + let rpc_ipc_path = tempdir + .join(format!("rbuilder.{random_id}.rpc-ipc")) + .to_path_buf(); + + let auth_ipc_path = tempdir + .join(format!("rbuilder.{random_id}.auth-ipc")) + .to_path_buf(); + + let mut rpc = RpcServerArgs::default().with_auth_ipc(); + rpc.ws = false; + rpc.http = false; + rpc.auth_port = 0; + rpc.ipcpath = rpc_ipc_path.to_string_lossy().into(); + rpc.auth_ipc_path = auth_ipc_path.to_string_lossy().into(); + + let mut network = 
NetworkArgs::default().with_unused_ports(); + network.discovery.disable_discovery = true; + + let datadir = DatadirArgs { + datadir: data_path + .to_string_lossy() + .parse() + .expect("Failed to parse data dir path"), + static_files_path: None, + }; + + NodeConfig::::new(chain_spec()) + .with_datadir_args(datadir) + .with_rpc(rpc) + .with_network(network) +} + +fn chain_spec() -> Arc { + static CHAIN_SPEC: LazyLock> = LazyLock::new(|| { + let genesis = include_str!("./artifacts/genesis.json.tmpl"); + let genesis = serde_json::from_str(genesis).expect("invalid genesis JSON"); + let chain_spec = OpChainSpec::from_genesis(genesis); + Arc::new(chain_spec) + }); + + CHAIN_SPEC.clone() +} + +fn task_manager() -> TaskManager { + TaskManager::new(tokio::runtime::Handle::current()) +} + +fn pool_component(args: &OpRbuilderArgs) -> OpPoolBuilder { + let rollup_args = &args.rollup_args; + OpPoolBuilder::::default() + .with_enable_tx_conditional( + // Revert protection uses the same internal pool logic as conditional transactions + // to garbage collect transactions out of the bundle range. + rollup_args.enable_tx_conditional || args.enable_revert_protection, + ) + .with_supervisor( + rollup_args.supervisor_http.clone(), + rollup_args.supervisor_safety_level, + ) +} diff --git a/crates/op-rbuilder/src/tests/framework/mod.rs b/crates/op-rbuilder/src/tests/framework/mod.rs index d8ffff3c0..cd52b1110 100644 --- a/crates/op-rbuilder/src/tests/framework/mod.rs +++ b/crates/op-rbuilder/src/tests/framework/mod.rs @@ -1,16 +1,22 @@ mod apis; -mod blocks; +mod driver; +mod external; mod harness; +mod instance; mod op; mod service; mod txs; +mod utils; pub use apis::*; -pub use blocks::*; +pub use driver::*; +pub use external::*; pub use harness::*; +pub use instance::*; pub use op::*; pub use service::*; pub use txs::*; +pub use utils::*; const BUILDER_PRIVATE_KEY: &str = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"; @@ -22,3 +28,34 @@ pub const DEFAULT_JWT_TOKEN: &str = "688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a"; pub const ONE_ETH: u128 = 1_000_000_000_000_000_000; + +/// This gets invoked before any tests, when the cargo test framework loads the test library. 
+/// It injects itself into +#[ctor::ctor] +fn init_tests_logging() { + use tracing_subscriber::{filter::filter_fn, prelude::*}; + if let Ok(v) = std::env::var("TEST_TRACE") { + let level = match v.as_str() { + "false" | "off" => return, + "true" | "debug" | "on" => tracing::Level::DEBUG, + "trace" => tracing::Level::TRACE, + "info" => tracing::Level::INFO, + "warn" => tracing::Level::WARN, + "error" => tracing::Level::ERROR, + _ => return, + }; + + // let prefix_blacklist = &["alloy_transport_ipc", "storage::db::mdbx"]; + let prefix_blacklist = &["storage::db::mdbx"]; + + tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer()) + .with(filter_fn(move |metadata| { + metadata.level() <= &level + && !prefix_blacklist + .iter() + .any(|prefix| metadata.target().starts_with(prefix)) + })) + .init(); + } +} diff --git a/crates/op-rbuilder/src/tests/framework/txs.rs b/crates/op-rbuilder/src/tests/framework/txs.rs index 70c8d3bd8..771c0bad8 100644 --- a/crates/op-rbuilder/src/tests/framework/txs.rs +++ b/crates/op-rbuilder/src/tests/framework/txs.rs @@ -1,15 +1,23 @@ use crate::{ primitives::bundle::{Bundle, BundleResult}, + tx::FBPooledTransaction, tx_signer::Signer, }; use alloy_consensus::TxEip1559; use alloy_eips::{eip2718::Encodable2718, BlockNumberOrTag}; -use alloy_primitives::{hex, Bytes}; +use alloy_primitives::{hex, Address, Bytes, TxHash, TxKind, B256, U256}; use alloy_provider::{PendingTransactionBuilder, Provider, RootProvider}; use core::cmp::max; +use dashmap::DashMap; +use futures::StreamExt; +use moka::future::Cache; use op_alloy_consensus::{OpTxEnvelope, OpTypedTransaction}; use op_alloy_network::Optimism; use reth_primitives::Recovered; +use reth_transaction_pool::{AllTransactionsEvents, FullTransactionEvent, TransactionEvent}; +use std::{collections::VecDeque, sync::Arc}; +use tokio::sync::watch; +use tracing::debug; use alloy_eips::eip1559::MIN_PROTOCOL_BASE_FEE; @@ -48,11 +56,26 @@ impl TransactionBuilder { } } + pub fn with_to(mut self, to: Address) -> Self { + self.tx.to = TxKind::Call(to); + self + } + + pub fn with_create(mut self) -> Self { + self.tx.to = TxKind::Create; + self + } + pub fn with_key(mut self, key: u64) -> Self { self.key = Some(key); self } + pub fn with_value(mut self, value: u128) -> Self { + self.tx.value = U256::from(value); + self + } + pub fn with_signer(mut self, signer: Signer) -> Self { self.signer = Some(signer); self @@ -173,3 +196,141 @@ impl TransactionBuilder { .await?) } } + +type ObservationsMap = DashMap>; + +pub struct TransactionPoolObserver { + /// Stores a mapping of all observed transactions to their history of events. + observations: Arc, + + /// Fired when this type is dropped, giving a signal to the listener loop + /// to stop listening for events. + term: Option>, +} + +impl Drop for TransactionPoolObserver { + fn drop(&mut self) { + // Signal the listener loop to stop listening for events + if let Some(term) = self.term.take() { + let _ = term.send(true); + } + } +} + +impl TransactionPoolObserver { + pub fn new( + stream: AllTransactionsEvents, + reverts: Cache, + ) -> Self { + let mut stream = stream; + let observations = Arc::new(ObservationsMap::new()); + let observations_clone = Arc::clone(&observations); + let (term, mut term_rx) = watch::channel(false); + + tokio::spawn(async move { + let observations = observations_clone; + + loop { + tokio::select! 
{ + _ = term_rx.changed() => { + if *term_rx.borrow() { + debug!("Transaction pool observer terminated."); + return; + } + } + tx_event = stream.next() => { + match tx_event { + Some(FullTransactionEvent::Pending(hash)) => { + tracing::debug!("Transaction pending: {hash}"); + observations.entry(hash).or_default().push_back(TransactionEvent::Pending); + }, + Some(FullTransactionEvent::Queued(hash)) => { + tracing::debug!("Transaction queued: {hash}"); + observations.entry(hash).or_default().push_back(TransactionEvent::Queued); + }, + Some(FullTransactionEvent::Mined { tx_hash, block_hash }) => { + tracing::debug!("Transaction mined: {tx_hash} in block {block_hash}"); + observations.entry(tx_hash).or_default().push_back(TransactionEvent::Mined(block_hash)); + }, + Some(FullTransactionEvent::Replaced { transaction, replaced_by }) => { + tracing::debug!("Transaction replaced: {transaction:?} by {replaced_by}"); + observations.entry(*transaction.hash()).or_default().push_back(TransactionEvent::Replaced(replaced_by)); + }, + Some(FullTransactionEvent::Discarded(hash)) => { + tracing::debug!("Transaction discarded: {hash}"); + observations.entry(hash).or_default().push_back(TransactionEvent::Discarded); + reverts.insert(hash, ()).await; + }, + Some(FullTransactionEvent::Invalid(hash)) => { + tracing::debug!("Transaction invalid: {hash}"); + observations.entry(hash).or_default().push_back(TransactionEvent::Invalid); + }, + Some(FullTransactionEvent::Propagated(_)) => {}, + None => {}, + } + } + } + } + }); + + Self { + observations, + term: Some(term), + } + } + + pub fn tx_status(&self, txhash: TxHash) -> Option { + self.observations + .get(&txhash) + .and_then(|history| history.back().cloned()) + } + + pub fn is_pending(&self, txhash: TxHash) -> bool { + matches!(self.tx_status(txhash), Some(TransactionEvent::Pending)) + } + + pub fn is_queued(&self, txhash: TxHash) -> bool { + matches!(self.tx_status(txhash), Some(TransactionEvent::Queued)) + } + + pub fn is_dropped(&self, txhash: TxHash) -> bool { + matches!(self.tx_status(txhash), Some(TransactionEvent::Discarded)) + } + + pub fn count(&self, status: TransactionEvent) -> usize { + self.observations + .iter() + .filter(|tx| tx.value().back() == Some(&status)) + .count() + } + + pub fn pending_count(&self) -> usize { + self.count(TransactionEvent::Pending) + } + + pub fn queued_count(&self) -> usize { + self.count(TransactionEvent::Queued) + } + + pub fn dropped_count(&self) -> usize { + self.count(TransactionEvent::Discarded) + } + + /// Returns the history of pool events for a transaction. 
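    // Illustrative query pattern, assuming a `LocalInstance` (`rbuilder`) and a
    // `ChainDriver` (`driver`) set up as in the tests elsewhere in this patch:
    //
    //     let tx = driver.create_transaction().random_valid_transfer().send().await?;
    //     let hash = *tx.tx_hash();
    //     driver.build_new_block().await?;
    //     assert!(!rbuilder.pool().is_dropped(hash));
    //     if let Some(events) = rbuilder.pool().history(hash) {
    //         tracing::debug!("pool events for {hash}: {events:?}");
    //     }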
+ pub fn history(&self, txhash: TxHash) -> Option> { + self.observations + .get(&txhash) + .map(|history| history.iter().cloned().collect()) + } + + pub fn print_all(&self) { + tracing::debug!("TxPool {:#?}", self.observations); + } + + pub fn exists(&self, txhash: TxHash) -> bool { + matches!( + self.tx_status(txhash), + Some(TransactionEvent::Pending) | Some(TransactionEvent::Queued) + ) + } +} diff --git a/crates/op-rbuilder/src/tests/framework/utils.rs b/crates/op-rbuilder/src/tests/framework/utils.rs new file mode 100644 index 000000000..7d561aea7 --- /dev/null +++ b/crates/op-rbuilder/src/tests/framework/utils.rs @@ -0,0 +1,135 @@ +use alloy_eips::Encodable2718; +use alloy_primitives::{hex, Address, TxKind, B256, U256}; +use alloy_rpc_types_eth::{Block, BlockTransactionHashes}; +use core::future::Future; +use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; +use op_alloy_rpc_types::Transaction; + +use crate::{ + tests::{framework::driver::ChainDriver, ONE_ETH}, + tx_signer::Signer, +}; + +use super::{TransactionBuilder, FUNDED_PRIVATE_KEYS}; + +pub trait TransactionBuilderExt { + fn random_valid_transfer(self) -> Self; + fn random_reverting_transaction(self) -> Self; +} + +impl TransactionBuilderExt for TransactionBuilder { + fn random_valid_transfer(self) -> Self { + self.with_to(rand::random::
<Address>
()).with_value(1) + } + + fn random_reverting_transaction(self) -> Self { + self.with_create().with_input(hex!("60006000fd").into()) // PUSH1 0x00 PUSH1 0x00 REVERT + } +} + +pub trait ChainDriverExt { + fn fund_default_accounts(&self) -> impl Future>; + fn fund_many(&self, addresses: Vec
, amount: u128) -> impl Future>; + fn fund(&self, address: Address, amount: u128) -> impl Future>; + fn first_funded_address(&self) -> Address { + FUNDED_PRIVATE_KEYS[0] + .parse() + .expect("Invalid funded private key") + } + + fn fund_accounts( + &self, + count: usize, + amount: u128, + ) -> impl Future>> { + async move { + let accounts = (0..count).map(|_| Signer::random()).collect::>(); + self.fund_many(accounts.iter().map(|a| a.address).collect(), amount).await?; + Ok(accounts) + } + } +} + +impl ChainDriverExt for ChainDriver { + async fn fund_default_accounts(&self) -> eyre::Result<()> { + for key in FUNDED_PRIVATE_KEYS { + let signer: Signer = key.parse()?; + self.fund(signer.address, ONE_ETH).await?; + } + Ok(()) + } + + async fn fund_many(&self, addresses: Vec
, amount: u128) -> eyre::Result<()> { + let mut txs = Vec::with_capacity(addresses.len()); + + for address in addresses { + let deposit = TxDeposit { + source_hash: B256::default(), + from: address, // Set the sender to the address of the account to seed + to: TxKind::Create, + mint: amount, // Amount to deposit + value: U256::default(), + gas_limit: 210000, + is_system_transaction: false, + input: Default::default(), // No input data for the deposit + }; + + let signer = Signer::random(); + let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit))?; + let signed_tx_rlp = signed_tx.encoded_2718(); + txs.push(signed_tx_rlp.into()); + } + + self.build_new_block_with_txs(txs).await?; + Ok(()) + } + + async fn fund(&self, address: Address, amount: u128) -> eyre::Result<()> { + let deposit = TxDeposit { + source_hash: B256::default(), + from: address, // Set the sender to the address of the account to seed + to: TxKind::Create, + mint: amount, // Amount to deposit + value: U256::default(), + gas_limit: 210000, + is_system_transaction: false, + input: Default::default(), // No input data for the deposit + }; + + let signer = Signer::random(); + let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit))?; + let signed_tx_rlp = signed_tx.encoded_2718(); + self.build_new_block_with_txs(vec![signed_tx_rlp.into()]) + .await?; + Ok(()) + } +} + +pub trait BlockTransactionsExt { + fn includes(&self, tx_hash: &B256) -> bool; +} + +impl BlockTransactionsExt for Block { + fn includes(&self, tx_hash: &B256) -> bool { + self.transactions.hashes().any(|tx| tx == *tx_hash) + } +} + +impl BlockTransactionsExt for BlockTransactionHashes<'_, Transaction> { + fn includes(&self, tx_hash: &B256) -> bool { + let mut iter = self.clone(); + iter.any(|tx| tx == *tx_hash) + } +} + +pub trait OpRbuilderArgsTestExt { + fn test_default() -> Self; +} + +impl OpRbuilderArgsTestExt for crate::args::OpRbuilderArgs { + fn test_default() -> Self { + let mut default = Self::default(); + default.flashblocks.flashblocks_port = 0; // randomize port + default + } +} diff --git a/crates/op-rbuilder/src/tests/mod.rs b/crates/op-rbuilder/src/tests/mod.rs index e3f96187c..a8a4cd1d9 100644 --- a/crates/op-rbuilder/src/tests/mod.rs +++ b/crates/op-rbuilder/src/tests/mod.rs @@ -2,5 +2,5 @@ mod framework; pub use framework::*; -mod flashblocks; +//mod flashblocks; mod vanilla; diff --git a/crates/op-rbuilder/src/tests/vanilla/mod.rs b/crates/op-rbuilder/src/tests/vanilla/mod.rs index 976a3dc33..6a448711e 100644 --- a/crates/op-rbuilder/src/tests/vanilla/mod.rs +++ b/crates/op-rbuilder/src/tests/vanilla/mod.rs @@ -1,7 +1,7 @@ #![cfg(test)] -mod data_availability; -mod ordering; +//mod data_availability; +//mod ordering; mod revert; mod smoke; mod txpool; diff --git a/crates/op-rbuilder/src/tests/vanilla/smoke.rs b/crates/op-rbuilder/src/tests/vanilla/smoke.rs index 33109c4a3..157852696 100644 --- a/crates/op-rbuilder/src/tests/vanilla/smoke.rs +++ b/crates/op-rbuilder/src/tests/vanilla/smoke.rs @@ -1,31 +1,39 @@ -use super::framework::TestHarnessBuilder; -use alloy_provider::Provider; +use crate::tests::{ChainDriver, ExternalNode, LocalInstance, TransactionBuilderExt}; +use alloy_primitives::TxHash; +use core::{ + sync::atomic::{AtomicBool, Ordering}, + time::Duration, +}; use std::collections::HashSet; +use tokio::{join, task::yield_now}; +use tracing::info; /// This is a smoke test that ensures that transactions are included in blocks /// and that the block generator is functioning correctly. 
+/// +/// Generated blocks are also validated against an external op-reth node to +/// ensure their correctness. #[tokio::test] async fn chain_produces_blocks() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("chain_produces_blocks") - .build() + let rbuilder = LocalInstance::standard().await?; + let driver = ChainDriver::new(&rbuilder) + .await? + .with_validation_node(ExternalNode::reth().await?) .await?; - let mut generator = harness.block_generator().await?; - const SAMPLE_SIZE: usize = 10; // ensure that each block has at least two transactions when // no user transactions are sent. // the deposit transaction and the block generator's transaction for _ in 0..SAMPLE_SIZE { - generator - .generate_block() - .await - .expect("Failed to generate block"); - let transactions = harness.latest_block().await.transactions; - assert!( - transactions.len() == 2, - "Block should have exactly two transactions" + let block = driver.build_new_block().await?; + let transactions = block.transactions; + + assert_eq!( + transactions.len(), + 2, + "Empty blocks should have exactly two transactions" ); } @@ -33,98 +41,111 @@ async fn chain_produces_blocks() -> eyre::Result<()> { // sent to it during its block time + the two mandatory transactions for _ in 0..SAMPLE_SIZE { let count = rand::random_range(1..8); - let mut tx_hashes = HashSet::new(); + let mut tx_hashes = HashSet::::default(); + for _ in 0..count { - let tx = harness - .send_valid_transaction() + let tx = driver + .create_transaction() + .random_valid_transfer() + .send() .await .expect("Failed to send transaction"); - let tx_hash = *tx.tx_hash(); - tx_hashes.insert(tx_hash); + tx_hashes.insert(*tx.tx_hash()); } - generator - .generate_block() - .await - .expect("Failed to generate block"); - let transactions = harness.latest_block().await.transactions; - - assert!( - transactions.len() == 2 + count, + + let block = driver.build_new_block().await?; + + let txs = block.transactions; + + assert_eq!( + txs.len(), + 2 + count, "Block should have {} transactions", 2 + count ); for tx_hash in tx_hashes { assert!( - transactions.hashes().any(|hash| hash == *tx_hash), + txs.hashes().any(|hash| hash == tx_hash), "Transaction {} should be included in the block", tx_hash ); } } - Ok(()) } /// Ensures that payloads are generated correctly even when the builder is busy /// with other requests, such as fcu or getPayload. -#[tokio::test] -async fn get_payload_close_to_fcu() -> eyre::Result<()> { - let test_harness = TestHarnessBuilder::new("get_payload_close_to_fcu") - .build() - .await?; - let mut block_generator = test_harness.block_generator().await?; - - // add some transactions to the pool so that the builder - // is busy when we send the fcu/getPayload requests - for _ in 0..10 { - // Note, for this test it is okay if they are not valid - let _ = test_harness.send_valid_transaction().await?; - } - - let result = tokio::time::timeout( - std::time::Duration::from_secs(1), - block_generator.submit_payload(None, 0, true), - ) - .await; - - // ensure we didn't timeout - let result = result.expect("Submit payload timed out"); +#[tokio::test(flavor = "multi_thread")] +async fn produces_blocks_under_load_within_deadline() -> eyre::Result<()> { + let rbuilder = LocalInstance::standard().await?; + let driver = ChainDriver::new(&rbuilder).await?.with_gas_limit(10_00_000); + + let done = AtomicBool::new(false); + + let (populate, produce) = join!( + async { + // Keep the builder busy with new transactions. 
+ loop { + match driver + .create_transaction() + .random_valid_transfer() + .send() + .await + { + Ok(_) => {} + Err(e) if e.to_string().contains("txpool is full") => { + // If the txpool is full, give it a short break + tokio::time::sleep(Duration::from_millis(100)).await; + } + Err(e) => return Err(e), + }; + + if done.load(Ordering::Relaxed) { + break; + } + + yield_now().await; + } + Ok::<(), eyre::Error>(()) + }, + async { + // Wait for a short time to allow the transaction population to start + // and fill up the txpool. + tokio::time::sleep(Duration::from_secs(1)).await; + + // Now, start producing blocks under load. + for _ in 0..10 { + // Ensure that the builder can still produce blocks under + // heavy load of incoming transactions. + let block = tokio::time::timeout( + Duration::from_secs(rbuilder.args().chain_block_time) + + Duration::from_millis(500), + driver.build_new_block(), + ) + .await + .expect("Timeout while waiting for block production") + .expect("Failed to produce block under load"); - // ensure we got a payload - assert!(result.is_ok(), "Failed to get payload: {:?}", result); + info!("Produced a block under load: {block:#?}"); - Ok(()) -} + yield_now().await; + } -/// This test validates that if we flood the builder with many transactions -/// and we request short block times, the builder can still eventually resolve all the transactions -#[tokio::test] -async fn transaction_flood_no_sleep() -> eyre::Result<()> { - let test_harness = TestHarnessBuilder::new("transaction_flood_no_sleep") - .build() - .await?; + // we're happy with one block produced under load + // set the done flag to true to stop the transaction population + done.store(true, Ordering::Relaxed); + info!("All blocks produced under load"); - let mut block_generator = test_harness.block_generator().await?; - let provider = test_harness.provider()?; + Ok::<(), eyre::Error>(()) + } + ); - // Send 200 valid transactions to the builder - // More than this and there is an issue with the RPC endpoint not being able to handle the load - let mut transactions = vec![]; - for _ in 0..200 { - let tx = test_harness.send_valid_transaction().await?; - let tx_hash = *tx.tx_hash(); - transactions.push(tx_hash); - } + populate.unwrap(); - // After a 10 blocks all the transactions should be included in a block - for _ in 0..10 { - block_generator.submit_payload(None, 0, true).await.unwrap(); - } - - for tx in transactions { - provider.get_transaction_receipt(tx).await?; - } + //assert!(populate.is_ok(), "Failed to populate transactions"); + assert!(produce.is_ok(), "Failed to produce block under load"); Ok(()) } diff --git a/crates/op-rbuilder/src/tests/vanilla/txpool.rs b/crates/op-rbuilder/src/tests/vanilla/txpool.rs index cbf15512d..00c6c5d07 100644 --- a/crates/op-rbuilder/src/tests/vanilla/txpool.rs +++ b/crates/op-rbuilder/src/tests/vanilla/txpool.rs @@ -1,45 +1,54 @@ -use crate::tests::{framework::TestHarnessBuilder, ONE_ETH}; -use alloy_provider::ext::TxPoolApi; +use crate::{ + builders::StandardBuilder, + tests::{ + default_node_config, BlockTransactionsExt, ChainDriver, ChainDriverExt, LocalInstance, ONE_ETH + }, +}; +use reth::args::TxPoolArgs; +use reth_node_builder::NodeConfig; +use reth_optimism_chainspec::OpChainSpec; /// This test ensures that pending pool custom limit is respected and priority tx would be included even when pool if full. 
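// The cap under test is reth's `TxPoolArgs::pending_max_count`, overridden through
// the new `new_with_config` hook. A sketch of that override (the type parameters
// `StandardBuilder` and `OpChainSpec` are assumed; the full wiring is in the test
// below):
//
//     let rbuilder = LocalInstance::new_with_config::<StandardBuilder>(
//         Default::default(),
//         NodeConfig::<OpChainSpec> {
//             txpool: TxPoolArgs { pending_max_count: 50, ..Default::default() },
//             ..default_node_config()
//         },
//     )
//     .await?;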
#[tokio::test] async fn pending_pool_limit() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("pending_pool_limit") - .with_namespaces("txpool,eth,debug,admin") - .with_extra_params("--txpool.pending-max-count 50") - .build() - .await?; + let rbuilder = LocalInstance::new_with_config::( + Default::default(), + NodeConfig:: { + txpool: TxPoolArgs { + pending_max_count: 50, + ..Default::default() + }, + ..default_node_config() + }, + ) + .await?; + + let driver = ChainDriver::new(&rbuilder).await?; + let accounts = driver.fund_accounts(50, ONE_ETH).await?; - let mut generator = harness.block_generator().await?; // Send 50 txs from different addrs - let accounts = generator.create_funded_accounts(2, ONE_ETH).await?; let acc_no_priority = accounts.first().unwrap(); let acc_with_priority = accounts.last().unwrap(); for _ in 0..50 { - let _ = harness + let _ = driver .create_transaction() .with_signer(*acc_no_priority) .send() .await?; } - let pool = harness - .provider() - .expect("provider not available") - .txpool_status() - .await?; assert_eq!( - pool.pending, 50, + rbuilder.pool().pending_count(), 50, "Pending pool must contain at max 50 txs {:?}", - pool + rbuilder.pool().pending_count() ); // Send 10 txs that should be included in the block let mut txs = Vec::new(); for _ in 0..10 { - let tx = harness + let tx = driver .create_transaction() .with_signer(*acc_with_priority) .with_max_priority_fee_per_gas(10) @@ -48,22 +57,17 @@ async fn pending_pool_limit() -> eyre::Result<()> { txs.push(*tx.tx_hash()); } - let pool = harness - .provider() - .expect("provider not available") - .txpool_status() - .await?; assert_eq!( - pool.pending, 50, + rbuilder.pool().pending_count(), 50, "Pending pool must contain at max 50 txs {:?}", - pool + rbuilder.pool().pending_count() ); // After we try building block our reverting tx would be removed and other tx will move to queue pool - let block = generator.generate_block().await?; + let block = driver.build_new_block().await?; // Ensure that 10 extra txs got included - assert!(block.includes_vec(txs)); + assert!(txs.into_iter().all(|tx| block.includes(&tx))); Ok(()) } From de69af78328af086bc4a727b3f644c13a6a92525 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 11:57:23 +0000 Subject: [PATCH 02/23] revert_protection_monitor_transaction_gc passes --- .../op-rbuilder/src/tests/vanilla/revert.rs | 640 +++++++++--------- 1 file changed, 320 insertions(+), 320 deletions(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/revert.rs b/crates/op-rbuilder/src/tests/vanilla/revert.rs index 5f44204d0..4a11d61ab 100644 --- a/crates/op-rbuilder/src/tests/vanilla/revert.rs +++ b/crates/op-rbuilder/src/tests/vanilla/revert.rs @@ -2,8 +2,7 @@ use alloy_provider::{PendingTransactionBuilder, Provider}; use op_alloy_network::Optimism; use crate::{ - primitives::bundle::MAX_BLOCK_RANGE_BLOCKS, - tests::{BundleOpts, TestHarness, TestHarnessBuilder}, + args::OpRbuilderArgs, builders::StandardBuilder, primitives::bundle::MAX_BLOCK_RANGE_BLOCKS, tests::{BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, TestHarness, TestHarnessBuilder, TransactionBuilderExt, ONE_ETH} }; /// This test ensures that the transactions that get reverted and not included in the block, @@ -11,23 +10,26 @@ use crate::{ /// This test creates N transactions with different block ranges. 
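// Each garbage-collected submission in the test below takes this shape; the expiry
// is driven by `BundleOpts::block_number_max` (sketch, with `driver`, the signer and
// `latest_block_number` set up as in the test):
//
//     let txn = driver
//         .create_transaction()
//         .random_reverting_transaction()
//         .with_bundle(BundleOpts {
//             block_number_max: Some(latest_block_number + 1),
//             ..Default::default()
//         })
//         .send()
//         .await?;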
#[tokio::test] async fn revert_protection_monitor_transaction_gc() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("revert_protection_monitor_transaction_gc") - .with_revert_protection() - .with_namespaces("eth,web3,txpool") - .build() - .await?; + let rbuilder = LocalInstance::new::(OpRbuilderArgs { + enable_revert_protection: true, + ..Default::default() + }).await?; - let mut generator = harness.block_generator().await?; + let driver = ChainDriver::new(&rbuilder).await?; + let accounts = driver.fund_accounts(10, ONE_ETH).await?; + let latest_block_number = driver.latest().await?.header.number; // send 10 bundles with different block ranges let mut pending_txn = Vec::new(); - for i in 1..=10 { - let txn = harness + + for i in 0..accounts.len() { + let txn = driver .create_transaction() - .with_revert() + .random_reverting_transaction() + .with_signer(accounts[i].clone()) .with_bundle(BundleOpts { - block_number_max: Some(i), - block_number_min: None, + block_number_max: Some(latest_block_number + i as u64 + 1), + ..Default::default() }) .send() .await?; @@ -36,325 +38,323 @@ async fn revert_protection_monitor_transaction_gc() -> eyre::Result<()> { // generate 10 blocks for i in 0..10 { - let generated_block = generator.generate_block().await?; + let generated_block = driver.build_new_block().await?; // blocks should only include two transactions (deposit + builder) - assert_eq!(generated_block.block.transactions.len(), 2); + assert_eq!(generated_block.transactions.len(), 2); // since we created the 10 transactions with increasing block ranges, as we generate blocks // one transaction will be gc on each block. // transactions from [0, i] should be dropped, transactions from [i+1, 10] should be queued for j in 0..=i { - let status = harness.check_tx_in_pool(*pending_txn[j].tx_hash()).await?; - assert!(status.is_dropped()); + assert!(rbuilder.pool().is_dropped(*pending_txn[j].tx_hash())); } for j in i + 1..10 { - let status = harness.check_tx_in_pool(*pending_txn[j].tx_hash()).await?; - assert!(status.is_queued()); + assert!(rbuilder.pool().is_pending(*pending_txn[j].tx_hash())); } } Ok(()) } -/// If revert protection is disabled, the transactions that revert are included in the block. -#[tokio::test] -async fn revert_protection_disabled() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("revert_protection_disabled") - .build() - .await?; - - let mut generator = harness.block_generator().await?; - - for _ in 0..10 { - let valid_tx = harness.send_valid_transaction().await?; - let reverting_tx = harness.send_revert_transaction().await?; - let block_generated = generator.generate_block().await?; - - assert!(block_generated.includes(*valid_tx.tx_hash())); - assert!(block_generated.includes(*reverting_tx.tx_hash())); - } - - Ok(()) -} - -/// If revert protection is disabled, it should not be possible to send a revert bundle -/// since the revert RPC endpoint is not available. -#[tokio::test] -async fn revert_protection_disabled_bundle_endpoint_error() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("revert_protection_disabled_bundle_endpoint_error") - .build() - .await?; - - let res = harness - .create_transaction() - .with_bundle(BundleOpts::default()) - .send() - .await; - - assert!( - res.is_err(), - "Expected error because method is not available" - ); - Ok(()) -} - -/// Test the behaviour of the revert protection bundle, if the bundle **does not** revert -/// the transaction is included in the block. 
If the bundle reverts, the transaction -/// is not included in the block and tried again for the next bundle range blocks -/// when it will be dropped from the pool. -#[tokio::test] -async fn revert_protection_bundle() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("revert_protection_bundle") - .with_revert_protection() - .with_namespaces("eth,web3,txpool") - .build() - .await?; - - let mut generator = harness.block_generator().await?; // Block 1 - - // Test 1: Bundle does not revert - let valid_bundle = harness - .create_transaction() - .with_bundle(BundleOpts::default()) - .send() - .await?; - - let block_generated = generator.generate_block().await?; // Block 2 - assert!(block_generated.includes(*valid_bundle.tx_hash())); - - let bundle_opts = BundleOpts { - block_number_max: Some(5), - block_number_min: None, - }; - - let reverted_bundle_0 = harness - .create_transaction() - .with_revert() - .with_reverted_hash() - .with_bundle(bundle_opts) - .send() - .await?; - - // Test 2: Bundle reverts. It is included in the block since the transaction - // includes the reverted_hashes field - let block_generated = generator.generate_block().await?; // Block 3 - assert!(block_generated.includes(*reverted_bundle_0.tx_hash())); - - let reverted_bundle_1 = harness - .create_transaction() - .with_revert() - .with_bundle(bundle_opts) - .send() - .await?; - - // Test 3: Bundle reverts. It is not included in the block since it reverts - // and the hash is not in the reverted_hashes field. - let block_generated = generator.generate_block().await?; // Block 4 - assert!(block_generated.not_includes(*reverted_bundle_1.tx_hash())); - - // After the block the transaction is still pending in the pool - assert!(harness - .check_tx_in_pool(*reverted_bundle_1.tx_hash()) - .await? - .is_pending()); - - // Test 3: Chain progresses beyond the bundle range. The transaction is dropped from the pool - generator.generate_block().await?; // Block 5 - assert!(harness - .check_tx_in_pool(*reverted_bundle_1.tx_hash()) - .await? - .is_pending()); - - generator.generate_block().await?; // Block 6 - assert!(harness - .check_tx_in_pool(*reverted_bundle_1.tx_hash()) - .await? - .is_dropped()); - - Ok(()) -} - -/// Test the behaviour of the revert protection bundle with a min block number. 
-#[tokio::test] -async fn revert_protection_bundle_min_block_number() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("revert_protection_bundle_min_block_number") - .with_revert_protection() - .build() - .await?; - - let mut generator = harness.block_generator().await?; - - // The bundle is valid when the min block number is equal to the current block - let bundle_with_min_block = harness - .create_transaction() - .with_revert() // the transaction reverts but it is included in the block - .with_reverted_hash() - .with_bundle(BundleOpts { - block_number_max: None, - block_number_min: Some(2), - }) - .send() - .await?; - - let block = generator.generate_block().await?; // Block 1, bundle still not valid - assert!(block.not_includes(*bundle_with_min_block.tx_hash())); - - let block = generator.generate_block().await?; // Block 2, bundle is valid - assert!(block.includes(*bundle_with_min_block.tx_hash())); - - // Send a bundle with a match of min and max block number - let bundle_with_min_and_max_block = harness - .create_transaction() - .with_revert() - .with_reverted_hash() - .with_bundle(BundleOpts { - block_number_max: Some(4), - block_number_min: Some(4), - }) - .send() - .await?; - - let block = generator.generate_block().await?; // Block 3, bundle still not valid - assert!(block.not_includes(*bundle_with_min_and_max_block.tx_hash())); - - let block = generator.generate_block().await?; // Block 4, bundle is valid - assert!(block.includes(*bundle_with_min_and_max_block.tx_hash())); - - Ok(()) -} - -/// Test the range limits for the revert protection bundle. -#[tokio::test] -async fn revert_protection_bundle_range_limits() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("revert_protection_bundle_range_limits") - .with_revert_protection() - .build() - .await?; - - let mut generator = harness.block_generator().await?; - - // Advance two blocks and try to send a bundle with max block = 1 - generator.generate_block().await?; // Block 1 - generator.generate_block().await?; // Block 2 - - async fn send_bundle( - harness: &TestHarness, - block_number_max: u64, - block_number_min: Option, - ) -> eyre::Result> { - harness - .create_transaction() - .with_bundle(BundleOpts { - block_number_max: Some(block_number_max), - block_number_min: block_number_min, - }) - .send() - .await - } - - // Max block cannot be a past block - assert!(send_bundle(&harness, 1, None).await.is_err()); - - // Bundles are valid if their max block in in between the current block and the max block range - let current_block = 2; - let next_valid_block = current_block + 1; - - for i in next_valid_block..next_valid_block + MAX_BLOCK_RANGE_BLOCKS { - assert!(send_bundle(&harness, i, None).await.is_ok()); - } - - // A bundle with a block out of range is invalid - assert!(send_bundle( - &harness, - next_valid_block + MAX_BLOCK_RANGE_BLOCKS + 1, - None - ) - .await - .is_err()); - - // A bundle with a min block number higher than the max block is invalid - assert!(send_bundle(&harness, 1, Some(2)).await.is_err()); - - // A bundle with a min block number lower or equal to the current block is valid - assert!(send_bundle(&harness, next_valid_block, Some(current_block)) - .await - .is_ok()); - assert!(send_bundle(&harness, next_valid_block, Some(0)) - .await - .is_ok()); - - // A bundle with a min block equal to max block is valid - assert!( - send_bundle(&harness, next_valid_block, Some(next_valid_block)) - .await - .is_ok() - ); - - Ok(()) -} - -/// If a transaction reverts and was sent as a normal 
transaction through the eth_sendRawTransaction -/// bundle, the transaction should be included in the block. -/// This behaviour is the same as the 'revert_protection_disabled' test. -#[tokio::test] -async fn revert_protection_allow_reverted_transactions_without_bundle() -> eyre::Result<()> { - let harness = - TestHarnessBuilder::new("revert_protection_allow_reverted_transactions_without_bundle") - .with_revert_protection() - .build() - .await?; - - let mut generator = harness.block_generator().await?; - - for _ in 0..10 { - let valid_tx = harness.send_valid_transaction().await?; - let reverting_tx = harness.send_revert_transaction().await?; - let block_generated = generator.generate_block().await?; - - assert!(block_generated.includes(*valid_tx.tx_hash())); - assert!(block_generated.includes(*reverting_tx.tx_hash())); - } - - Ok(()) -} - -/// If a transaction reverts and gets dropped it, the eth_getTransactionReceipt should return -/// an error message that it was dropped. -#[tokio::test] -async fn revert_protection_check_transaction_receipt_status_message() -> eyre::Result<()> { - let harness = - TestHarnessBuilder::new("revert_protection_check_transaction_receipt_status_message") - .with_revert_protection() - .build() - .await?; - - let provider = harness.provider()?; - let mut generator = harness.block_generator().await?; - - let reverting_tx = harness - .create_transaction() - .with_revert() - .with_bundle(BundleOpts { - block_number_max: Some(3), - block_number_min: None, - }) - .send() - .await?; - let tx_hash = reverting_tx.tx_hash(); - - let _ = generator.generate_block().await?; - let receipt = provider.get_transaction_receipt(*tx_hash).await?; - assert!(receipt.is_none()); - - let _ = generator.generate_block().await?; - let receipt = provider.get_transaction_receipt(*tx_hash).await?; - assert!(receipt.is_none()); - - // Dropped - let _ = generator.generate_block().await?; - let receipt = provider.get_transaction_receipt(*tx_hash).await; - assert!(receipt.is_err()); - - Ok(()) -} +// /// If revert protection is disabled, the transactions that revert are included in the block. +// #[tokio::test] +// async fn revert_protection_disabled() -> eyre::Result<()> { +// let harness = TestHarnessBuilder::new("revert_protection_disabled") +// .build() +// .await?; + +// let mut generator = harness.block_generator().await?; + +// for _ in 0..10 { +// let valid_tx = harness.send_valid_transaction().await?; +// let reverting_tx = harness.send_revert_transaction().await?; +// let block_generated = generator.generate_block().await?; + +// assert!(block_generated.includes(*valid_tx.tx_hash())); +// assert!(block_generated.includes(*reverting_tx.tx_hash())); +// } + +// Ok(()) +// } + +// /// If revert protection is disabled, it should not be possible to send a revert bundle +// /// since the revert RPC endpoint is not available. +// #[tokio::test] +// async fn revert_protection_disabled_bundle_endpoint_error() -> eyre::Result<()> { +// let harness = TestHarnessBuilder::new("revert_protection_disabled_bundle_endpoint_error") +// .build() +// .await?; + +// let res = harness +// .create_transaction() +// .with_bundle(BundleOpts::default()) +// .send() +// .await; + +// assert!( +// res.is_err(), +// "Expected error because method is not available" +// ); +// Ok(()) +// } + +// /// Test the behaviour of the revert protection bundle, if the bundle **does not** revert +// /// the transaction is included in the block. 
If the bundle reverts, the transaction +// /// is not included in the block and tried again for the next bundle range blocks +// /// when it will be dropped from the pool. +// #[tokio::test] +// async fn revert_protection_bundle() -> eyre::Result<()> { +// let harness = TestHarnessBuilder::new("revert_protection_bundle") +// .with_revert_protection() +// .with_namespaces("eth,web3,txpool") +// .build() +// .await?; + +// let mut generator = harness.block_generator().await?; // Block 1 + +// // Test 1: Bundle does not revert +// let valid_bundle = harness +// .create_transaction() +// .with_bundle(BundleOpts::default()) +// .send() +// .await?; + +// let block_generated = generator.generate_block().await?; // Block 2 +// assert!(block_generated.includes(*valid_bundle.tx_hash())); + +// let bundle_opts = BundleOpts { +// block_number_max: Some(5), +// block_number_min: None, +// }; + +// let reverted_bundle_0 = harness +// .create_transaction() +// .with_revert() +// .with_reverted_hash() +// .with_bundle(bundle_opts) +// .send() +// .await?; + +// // Test 2: Bundle reverts. It is included in the block since the transaction +// // includes the reverted_hashes field +// let block_generated = generator.generate_block().await?; // Block 3 +// assert!(block_generated.includes(*reverted_bundle_0.tx_hash())); + +// let reverted_bundle_1 = harness +// .create_transaction() +// .with_revert() +// .with_bundle(bundle_opts) +// .send() +// .await?; + +// // Test 3: Bundle reverts. It is not included in the block since it reverts +// // and the hash is not in the reverted_hashes field. +// let block_generated = generator.generate_block().await?; // Block 4 +// assert!(block_generated.not_includes(*reverted_bundle_1.tx_hash())); + +// // After the block the transaction is still pending in the pool +// assert!(harness +// .check_tx_in_pool(*reverted_bundle_1.tx_hash()) +// .await? +// .is_pending()); + +// // Test 3: Chain progresses beyond the bundle range. The transaction is dropped from the pool +// generator.generate_block().await?; // Block 5 +// assert!(harness +// .check_tx_in_pool(*reverted_bundle_1.tx_hash()) +// .await? +// .is_pending()); + +// generator.generate_block().await?; // Block 6 +// assert!(harness +// .check_tx_in_pool(*reverted_bundle_1.tx_hash()) +// .await? +// .is_dropped()); + +// Ok(()) +// } + +// /// Test the behaviour of the revert protection bundle with a min block number. 
+// #[tokio::test] +// async fn revert_protection_bundle_min_block_number() -> eyre::Result<()> { +// let harness = TestHarnessBuilder::new("revert_protection_bundle_min_block_number") +// .with_revert_protection() +// .build() +// .await?; + +// let mut generator = harness.block_generator().await?; + +// // The bundle is valid when the min block number is equal to the current block +// let bundle_with_min_block = harness +// .create_transaction() +// .with_revert() // the transaction reverts but it is included in the block +// .with_reverted_hash() +// .with_bundle(BundleOpts { +// block_number_max: None, +// block_number_min: Some(2), +// }) +// .send() +// .await?; + +// let block = generator.generate_block().await?; // Block 1, bundle still not valid +// assert!(block.not_includes(*bundle_with_min_block.tx_hash())); + +// let block = generator.generate_block().await?; // Block 2, bundle is valid +// assert!(block.includes(*bundle_with_min_block.tx_hash())); + +// // Send a bundle with a match of min and max block number +// let bundle_with_min_and_max_block = harness +// .create_transaction() +// .with_revert() +// .with_reverted_hash() +// .with_bundle(BundleOpts { +// block_number_max: Some(4), +// block_number_min: Some(4), +// }) +// .send() +// .await?; + +// let block = generator.generate_block().await?; // Block 3, bundle still not valid +// assert!(block.not_includes(*bundle_with_min_and_max_block.tx_hash())); + +// let block = generator.generate_block().await?; // Block 4, bundle is valid +// assert!(block.includes(*bundle_with_min_and_max_block.tx_hash())); + +// Ok(()) +// } + +// /// Test the range limits for the revert protection bundle. +// #[tokio::test] +// async fn revert_protection_bundle_range_limits() -> eyre::Result<()> { +// let harness = TestHarnessBuilder::new("revert_protection_bundle_range_limits") +// .with_revert_protection() +// .build() +// .await?; + +// let mut generator = harness.block_generator().await?; + +// // Advance two blocks and try to send a bundle with max block = 1 +// generator.generate_block().await?; // Block 1 +// generator.generate_block().await?; // Block 2 + +// async fn send_bundle( +// harness: &TestHarness, +// block_number_max: u64, +// block_number_min: Option, +// ) -> eyre::Result> { +// harness +// .create_transaction() +// .with_bundle(BundleOpts { +// block_number_max: Some(block_number_max), +// block_number_min: block_number_min, +// }) +// .send() +// .await +// } + +// // Max block cannot be a past block +// assert!(send_bundle(&harness, 1, None).await.is_err()); + +// // Bundles are valid if their max block in in between the current block and the max block range +// let current_block = 2; +// let next_valid_block = current_block + 1; + +// for i in next_valid_block..next_valid_block + MAX_BLOCK_RANGE_BLOCKS { +// assert!(send_bundle(&harness, i, None).await.is_ok()); +// } + +// // A bundle with a block out of range is invalid +// assert!(send_bundle( +// &harness, +// next_valid_block + MAX_BLOCK_RANGE_BLOCKS + 1, +// None +// ) +// .await +// .is_err()); + +// // A bundle with a min block number higher than the max block is invalid +// assert!(send_bundle(&harness, 1, Some(2)).await.is_err()); + +// // A bundle with a min block number lower or equal to the current block is valid +// assert!(send_bundle(&harness, next_valid_block, Some(current_block)) +// .await +// .is_ok()); +// assert!(send_bundle(&harness, next_valid_block, Some(0)) +// .await +// .is_ok()); + +// // A bundle with a min block equal to max block is 
valid +// assert!( +// send_bundle(&harness, next_valid_block, Some(next_valid_block)) +// .await +// .is_ok() +// ); + +// Ok(()) +// } + +// /// If a transaction reverts and was sent as a normal transaction through the eth_sendRawTransaction +// /// bundle, the transaction should be included in the block. +// /// This behaviour is the same as the 'revert_protection_disabled' test. +// #[tokio::test] +// async fn revert_protection_allow_reverted_transactions_without_bundle() -> eyre::Result<()> { +// let harness = +// TestHarnessBuilder::new("revert_protection_allow_reverted_transactions_without_bundle") +// .with_revert_protection() +// .build() +// .await?; + +// let mut generator = harness.block_generator().await?; + +// for _ in 0..10 { +// let valid_tx = harness.send_valid_transaction().await?; +// let reverting_tx = harness.send_revert_transaction().await?; +// let block_generated = generator.generate_block().await?; + +// assert!(block_generated.includes(*valid_tx.tx_hash())); +// assert!(block_generated.includes(*reverting_tx.tx_hash())); +// } + +// Ok(()) +// } + +// /// If a transaction reverts and gets dropped it, the eth_getTransactionReceipt should return +// /// an error message that it was dropped. +// #[tokio::test] +// async fn revert_protection_check_transaction_receipt_status_message() -> eyre::Result<()> { +// let harness = +// TestHarnessBuilder::new("revert_protection_check_transaction_receipt_status_message") +// .with_revert_protection() +// .build() +// .await?; + +// let provider = harness.provider()?; +// let mut generator = harness.block_generator().await?; + +// let reverting_tx = harness +// .create_transaction() +// .with_revert() +// .with_bundle(BundleOpts { +// block_number_max: Some(3), +// block_number_min: None, +// }) +// .send() +// .await?; +// let tx_hash = reverting_tx.tx_hash(); + +// let _ = generator.generate_block().await?; +// let receipt = provider.get_transaction_receipt(*tx_hash).await?; +// assert!(receipt.is_none()); + +// let _ = generator.generate_block().await?; +// let receipt = provider.get_transaction_receipt(*tx_hash).await?; +// assert!(receipt.is_none()); + +// // Dropped +// let _ = generator.generate_block().await?; +// let receipt = provider.get_transaction_receipt(*tx_hash).await; +// assert!(receipt.is_err()); + +// Ok(()) +// } From c8c04a10022abbd29590ea4dd02a695076c73552 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 12:01:12 +0000 Subject: [PATCH 03/23] revert_protection_disabled passes --- .../op-rbuilder/src/tests/framework/utils.rs | 11 +++- .../op-rbuilder/src/tests/vanilla/revert.rs | 57 ++++++++++++------- .../op-rbuilder/src/tests/vanilla/txpool.rs | 10 ++-- 3 files changed, 52 insertions(+), 26 deletions(-) diff --git a/crates/op-rbuilder/src/tests/framework/utils.rs b/crates/op-rbuilder/src/tests/framework/utils.rs index 7d561aea7..39434cab1 100644 --- a/crates/op-rbuilder/src/tests/framework/utils.rs +++ b/crates/op-rbuilder/src/tests/framework/utils.rs @@ -29,7 +29,11 @@ impl TransactionBuilderExt for TransactionBuilder { pub trait ChainDriverExt { fn fund_default_accounts(&self) -> impl Future>; - fn fund_many(&self, addresses: Vec
, amount: u128) -> impl Future>; + fn fund_many( + &self, + addresses: Vec
, + amount: u128, + ) -> impl Future>; fn fund(&self, address: Address, amount: u128) -> impl Future>; fn first_funded_address(&self) -> Address { FUNDED_PRIVATE_KEYS[0] @@ -44,7 +48,8 @@ pub trait ChainDriverExt { ) -> impl Future>> { async move { let accounts = (0..count).map(|_| Signer::random()).collect::>(); - self.fund_many(accounts.iter().map(|a| a.address).collect(), amount).await?; + self.fund_many(accounts.iter().map(|a| a.address).collect(), amount) + .await?; Ok(accounts) } } @@ -73,7 +78,7 @@ impl ChainDriverExt for ChainDriver { is_system_transaction: false, input: Default::default(), // No input data for the deposit }; - + let signer = Signer::random(); let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit))?; let signed_tx_rlp = signed_tx.encoded_2718(); diff --git a/crates/op-rbuilder/src/tests/vanilla/revert.rs b/crates/op-rbuilder/src/tests/vanilla/revert.rs index 4a11d61ab..7af96ce8f 100644 --- a/crates/op-rbuilder/src/tests/vanilla/revert.rs +++ b/crates/op-rbuilder/src/tests/vanilla/revert.rs @@ -2,7 +2,13 @@ use alloy_provider::{PendingTransactionBuilder, Provider}; use op_alloy_network::Optimism; use crate::{ - args::OpRbuilderArgs, builders::StandardBuilder, primitives::bundle::MAX_BLOCK_RANGE_BLOCKS, tests::{BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, TestHarness, TestHarnessBuilder, TransactionBuilderExt, ONE_ETH} + args::OpRbuilderArgs, + builders::StandardBuilder, + primitives::bundle::MAX_BLOCK_RANGE_BLOCKS, + tests::{ + BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, TestHarness, TestHarnessBuilder, + TransactionBuilderExt, ONE_ETH, + }, }; /// This test ensures that the transactions that get reverted and not included in the block, @@ -13,7 +19,8 @@ async fn revert_protection_monitor_transaction_gc() -> eyre::Result<()> { let rbuilder = LocalInstance::new::(OpRbuilderArgs { enable_revert_protection: true, ..Default::default() - }).await?; + }) + .await?; let driver = ChainDriver::new(&rbuilder).await?; let accounts = driver.fund_accounts(10, ONE_ETH).await?; @@ -57,26 +64,38 @@ async fn revert_protection_monitor_transaction_gc() -> eyre::Result<()> { Ok(()) } -// /// If revert protection is disabled, the transactions that revert are included in the block. -// #[tokio::test] -// async fn revert_protection_disabled() -> eyre::Result<()> { -// let harness = TestHarnessBuilder::new("revert_protection_disabled") -// .build() -// .await?; - -// let mut generator = harness.block_generator().await?; +/// If revert protection is disabled, the transactions that revert are included in the block. 
+#[tokio::test] +async fn revert_protection_disabled() -> eyre::Result<()> { + let rbuilder = LocalInstance::standard().await?; + let driver = rbuilder.driver().await?; -// for _ in 0..10 { -// let valid_tx = harness.send_valid_transaction().await?; -// let reverting_tx = harness.send_revert_transaction().await?; -// let block_generated = generator.generate_block().await?; + for _ in 0..10 { + let valid_tx = driver + .create_transaction() + .random_valid_transfer() + .send() + .await?; -// assert!(block_generated.includes(*valid_tx.tx_hash())); -// assert!(block_generated.includes(*reverting_tx.tx_hash())); -// } + let reverting_tx = driver + .create_transaction() + .random_reverting_transaction() + .send() + .await?; + let block = driver.build_new_block().await?; + + assert!(block + .transactions + .hashes() + .any(|hash| hash == *valid_tx.tx_hash())); + assert!(block + .transactions + .hashes() + .any(|hash| hash == *reverting_tx.tx_hash())); + } -// Ok(()) -// } + Ok(()) +} // /// If revert protection is disabled, it should not be possible to send a revert bundle // /// since the revert RPC endpoint is not available. diff --git a/crates/op-rbuilder/src/tests/vanilla/txpool.rs b/crates/op-rbuilder/src/tests/vanilla/txpool.rs index 00c6c5d07..edabf8bf2 100644 --- a/crates/op-rbuilder/src/tests/vanilla/txpool.rs +++ b/crates/op-rbuilder/src/tests/vanilla/txpool.rs @@ -1,7 +1,8 @@ use crate::{ builders::StandardBuilder, tests::{ - default_node_config, BlockTransactionsExt, ChainDriver, ChainDriverExt, LocalInstance, ONE_ETH + default_node_config, BlockTransactionsExt, ChainDriver, ChainDriverExt, LocalInstance, + ONE_ETH, }, }; use reth::args::TxPoolArgs; @@ -26,7 +27,6 @@ async fn pending_pool_limit() -> eyre::Result<()> { let driver = ChainDriver::new(&rbuilder).await?; let accounts = driver.fund_accounts(50, ONE_ETH).await?; - // Send 50 txs from different addrs let acc_no_priority = accounts.first().unwrap(); let acc_with_priority = accounts.last().unwrap(); @@ -40,7 +40,8 @@ async fn pending_pool_limit() -> eyre::Result<()> { } assert_eq!( - rbuilder.pool().pending_count(), 50, + rbuilder.pool().pending_count(), + 50, "Pending pool must contain at max 50 txs {:?}", rbuilder.pool().pending_count() ); @@ -58,7 +59,8 @@ async fn pending_pool_limit() -> eyre::Result<()> { } assert_eq!( - rbuilder.pool().pending_count(), 50, + rbuilder.pool().pending_count(), + 50, "Pending pool must contain at max 50 txs {:?}", rbuilder.pool().pending_count() ); From 14332861347eff8dee128f8b5d9354ee6ebdd05b Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 12:26:08 +0000 Subject: [PATCH 04/23] ordering tests are passing --- crates/op-rbuilder/src/tests/vanilla/mod.rs | 2 +- .../op-rbuilder/src/tests/vanilla/ordering.rs | 34 +- .../op-rbuilder/src/tests/vanilla/revert.rs | 536 ++++++++---------- 3 files changed, 273 insertions(+), 299 deletions(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/mod.rs b/crates/op-rbuilder/src/tests/vanilla/mod.rs index 6a448711e..12f52ca7d 100644 --- a/crates/op-rbuilder/src/tests/vanilla/mod.rs +++ b/crates/op-rbuilder/src/tests/vanilla/mod.rs @@ -1,7 +1,7 @@ #![cfg(test)] //mod data_availability; -//mod ordering; +mod ordering; mod revert; mod smoke; mod txpool; diff --git a/crates/op-rbuilder/src/tests/vanilla/ordering.rs b/crates/op-rbuilder/src/tests/vanilla/ordering.rs index f524e57d4..b2a15c342 100644 --- a/crates/op-rbuilder/src/tests/vanilla/ordering.rs +++ b/crates/op-rbuilder/src/tests/vanilla/ordering.rs @@ -1,21 +1,26 @@ -use 
crate::tests::framework::{TestHarnessBuilder, ONE_ETH}; +use crate::tests::{ + framework::{TestHarnessBuilder, ONE_ETH}, + ChainDriverExt, LocalInstance, +}; use alloy_consensus::Transaction; use futures::{future::join_all, stream, StreamExt}; /// This test ensures that the transactions are ordered by fee priority in the block. #[tokio::test] async fn fee_priority_ordering() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("integration_test_fee_priority_ordering") - .build() - .await?; + let rbuilder = LocalInstance::standard().await?; + let driver = rbuilder.driver().await?; + let accounts = driver.fund_accounts(10, ONE_ETH).await?; - let mut generator = harness.block_generator().await?; - let accounts = generator.create_funded_accounts(10, ONE_ETH).await?; - let base_fee = harness.latest_base_fee().await; + let latest_block = driver.latest().await?; + let base_fee = latest_block + .header + .base_fee_per_gas + .expect("Base fee should be present in the latest block"); // generate transactions with randomized tips let txs = join_all(accounts.iter().map(|signer| { - harness + driver .create_transaction() .with_signer(*signer) .with_max_priority_fee_per_gas(rand::random_range(1..50)) @@ -28,15 +33,16 @@ async fn fee_priority_ordering() -> eyre::Result<()> { .map(|tx| *tx.tx_hash()) .collect::>(); - generator.generate_block().await?; + driver.build_new_block().await?; // verify all transactions are included in the block assert!( stream::iter(txs.iter()) .all(|tx_hash| async { - harness - .latest_block() + driver + .latest_full() .await + .expect("Failed to fetch latest block") .transactions .hashes() .any(|hash| hash == *tx_hash) @@ -46,9 +52,9 @@ async fn fee_priority_ordering() -> eyre::Result<()> { ); // verify all transactions are ordered by fee priority - let txs_tips = harness - .latest_block() - .await + let txs_tips = driver + .latest_full() + .await? .into_transactions_vec() .into_iter() .skip(1) // skip the deposit transaction diff --git a/crates/op-rbuilder/src/tests/vanilla/revert.rs b/crates/op-rbuilder/src/tests/vanilla/revert.rs index 7af96ce8f..49375608a 100644 --- a/crates/op-rbuilder/src/tests/vanilla/revert.rs +++ b/crates/op-rbuilder/src/tests/vanilla/revert.rs @@ -6,8 +6,8 @@ use crate::{ builders::StandardBuilder, primitives::bundle::MAX_BLOCK_RANGE_BLOCKS, tests::{ - BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, TestHarness, TestHarnessBuilder, - TransactionBuilderExt, ONE_ETH, + BlockTransactionsExt, BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, TestHarness, + TestHarnessBuilder, TransactionBuilderExt, ONE_ETH, }, }; @@ -15,7 +15,7 @@ use crate::{ /// are eventually dropped from the pool once their block range is reached. /// This test creates N transactions with different block ranges. #[tokio::test] -async fn revert_protection_monitor_transaction_gc() -> eyre::Result<()> { +async fn monitor_transaction_gc() -> eyre::Result<()> { let rbuilder = LocalInstance::new::(OpRbuilderArgs { enable_revert_protection: true, ..Default::default() @@ -66,7 +66,7 @@ async fn revert_protection_monitor_transaction_gc() -> eyre::Result<()> { /// If revert protection is disabled, the transactions that revert are included in the block. 
#[tokio::test] -async fn revert_protection_disabled() -> eyre::Result<()> { +async fn disabled() -> eyre::Result<()> { let rbuilder = LocalInstance::standard().await?; let driver = rbuilder.driver().await?; @@ -97,283 +97,251 @@ async fn revert_protection_disabled() -> eyre::Result<()> { Ok(()) } -// /// If revert protection is disabled, it should not be possible to send a revert bundle -// /// since the revert RPC endpoint is not available. -// #[tokio::test] -// async fn revert_protection_disabled_bundle_endpoint_error() -> eyre::Result<()> { -// let harness = TestHarnessBuilder::new("revert_protection_disabled_bundle_endpoint_error") -// .build() -// .await?; - -// let res = harness -// .create_transaction() -// .with_bundle(BundleOpts::default()) -// .send() -// .await; - -// assert!( -// res.is_err(), -// "Expected error because method is not available" -// ); -// Ok(()) -// } - -// /// Test the behaviour of the revert protection bundle, if the bundle **does not** revert -// /// the transaction is included in the block. If the bundle reverts, the transaction -// /// is not included in the block and tried again for the next bundle range blocks -// /// when it will be dropped from the pool. -// #[tokio::test] -// async fn revert_protection_bundle() -> eyre::Result<()> { -// let harness = TestHarnessBuilder::new("revert_protection_bundle") -// .with_revert_protection() -// .with_namespaces("eth,web3,txpool") -// .build() -// .await?; - -// let mut generator = harness.block_generator().await?; // Block 1 - -// // Test 1: Bundle does not revert -// let valid_bundle = harness -// .create_transaction() -// .with_bundle(BundleOpts::default()) -// .send() -// .await?; - -// let block_generated = generator.generate_block().await?; // Block 2 -// assert!(block_generated.includes(*valid_bundle.tx_hash())); - -// let bundle_opts = BundleOpts { -// block_number_max: Some(5), -// block_number_min: None, -// }; - -// let reverted_bundle_0 = harness -// .create_transaction() -// .with_revert() -// .with_reverted_hash() -// .with_bundle(bundle_opts) -// .send() -// .await?; - -// // Test 2: Bundle reverts. It is included in the block since the transaction -// // includes the reverted_hashes field -// let block_generated = generator.generate_block().await?; // Block 3 -// assert!(block_generated.includes(*reverted_bundle_0.tx_hash())); - -// let reverted_bundle_1 = harness -// .create_transaction() -// .with_revert() -// .with_bundle(bundle_opts) -// .send() -// .await?; - -// // Test 3: Bundle reverts. It is not included in the block since it reverts -// // and the hash is not in the reverted_hashes field. -// let block_generated = generator.generate_block().await?; // Block 4 -// assert!(block_generated.not_includes(*reverted_bundle_1.tx_hash())); - -// // After the block the transaction is still pending in the pool -// assert!(harness -// .check_tx_in_pool(*reverted_bundle_1.tx_hash()) -// .await? -// .is_pending()); - -// // Test 3: Chain progresses beyond the bundle range. The transaction is dropped from the pool -// generator.generate_block().await?; // Block 5 -// assert!(harness -// .check_tx_in_pool(*reverted_bundle_1.tx_hash()) -// .await? -// .is_pending()); - -// generator.generate_block().await?; // Block 6 -// assert!(harness -// .check_tx_in_pool(*reverted_bundle_1.tx_hash()) -// .await? -// .is_dropped()); - -// Ok(()) -// } - -// /// Test the behaviour of the revert protection bundle with a min block number. 
-// #[tokio::test] -// async fn revert_protection_bundle_min_block_number() -> eyre::Result<()> { -// let harness = TestHarnessBuilder::new("revert_protection_bundle_min_block_number") -// .with_revert_protection() -// .build() -// .await?; - -// let mut generator = harness.block_generator().await?; - -// // The bundle is valid when the min block number is equal to the current block -// let bundle_with_min_block = harness -// .create_transaction() -// .with_revert() // the transaction reverts but it is included in the block -// .with_reverted_hash() -// .with_bundle(BundleOpts { -// block_number_max: None, -// block_number_min: Some(2), -// }) -// .send() -// .await?; - -// let block = generator.generate_block().await?; // Block 1, bundle still not valid -// assert!(block.not_includes(*bundle_with_min_block.tx_hash())); - -// let block = generator.generate_block().await?; // Block 2, bundle is valid -// assert!(block.includes(*bundle_with_min_block.tx_hash())); - -// // Send a bundle with a match of min and max block number -// let bundle_with_min_and_max_block = harness -// .create_transaction() -// .with_revert() -// .with_reverted_hash() -// .with_bundle(BundleOpts { -// block_number_max: Some(4), -// block_number_min: Some(4), -// }) -// .send() -// .await?; - -// let block = generator.generate_block().await?; // Block 3, bundle still not valid -// assert!(block.not_includes(*bundle_with_min_and_max_block.tx_hash())); - -// let block = generator.generate_block().await?; // Block 4, bundle is valid -// assert!(block.includes(*bundle_with_min_and_max_block.tx_hash())); - -// Ok(()) -// } - -// /// Test the range limits for the revert protection bundle. -// #[tokio::test] -// async fn revert_protection_bundle_range_limits() -> eyre::Result<()> { -// let harness = TestHarnessBuilder::new("revert_protection_bundle_range_limits") -// .with_revert_protection() -// .build() -// .await?; - -// let mut generator = harness.block_generator().await?; - -// // Advance two blocks and try to send a bundle with max block = 1 -// generator.generate_block().await?; // Block 1 -// generator.generate_block().await?; // Block 2 - -// async fn send_bundle( -// harness: &TestHarness, -// block_number_max: u64, -// block_number_min: Option, -// ) -> eyre::Result> { -// harness -// .create_transaction() -// .with_bundle(BundleOpts { -// block_number_max: Some(block_number_max), -// block_number_min: block_number_min, -// }) -// .send() -// .await -// } - -// // Max block cannot be a past block -// assert!(send_bundle(&harness, 1, None).await.is_err()); - -// // Bundles are valid if their max block in in between the current block and the max block range -// let current_block = 2; -// let next_valid_block = current_block + 1; - -// for i in next_valid_block..next_valid_block + MAX_BLOCK_RANGE_BLOCKS { -// assert!(send_bundle(&harness, i, None).await.is_ok()); -// } - -// // A bundle with a block out of range is invalid -// assert!(send_bundle( -// &harness, -// next_valid_block + MAX_BLOCK_RANGE_BLOCKS + 1, -// None -// ) -// .await -// .is_err()); - -// // A bundle with a min block number higher than the max block is invalid -// assert!(send_bundle(&harness, 1, Some(2)).await.is_err()); - -// // A bundle with a min block number lower or equal to the current block is valid -// assert!(send_bundle(&harness, next_valid_block, Some(current_block)) -// .await -// .is_ok()); -// assert!(send_bundle(&harness, next_valid_block, Some(0)) -// .await -// .is_ok()); - -// // A bundle with a min block equal to max block is 
valid -// assert!( -// send_bundle(&harness, next_valid_block, Some(next_valid_block)) -// .await -// .is_ok() -// ); - -// Ok(()) -// } - -// /// If a transaction reverts and was sent as a normal transaction through the eth_sendRawTransaction -// /// bundle, the transaction should be included in the block. -// /// This behaviour is the same as the 'revert_protection_disabled' test. -// #[tokio::test] -// async fn revert_protection_allow_reverted_transactions_without_bundle() -> eyre::Result<()> { -// let harness = -// TestHarnessBuilder::new("revert_protection_allow_reverted_transactions_without_bundle") -// .with_revert_protection() -// .build() -// .await?; - -// let mut generator = harness.block_generator().await?; - -// for _ in 0..10 { -// let valid_tx = harness.send_valid_transaction().await?; -// let reverting_tx = harness.send_revert_transaction().await?; -// let block_generated = generator.generate_block().await?; - -// assert!(block_generated.includes(*valid_tx.tx_hash())); -// assert!(block_generated.includes(*reverting_tx.tx_hash())); -// } - -// Ok(()) -// } - -// /// If a transaction reverts and gets dropped it, the eth_getTransactionReceipt should return -// /// an error message that it was dropped. -// #[tokio::test] -// async fn revert_protection_check_transaction_receipt_status_message() -> eyre::Result<()> { -// let harness = -// TestHarnessBuilder::new("revert_protection_check_transaction_receipt_status_message") -// .with_revert_protection() -// .build() -// .await?; - -// let provider = harness.provider()?; -// let mut generator = harness.block_generator().await?; - -// let reverting_tx = harness -// .create_transaction() -// .with_revert() -// .with_bundle(BundleOpts { -// block_number_max: Some(3), -// block_number_min: None, -// }) -// .send() -// .await?; -// let tx_hash = reverting_tx.tx_hash(); - -// let _ = generator.generate_block().await?; -// let receipt = provider.get_transaction_receipt(*tx_hash).await?; -// assert!(receipt.is_none()); - -// let _ = generator.generate_block().await?; -// let receipt = provider.get_transaction_receipt(*tx_hash).await?; -// assert!(receipt.is_none()); - -// // Dropped -// let _ = generator.generate_block().await?; -// let receipt = provider.get_transaction_receipt(*tx_hash).await; -// assert!(receipt.is_err()); - -// Ok(()) -// } +/// If revert protection is disabled, it should not be possible to send a revert bundle +/// since the revert RPC endpoint is not available. +#[tokio::test] +async fn disabled_bundle_endpoint_error() -> eyre::Result<()> { + let rbuilder = LocalInstance::standard().await?; + let driver = rbuilder.driver().await?; + + let res = driver + .create_transaction() + .with_bundle(BundleOpts::default()) + .send() + .await; + + assert!( + res.is_err(), + "Expected error because method is not available" + ); + Ok(()) +} + +/// Test the behaviour of the revert protection bundle, if the bundle **does not** revert +/// the transaction is included in the block. If the bundle reverts, the transaction +/// is not included in the block and tried again for the next bundle range blocks +/// when it will be dropped from the pool. 
+#[tokio::test] +async fn bundle() -> eyre::Result<()> { + let rbuilder = LocalInstance::new::(OpRbuilderArgs { + enable_revert_protection: true, + ..Default::default() + }) + .await?; + + let driver = rbuilder.driver().await?; + let _ = driver.build_new_block().await?; // Block 1 + + // Test 1: Bundle does not revert + let valid_bundle = driver + .create_transaction() + .random_valid_transfer() + .with_bundle(BundleOpts::default()) + .send() + .await?; + + let block2 = driver.build_new_block().await?; // Block 2 + assert!(block2 + .transactions + .hashes() + .includes(valid_bundle.tx_hash())); + + let bundle_opts = BundleOpts { + block_number_max: Some(4), + ..Default::default() + }; + + let reverted_bundle = driver + .create_transaction() + .random_reverting_transaction() + .with_bundle(bundle_opts) + .send() + .await?; + + // Test 2: Bundle reverts. It is not included in the block + let block3 = driver.build_new_block().await?; // Block 3 + assert!(!block3.includes(reverted_bundle.tx_hash())); + + // After the block the transaction is still pending in the pool + assert!(rbuilder.pool().is_pending(*reverted_bundle.tx_hash())); + + // Test 3: Chain progresses beyond the bundle range. The transaction is dropped from the pool + driver.build_new_block().await?; // Block 4 + assert!(rbuilder.pool().is_dropped(*reverted_bundle.tx_hash())); + + driver.build_new_block().await?; // Block 5 + assert!(rbuilder.pool().is_dropped(*reverted_bundle.tx_hash())); + + Ok(()) +} + +/// Test the behaviour of the revert protection bundle with a min block number. +#[tokio::test] +async fn bundle_min_block_number() -> eyre::Result<()> { + let rbuilder = LocalInstance::new::(OpRbuilderArgs { + enable_revert_protection: true, + ..Default::default() + }) + .await?; + + let driver = rbuilder.driver().await?; + + // The bundle is valid when the min block number is equal to the current block + let bundle_with_min_block = driver + .create_transaction() + .with_revert() // the transaction reverts but it is included in the block + .with_reverted_hash() + .with_bundle(BundleOpts { + block_number_max: None, + block_number_min: Some(2), + }) + .send() + .await?; + + let block = driver.build_new_block().await?; // Block 1, bundle still not valid + assert!(!block.includes(bundle_with_min_block.tx_hash())); + + let block = driver.build_new_block().await?; // Block 2, bundle is valid + assert!(block.includes(bundle_with_min_block.tx_hash())); + + // Send a bundle with a match of min and max block number + let bundle_with_min_and_max_block = driver + .create_transaction() + .with_revert() + .with_reverted_hash() + .with_bundle(BundleOpts { + block_number_max: Some(4), + block_number_min: Some(4), + }) + .send() + .await?; + + let block = driver.build_new_block().await?; // Block 3, bundle still not valid + assert!(!block.includes(bundle_with_min_and_max_block.tx_hash())); + + let block = driver.build_new_block().await?; // Block 4, bundle is valid + assert!(block.includes(bundle_with_min_and_max_block.tx_hash())); + + Ok(()) +} + +/// Test the range limits for the revert protection bundle. 
+#[tokio::test]
+async fn bundle_range_limits() -> eyre::Result<()> {
+    let rbuilder = LocalInstance::new::<StandardBuilder>(OpRbuilderArgs {
+        enable_revert_protection: true,
+        ..Default::default()
+    })
+    .await?;
+
+    let driver = rbuilder.driver().await?;
+    let _ = driver.build_new_block().await?; // Block 1
+    let _ = driver.build_new_block().await?; // Block 2
+
+    async fn send_bundle(
+        driver: &ChainDriver,
+        block_number_max: u64,
+    ) -> eyre::Result<PendingTransactionBuilder<Optimism>> {
+        driver
+            .create_transaction()
+            .with_bundle(BundleOpts {
+                block_number_max: Some(block_number_max),
+                ..Default::default()
+            })
+            .send()
+            .await
+    }
+
+    // Max block cannot be a past block
+    assert!(send_bundle(&driver, 1).await.is_err());
+
+    // Bundles are valid if their max block is between the current block and the max block range
+    let next_valid_block = 3;
+
+    for i in next_valid_block..next_valid_block + MAX_BLOCK_RANGE_BLOCKS {
+        assert!(send_bundle(&driver, i).await.is_ok());
+    }
+
+    // A bundle with a block out of range is invalid
+    assert!(
+        send_bundle(&driver, next_valid_block + MAX_BLOCK_RANGE_BLOCKS + 1)
+            .await
+            .is_err()
+    );
+
+    Ok(())
+}
+
+/// If a transaction reverts and was sent as a normal transaction through the eth_sendRawTransaction
+/// endpoint (not as a bundle), the transaction should be included in the block.
+/// This behaviour is the same as the 'disabled' test.
+#[tokio::test]
+async fn allow_reverted_transactions_without_bundle() -> eyre::Result<()> {
+    let rbuilder = LocalInstance::new::<StandardBuilder>(OpRbuilderArgs {
+        enable_revert_protection: true,
+        ..Default::default()
+    })
+    .await?;
+
+    let driver = rbuilder.driver().await?;
+
+    for _ in 0..10 {
+        let valid_tx = driver
+            .create_transaction()
+            .random_valid_transfer()
+            .send()
+            .await?;
+        let reverting_tx = driver
+            .create_transaction()
+            .random_reverting_transaction()
+            .send()
+            .await?;
+        let block = driver.build_new_block().await?;
+
+        assert!(block.includes(valid_tx.tx_hash()));
+        assert!(block.includes(reverting_tx.tx_hash()));
+    }
+
+    Ok(())
+}
+
+/// If a transaction reverts and gets dropped, the eth_getTransactionReceipt call should return
+/// an error message indicating that it was dropped.
+#[tokio::test] +async fn check_transaction_receipt_status_message() -> eyre::Result<()> { + let rbuilder = LocalInstance::new::(OpRbuilderArgs { + enable_revert_protection: true, + ..Default::default() + }) + .await?; + + let driver = rbuilder.driver().await?; + let provider = rbuilder.provider().await?; + + let reverting_tx = driver + .create_transaction() + .random_reverting_transaction() + .with_bundle(BundleOpts { + block_number_max: Some(3), + ..Default::default() + }) + .send() + .await?; + let tx_hash = reverting_tx.tx_hash(); + + let _ = driver.build_new_block().await?; + let receipt = provider.get_transaction_receipt(*tx_hash).await?; + assert!(receipt.is_none()); + + let _ = driver.build_new_block().await?; + let receipt = provider.get_transaction_receipt(*tx_hash).await?; + assert!(receipt.is_none()); + + // Dropped + let _ = driver.build_new_block().await?; + let receipt = provider.get_transaction_receipt(*tx_hash).await; + + assert!(receipt.is_err()); + + Ok(()) +} From 5f18e652305717c78f8b7266820dad73d5eba076 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 12:49:18 +0000 Subject: [PATCH 05/23] all tests are passing --- crates/op-rbuilder/src/args/mod.rs | 2 +- .../src/tests/flashblocks/smoke.rs | 43 +++++++--- crates/op-rbuilder/src/tests/mod.rs | 2 +- .../src/tests/vanilla/data_availability.rs | 79 +++++++++---------- crates/op-rbuilder/src/tests/vanilla/mod.rs | 2 +- 5 files changed, 70 insertions(+), 58 deletions(-) diff --git a/crates/op-rbuilder/src/args/mod.rs b/crates/op-rbuilder/src/args/mod.rs index 20f716f97..9048fde70 100644 --- a/crates/op-rbuilder/src/args/mod.rs +++ b/crates/op-rbuilder/src/args/mod.rs @@ -3,7 +3,7 @@ use crate::{ metrics::{CARGO_PKG_VERSION, VERGEN_GIT_SHA}, }; use clap_builder::{CommandFactory, FromArgMatches}; -pub use op::OpRbuilderArgs; +pub use op::{FlashblocksArgs, OpRbuilderArgs}; use playground::PlaygroundOptions; use reth_optimism_cli::{chainspec::OpChainSpecParser, commands::Commands}; diff --git a/crates/op-rbuilder/src/tests/flashblocks/smoke.rs b/crates/op-rbuilder/src/tests/flashblocks/smoke.rs index f48a0f730..4b3b5b062 100644 --- a/crates/op-rbuilder/src/tests/flashblocks/smoke.rs +++ b/crates/op-rbuilder/src/tests/flashblocks/smoke.rs @@ -6,26 +6,40 @@ use tokio::task::JoinHandle; use tokio_tungstenite::{connect_async, tungstenite::Message}; use tokio_util::sync::CancellationToken; -use crate::tests::TestHarnessBuilder; +use crate::{ + args::{FlashblocksArgs, OpRbuilderArgs}, + builders::FlashblocksBuilder, + tests::{ChainDriverExt, LocalInstance, TestHarnessBuilder, TransactionBuilderExt}, + tx_signer::Signer, +}; #[tokio::test] async fn chain_produces_blocks() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("flashbots_chain_produces_blocks") - .with_flashblocks_port(1239) - .with_chain_block_time(2000) - .with_flashbots_block_time(200) - .build() - .await?; + let rbuilder = LocalInstance::new::(OpRbuilderArgs { + chain_block_time: 2000, + flashblocks: FlashblocksArgs { + enabled: true, + flashblocks_port: 1239, + flashblocks_addr: "127.0.0.1".into(), + flashblocks_block_time: 200, + }, + ..Default::default() + }) + .await?; + + let driver = rbuilder.driver().await?; + driver.fund_default_accounts().await?; // Create a struct to hold received messages let received_messages = Arc::new(Mutex::new(Vec::new())); let messages_clone = received_messages.clone(); let cancellation_token = CancellationToken::new(); + let flashblocks_ws_url = rbuilder.flashblocks_ws_url(); // Spawn WebSocket listener task 
let cancellation_token_clone = cancellation_token.clone(); let ws_handle: JoinHandle> = tokio::spawn(async move { - let (ws_stream, _) = connect_async("ws://localhost:1239").await?; + let (ws_stream, _) = connect_async(flashblocks_ws_url).await?; let (_, mut read) = ws_stream.split(); loop { @@ -40,22 +54,25 @@ async fn chain_produces_blocks() -> eyre::Result<()> { } }); - let mut generator = harness.block_generator().await?; - for _ in 0..10 { for _ in 0..5 { // send a valid transaction - let _ = harness.send_valid_transaction().await?; + let _ = driver + .create_transaction() + .random_valid_transfer() + .send() + .await?; } - let generated_block = generator.generate_block().await?; - assert_eq!(generated_block.num_transactions(), 8); // 5 normal txn + deposit + 2 builder txn + let block = driver.build_new_block().await?; + assert_eq!(block.transactions.len(), 8); // 5 normal txn + deposit + 2 builder txn tokio::time::sleep(std::time::Duration::from_secs(1)).await; } cancellation_token.cancel(); assert!(ws_handle.await.is_ok(), "WebSocket listener task failed"); + assert!( !received_messages .lock() diff --git a/crates/op-rbuilder/src/tests/mod.rs b/crates/op-rbuilder/src/tests/mod.rs index a8a4cd1d9..e3f96187c 100644 --- a/crates/op-rbuilder/src/tests/mod.rs +++ b/crates/op-rbuilder/src/tests/mod.rs @@ -2,5 +2,5 @@ mod framework; pub use framework::*; -//mod flashblocks; +mod flashblocks; mod vanilla; diff --git a/crates/op-rbuilder/src/tests/vanilla/data_availability.rs b/crates/op-rbuilder/src/tests/vanilla/data_availability.rs index cf640bab9..03fb17f14 100644 --- a/crates/op-rbuilder/src/tests/vanilla/data_availability.rs +++ b/crates/op-rbuilder/src/tests/vanilla/data_availability.rs @@ -1,33 +1,32 @@ -use crate::tests::framework::TestHarnessBuilder; +use crate::tests::{ + framework::TestHarnessBuilder, BlockTransactionsExt, LocalInstance, TransactionBuilderExt, +}; use alloy_provider::Provider; /// This test ensures that the transaction size limit is respected. /// We will set limit to 1 byte and see that the builder will not include any transactions. #[tokio::test] -async fn data_availability_tx_size_limit() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("data_availability_tx_size_limit") - .with_namespaces("admin,eth,miner") - .build() - .await?; - - let mut generator = harness.block_generator().await?; +async fn tx_size_limit() -> eyre::Result<()> { + let rbuilder = LocalInstance::standard().await?; + let driver = rbuilder.driver().await?; // Set (max_tx_da_size, max_block_da_size), with this case block won't fit any transaction - let call = harness - .provider()? + let call = driver + .provider() .raw_request::<(i32, i32), bool>("miner_setMaxDASize".into(), (1, 0)) .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); - let unfit_tx = harness + let unfit_tx = driver .create_transaction() .with_max_priority_fee_per_gas(50) .send() .await?; - let block = generator.generate_block().await?; + let block = driver.build_new_block().await?; + // tx should not be included because we set the tx_size_limit to 1 assert!( - block.not_includes(*unfit_tx.tx_hash()), + !block.includes(unfit_tx.tx_hash()), "transaction should not be included in the block" ); @@ -37,26 +36,26 @@ async fn data_availability_tx_size_limit() -> eyre::Result<()> { /// This test ensures that the block size limit is respected. /// We will set limit to 1 byte and see that the builder will not include any transactions. 
#[tokio::test] -async fn data_availability_block_size_limit() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("data_availability_block_size_limit") - .with_namespaces("admin,eth,miner") - .build() - .await?; - - let mut generator = harness.block_generator().await?; +async fn block_size_limit() -> eyre::Result<()> { + let rbuilder = LocalInstance::standard().await?; + let driver = rbuilder.driver().await?; // Set block da size to be small, so it won't include tx - let call = harness - .provider()? + let call = driver + .provider() .raw_request::<(i32, i32), bool>("miner_setMaxDASize".into(), (0, 1)) .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); - let unfit_tx = harness.send_valid_transaction().await?; - let block = generator.generate_block().await?; + let unfit_tx = driver + .create_transaction() + .random_valid_transfer() + .send() + .await?; + let block = driver.build_new_block().await?; // tx should not be included because we set the tx_size_limit to 1 assert!( - block.not_includes(*unfit_tx.tx_hash()), + !block.includes(unfit_tx.tx_hash()), "transaction should not be included in the block" ); @@ -68,45 +67,41 @@ async fn data_availability_block_size_limit() -> eyre::Result<()> { /// We will set limit to 3 txs and see that the builder will include 3 transactions. /// We should not forget about builder transaction so we will spawn only 2 regular txs. #[tokio::test] -async fn data_availability_block_fill() -> eyre::Result<()> { - let harness = TestHarnessBuilder::new("data_availability_block_fill") - .with_namespaces("admin,eth,miner") - .build() - .await?; - - let mut generator = harness.block_generator().await?; +async fn block_fill() -> eyre::Result<()> { + let rbuilder = LocalInstance::standard().await?; + let driver = rbuilder.driver().await?; // Set block big enough so it could fit 3 transactions without tx size limit - let call = harness - .provider()? 
+ let call = driver + .provider() .raw_request::<(i32, i32), bool>("miner_setMaxDASize".into(), (0, 100 * 3)) .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); // We already have 2 so we will spawn one more to check that it won't be included (it won't fit // because of builder tx) - let fit_tx_1 = harness + let fit_tx_1 = driver .create_transaction() .with_max_priority_fee_per_gas(50) .send() .await?; - let fit_tx_2 = harness + let fit_tx_2 = driver .create_transaction() .with_max_priority_fee_per_gas(50) .send() .await?; - let unfit_tx_3 = harness.create_transaction().send().await?; + let unfit_tx_3 = driver.create_transaction().send().await?; - let block = generator.generate_block().await?; + let block = driver.build_new_block().await?; // Now the first 2 txs will fit into the block - assert!(block.includes(*fit_tx_1.tx_hash()), "tx should be in block"); - assert!(block.includes(*fit_tx_2.tx_hash()), "tx should be in block"); + assert!(block.includes(fit_tx_1.tx_hash()), "tx should be in block"); + assert!(block.includes(fit_tx_2.tx_hash()), "tx should be in block"); assert!( - block.not_includes(*unfit_tx_3.tx_hash()), + !block.includes(unfit_tx_3.tx_hash()), "unfit tx should not be in block" ); assert!( - harness.latest_block().await.transactions.len() == 4, + driver.latest_full().await?.transactions.len() == 4, "builder + deposit + 2 valid txs should be in the block" ); diff --git a/crates/op-rbuilder/src/tests/vanilla/mod.rs b/crates/op-rbuilder/src/tests/vanilla/mod.rs index 12f52ca7d..976a3dc33 100644 --- a/crates/op-rbuilder/src/tests/vanilla/mod.rs +++ b/crates/op-rbuilder/src/tests/vanilla/mod.rs @@ -1,6 +1,6 @@ #![cfg(test)] -//mod data_availability; +mod data_availability; mod ordering; mod revert; mod smoke; From 0833fed5d3c1f6fee72da0a7fa9a5db9425d364b Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 14:14:59 +0000 Subject: [PATCH 06/23] ready --- .github/workflows/checks.yaml | 8 +- crates/op-rbuilder/Cargo.toml | 3 +- crates/op-rbuilder/src/bin/tester/main.rs | 73 ++-- .../src/tests/flashblocks/smoke.rs | 3 +- .../op-rbuilder/src/tests/framework/driver.rs | 42 ++- .../src/tests/framework/external.rs | 55 ++- .../src/tests/framework/harness.rs | 340 ------------------ .../src/tests/framework/instance.rs | 16 +- crates/op-rbuilder/src/tests/framework/mod.rs | 6 - crates/op-rbuilder/src/tests/framework/op.rs | 325 ----------------- .../src/tests/framework/service.rs | 111 ------ crates/op-rbuilder/src/tests/framework/txs.rs | 2 +- .../op-rbuilder/src/tests/framework/utils.rs | 26 +- .../src/tests/vanilla/data_availability.rs | 4 +- crates/op-rbuilder/src/tests/vanilla/mod.rs | 2 - .../op-rbuilder/src/tests/vanilla/ordering.rs | 5 +- .../op-rbuilder/src/tests/vanilla/revert.rs | 6 +- crates/op-rbuilder/src/tests/vanilla/smoke.rs | 7 +- .../op-rbuilder/src/tests/vanilla/txpool.rs | 7 +- 19 files changed, 119 insertions(+), 922 deletions(-) delete mode 100644 crates/op-rbuilder/src/tests/framework/harness.rs delete mode 100644 crates/op-rbuilder/src/tests/framework/op.rs delete mode 100644 crates/op-rbuilder/src/tests/framework/service.rs diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 5f4d1970c..0be051938 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -23,6 +23,7 @@ jobs: - nightly features: - "" + steps: - name: Checkout sources uses: actions/checkout@v4 @@ -66,13 +67,10 @@ jobs: - name: Compile op-rbuilder run: cargo build -p op-rbuilder --bin op-rbuilder - - name: 
Download op-reth - run: | - ./scripts/ci/download-op-reth.sh - echo "$(pwd)" >> $GITHUB_PATH - - name: Lint run: make lint - name: Test run: make test + env: + TESTS_TEMP_DIR: ${{ github.workspace }} diff --git a/crates/op-rbuilder/Cargo.toml b/crates/op-rbuilder/Cargo.toml index 213c085bb..7a221b52e 100644 --- a/crates/op-rbuilder/Cargo.toml +++ b/crates/op-rbuilder/Cargo.toml @@ -114,6 +114,7 @@ nanoid = { version = "0.4", optional = true } reth-ipc = { workspace = true, optional = true } bollard = { version = "0.19", optional = true } tar = { version = "0.4", optional = true } +ctor = { version = "0.4.2", optional = true } [target.'cfg(unix)'.dependencies] tikv-jemallocator = { version = "0.6", optional = true } @@ -133,7 +134,6 @@ reth-ipc = { workspace = true } reth-node-builder = { workspace = true, features = ["test-utils"] } bollard = "0.19" ctor = "0.4.2" -tar = "0.4" [features] default = ["jemalloc"] @@ -163,6 +163,7 @@ testing = [ "reth-ipc", "reth-node-builder/test-utils", "bollard", + "ctor", ] diff --git a/crates/op-rbuilder/src/bin/tester/main.rs b/crates/op-rbuilder/src/bin/tester/main.rs index 10768caa9..b011beb6a 100644 --- a/crates/op-rbuilder/src/bin/tester/main.rs +++ b/crates/op-rbuilder/src/bin/tester/main.rs @@ -1,5 +1,7 @@ use alloy_primitives::Address; +use alloy_provider::{Identity, ProviderBuilder}; use clap::Parser; +use op_alloy_network::Optimism; use op_rbuilder::tests::*; /// CLI Commands @@ -49,69 +51,38 @@ async fn main() -> eyre::Result<()> { match cli.command { Commands::Genesis { output } => generate_genesis(output).await, - Commands::Run { - validation, - no_tx_pool, - block_time_secs, - flashblocks_endpoint, - no_sleep, - } => { - run_system( - validation, - no_tx_pool, - block_time_secs, - flashblocks_endpoint, - no_sleep, - ) - .await - } + Commands::Run { validation, .. 
} => run_system(validation).await, Commands::Deposit { address, amount } => { - let engine_api = EngineApi::builder().build().unwrap(); - let mut generator = BlockGenerator::new(engine_api, None, false, 1, None); - - generator.init().await?; - - let block_generated = generator.deposit(address, amount).await?; - println!( - "Deposit transaction included in block: {:?}", - block_generated.block_hash() - ); + let engine_api = EngineApi::with_http("http://localhost:4444"); + let provider = ProviderBuilder::::default() + .connect_http("http://localhost:2222".try_into()?); + let driver = ChainDriver::::remote(provider, engine_api); + let block_hash = driver.fund(address, amount).await?; + println!("Deposit transaction included in block: {block_hash}"); Ok(()) } } } #[allow(dead_code)] -pub async fn run_system( - validation: bool, - no_tx_pool: bool, - block_time_secs: u64, - flashblocks_endpoint: Option, - no_sleep: bool, -) -> eyre::Result<()> { - println!("Validation: {validation}"); +pub async fn run_system(validation: bool) -> eyre::Result<()> { + println!("Validation node enabled: {validation}"); - let engine_api = EngineApi::new("http://localhost:4444").unwrap(); - let validation_api = if validation { - Some(EngineApi::new("http://localhost:5555").unwrap()) - } else { - None - }; + let engine_api = EngineApi::with_http("http://localhost:4444"); + let provider = ProviderBuilder::::default() + .connect_http("http://localhost:2222".try_into()?); + let mut driver = ChainDriver::::remote(provider, engine_api); - let mut generator = BlockGenerator::new( - engine_api, - validation_api, - no_tx_pool, - block_time_secs, - flashblocks_endpoint, - ); - - generator.init().await?; + if validation { + driver = driver + .with_validation_node(ExternalNode::reth().await?) + .await?; + } // Infinite loop generating blocks loop { println!("Generating new block..."); - let block_generated = generator.submit_payload(None, 0, no_sleep).await?; - println!("Generated block: {:?}", block_generated.block_hash()); + let block = driver.build_new_block().await?; + println!("Generated block: {:?}", block.header.hash); } } diff --git a/crates/op-rbuilder/src/tests/flashblocks/smoke.rs b/crates/op-rbuilder/src/tests/flashblocks/smoke.rs index 4b3b5b062..5a98b5d0d 100644 --- a/crates/op-rbuilder/src/tests/flashblocks/smoke.rs +++ b/crates/op-rbuilder/src/tests/flashblocks/smoke.rs @@ -9,8 +9,7 @@ use tokio_util::sync::CancellationToken; use crate::{ args::{FlashblocksArgs, OpRbuilderArgs}, builders::FlashblocksBuilder, - tests::{ChainDriverExt, LocalInstance, TestHarnessBuilder, TransactionBuilderExt}, - tx_signer::Signer, + tests::{ChainDriverExt, LocalInstance, TransactionBuilderExt}, }; #[tokio::test] diff --git a/crates/op-rbuilder/src/tests/framework/driver.rs b/crates/op-rbuilder/src/tests/framework/driver.rs index a5e2591a3..493213665 100644 --- a/crates/op-rbuilder/src/tests/framework/driver.rs +++ b/crates/op-rbuilder/src/tests/framework/driver.rs @@ -13,15 +13,19 @@ use reth_optimism_node::OpPayloadAttributes; use rollup_boost::OpExecutionPayloadEnvelope; use super::{EngineApi, Ipc, LocalInstance, TransactionBuilder}; -use crate::{args::OpRbuilderArgs, tests::ExternalNode, tx_signer::Signer}; +use crate::{ + args::OpRbuilderArgs, + tests::{ExternalNode, Protocol}, + tx_signer::Signer, +}; const DEFAULT_GAS_LIMIT: u64 = 10_000_000; /// The ChainDriver is a type that allows driving the op builder node to build new blocks manually /// by calling the `build_new_block` method. 
It uses the Engine API to interact with the node /// and the provider to fetch blocks and transactions. -pub struct ChainDriver { - engine_api: EngineApi, +pub struct ChainDriver { + engine_api: EngineApi, provider: RootProvider, signer: Option, gas_limit: Option, @@ -30,11 +34,13 @@ pub struct ChainDriver { } // instantiation and configuration -impl ChainDriver { +impl ChainDriver { const MIN_BLOCK_TIME: Duration = Duration::from_secs(1); - pub async fn new(instance: &LocalInstance) -> eyre::Result { - Ok(Self { + /// Creates a new ChainDriver instance for a local instance of RBuilder running in-process + /// communicating over IPC. + pub async fn local(instance: &LocalInstance) -> eyre::Result> { + Ok(ChainDriver:: { engine_api: instance.engine_api(), provider: instance.provider().await?, signer: Default::default(), @@ -44,6 +50,21 @@ impl ChainDriver { }) } + /// Creates a new ChainDriver for some EL node instance. + pub fn remote( + provider: RootProvider, + engine_api: EngineApi, + ) -> ChainDriver { + ChainDriver { + engine_api, + provider, + signer: Default::default(), + gas_limit: None, + args: OpRbuilderArgs::default(), + validation_nodes: vec![], + } + } + /// Specifies the block builder signing key used to sign builder transactions. /// If not specified, a random signer will be used. pub fn with_signer(mut self, signer: Signer) -> Self { @@ -73,7 +94,7 @@ impl ChainDriver { } // public test api -impl ChainDriver { +impl ChainDriver { /// Builds a new block using the current state of the chain and the transactions in the pool. pub async fn build_new_block(&self) -> eyre::Result> { self.build_new_block_with_txs(vec![]).await @@ -145,7 +166,10 @@ impl ChainDriver { .ok_or_else(|| eyre::eyre!("Forkchoice update did not return a payload ID"))?; // wait for the block to be built for the specified chain block time - tokio::time::sleep(Duration::from_millis(self.args.chain_block_time)).await; + tokio::time::sleep( + Duration::from_millis(self.args.chain_block_time).max(Self::MIN_BLOCK_TIME), + ) + .await; let payload = OpExecutionPayloadEnvelope::V4(self.engine_api.get_payload(payload_id).await?); @@ -241,7 +265,7 @@ impl ChainDriver { } // internal methods -impl ChainDriver { +impl ChainDriver { async fn fcu(&self, attribs: OpPayloadAttributes) -> eyre::Result { let latest = self.latest().await?.header.hash; let response = self diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index f7fe7ad13..3b8a3756d 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -1,8 +1,6 @@ use alloy_consensus::constants::EMPTY_WITHDRAWALS; -use alloy_eips::Encodable2718; -use alloy_eips::{eip7685::Requests, BlockNumberOrTag}; -use alloy_primitives::private::alloy_rlp::Encodable; -use alloy_primitives::{keccak256, B256, U256}; +use alloy_eips::{eip7685::Requests, BlockNumberOrTag, Encodable2718}; +use alloy_primitives::{keccak256, private::alloy_rlp::Encodable, B256, U256}; use alloy_provider::{Identity, Provider, ProviderBuilder, RootProvider}; use alloy_rpc_types_engine::{ ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PayloadStatusEnum, @@ -50,7 +48,11 @@ impl ExternalNode { pub async fn reth_version(version_tag: &str) -> eyre::Result { let docker = Docker::connect_with_local_defaults()?; - let tempdir = std::env::temp_dir().join(format!("reth-shared-{}", nanoid::nanoid!())); + let tempdir = std::env::var("TESTS_TEMP_DIR") + .map(PathBuf::from) + 
.unwrap_or_else(|_| std::env::temp_dir()); + + let tempdir = tempdir.join(format!("reth-shared-{}", nanoid::nanoid!())); let auth_ipc = tempdir.join("auth.ipc").to_string_lossy().to_string(); let rpc_ipc = tempdir.join("rpc.ipc").to_string_lossy().to_string(); @@ -281,26 +283,23 @@ impl Drop for ExternalNode { let docker = self.docker.clone(); let container_id = self.container_id.clone(); - if tokio::runtime::Handle::try_current().is_ok() { - let _ = tokio::task::spawn_blocking(move || { - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async move { - let _ = docker - .stop_container(&container_id, None::) - .await; - - let _ = docker - .remove_container( - &container_id, - Some(RemoveContainerOptions { - force: true, - ..Default::default() - }), - ) - .await; - }); - }); - } + tokio::spawn(async move { + docker + .stop_container(&container_id, None::) + .await + .expect("Failed to stop container"); + + docker + .remove_container( + &container_id, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await + .expect("Failed to remove container"); + }); } } @@ -370,7 +369,7 @@ async fn await_ipc_readiness(docker: &Docker, container: &str) -> eyre::Result<( } if message.to_lowercase().contains("error") { - return Err(eyre::eyre!("Failed to start op-reth container: {message}")); + return Err(eyre::eyre!("Failed to start op-reth container: {message}.")); } } LogOutput::StdIn { .. } | LogOutput::Console { .. } => {} @@ -394,10 +393,6 @@ trait OptimismProviderExt { async fn hash_at_height(&self, height: u64) -> eyre::Result; async fn latest_block_hash_and_number(&self) -> eyre::Result<(B256, u64)>; async fn execution_payload_for_block(&self, number: u64) -> eyre::Result; - - async fn genesis_hash(&self) -> eyre::Result { - Ok(self.hash_at_height(0).await?) 
- } } impl OptimismProviderExt for RootProvider { diff --git a/crates/op-rbuilder/src/tests/framework/harness.rs b/crates/op-rbuilder/src/tests/framework/harness.rs deleted file mode 100644 index b4789c527..000000000 --- a/crates/op-rbuilder/src/tests/framework/harness.rs +++ /dev/null @@ -1,340 +0,0 @@ -use super::{ - apis::EngineApi, - op::{OpRbuilderConfig, OpRethConfig}, - service::{self, Service, ServiceInstance}, - TransactionBuilder, BUILDER_PRIVATE_KEY, -}; -use alloy_eips::BlockNumberOrTag; -use alloy_network::Network; -use alloy_primitives::{hex, B256}; -use alloy_provider::{ - ext::TxPoolApi, Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider, -}; -use op_alloy_network::Optimism; -use parking_lot::Mutex; -use std::{ - collections::HashSet, net::TcpListener, path::PathBuf, sync::LazyLock, time::SystemTime, -}; -use time::{format_description, OffsetDateTime}; -use uuid::Uuid; - -pub struct TestHarnessBuilder { - name: String, - use_revert_protection: bool, - flashblocks_port: Option, - chain_block_time: Option, - flashbots_block_time: Option, - namespaces: Option, - extra_params: Option, -} - -impl TestHarnessBuilder { - pub fn new(name: &str) -> Self { - Self { - name: name.to_string(), - use_revert_protection: false, - flashblocks_port: None, - chain_block_time: None, - flashbots_block_time: None, - namespaces: None, - extra_params: None, - } - } - - pub fn with_revert_protection(mut self) -> Self { - self.use_revert_protection = true; - self - } - - pub fn with_flashblocks_port(mut self, port: u16) -> Self { - self.flashblocks_port = Some(port); - self - } - - pub fn with_chain_block_time(mut self, block_time: u64) -> Self { - self.chain_block_time = Some(block_time); - self - } - - pub fn with_flashbots_block_time(mut self, block_time: u64) -> Self { - self.flashbots_block_time = Some(block_time); - self - } - - pub fn with_namespaces(mut self, namespaces: &str) -> Self { - self.namespaces = Some(namespaces.to_string()); - self - } - - pub fn with_extra_params(mut self, extra_params: &str) -> Self { - self.extra_params = Some(extra_params.to_string()); - self - } - - pub async fn build(self) -> eyre::Result { - let mut framework = IntegrationFramework::new(&self.name).unwrap(); - - // we are going to use the fixture genesis and copy it to each test folder - let genesis = include_str!("artifacts/genesis.json.tmpl"); - - let mut genesis_path = framework.test_dir.clone(); - genesis_path.push("genesis.json"); - std::fs::write(&genesis_path, genesis)?; - - // create the builder - let builder_data_dir: PathBuf = std::env::temp_dir().join(Uuid::new_v4().to_string()); - let builder_auth_rpc_port = get_available_port(); - let builder_http_port = get_available_port(); - let mut op_rbuilder_config = OpRbuilderConfig::new() - .chain_config_path(genesis_path.clone()) - .data_dir(builder_data_dir) - .auth_rpc_port(builder_auth_rpc_port) - .network_port(get_available_port()) - .http_port(builder_http_port) - .with_builder_private_key(BUILDER_PRIVATE_KEY) - .with_revert_protection(self.use_revert_protection) - .with_namespaces(self.namespaces) - .with_extra_params(self.extra_params); - if let Some(flashblocks_port) = self.flashblocks_port { - op_rbuilder_config = op_rbuilder_config.with_flashblocks_port(flashblocks_port); - } - - if let Some(chain_block_time) = self.chain_block_time { - op_rbuilder_config = op_rbuilder_config.with_chain_block_time(chain_block_time); - } - - if let Some(flashbots_block_time) = self.flashbots_block_time { - op_rbuilder_config = 
op_rbuilder_config.with_flashbots_block_time(flashbots_block_time); - } - - // create the validation reth node - - let reth_data_dir = std::env::temp_dir().join(Uuid::new_v4().to_string()); - let validator_auth_rpc_port = get_available_port(); - let reth = OpRethConfig::new() - .chain_config_path(genesis_path) - .data_dir(reth_data_dir) - .auth_rpc_port(validator_auth_rpc_port) - .network_port(get_available_port()); - - framework.start("op-reth", &reth).await.unwrap(); - - let builder = framework - .start("op-rbuilder", &op_rbuilder_config) - .await - .unwrap(); - - let builder_log_path = builder.log_path.clone(); - - Ok(TestHarness { - framework, - builder_auth_rpc_port, - builder_http_port, - validator_auth_rpc_port, - builder_log_path, - chain_block_time: self.chain_block_time, - }) - } -} - -pub struct TestHarness { - framework: IntegrationFramework, - builder_auth_rpc_port: u16, - builder_http_port: u16, - validator_auth_rpc_port: u16, - builder_log_path: PathBuf, - chain_block_time: Option, -} - -impl TestHarness { - pub async fn send_valid_transaction( - &self, - ) -> eyre::Result> { - self.create_transaction().send().await - } - - pub async fn send_revert_transaction( - &self, - ) -> eyre::Result> { - self.create_transaction() - .with_input(hex!("60006000fd").into()) // PUSH1 0x00 PUSH1 0x00 REVERT - .send() - .await - } - - pub fn provider(&self) -> eyre::Result> { - let url = format!("http://localhost:{}", self.builder_http_port); - let provider = - ProviderBuilder::::default().connect_http(url.parse()?); - - Ok(provider) - } - - pub fn create_transaction(&self) -> TransactionBuilder { - TransactionBuilder::new(self.provider().expect("provider not available")) - } - - pub async fn latest_block(&self) -> ::BlockResponse { - self.provider() - .expect("provider not available") - .get_block_by_number(BlockNumberOrTag::Latest) - .full() - .await - .expect("failed to get latest block by hash") - .expect("latest block should exist") - } - - pub async fn latest_base_fee(&self) -> u128 { - self.latest_block() - .await - .header - .base_fee_per_gas - .expect("Base fee per gas not found in the latest block header") as u128 - } - - pub const fn builder_private_key() -> &'static str { - BUILDER_PRIVATE_KEY - } - - pub async fn check_tx_in_pool(&self, tx_hash: B256) -> eyre::Result { - let pool_inspect = self - .provider() - .expect("provider not available") - .txpool_content() - .await?; - - let is_pending = pool_inspect.pending.iter().any(|pending_account_map| { - pending_account_map - .1 - .iter() - .any(|(_, tx)| tx.as_recovered().hash() == *tx_hash) - }); - if is_pending { - return Ok(TransactionStatus::Pending); - } - - let is_queued = pool_inspect.queued.iter().any(|queued_account_map| { - queued_account_map - .1 - .iter() - .any(|(_, tx)| tx.as_recovered().hash() == *tx_hash) - }); - if is_queued { - return Ok(TransactionStatus::Queued); - } - - // check that the builder emitted logs for the reverted transactions with the monitoring logic - // this will tell us whether the builder dropped the transaction - // TODO: this is not ideal, lets find a different way to detect this - // Each time a transaction is dropped, it emits a log like this - // Note that this does not tell us the reason why the transaction was dropped. Ideally - // we should know it at this point. 
- // 'Transaction event received target="monitoring" tx_hash="" kind="discarded"' - let builder_logs = std::fs::read_to_string(&self.builder_log_path)?; - let txn_log = format!( - "Transaction event received target=\"monitoring\" tx_hash=\"{}\" kind=\"discarded\"", - tx_hash, - ); - if builder_logs.contains(txn_log.as_str()) { - return Ok(TransactionStatus::Dropped); - } - - Ok(TransactionStatus::NotFound) - } -} - -impl Drop for TestHarness { - fn drop(&mut self) { - for service in &mut self.framework.services { - let res = service.stop(); - if let Err(e) = res { - println!("Failed to stop service: {}", e); - } - } - } -} -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TransactionStatus { - NotFound, - Pending, - Queued, - Dropped, -} - -impl TransactionStatus { - pub fn is_pending(&self) -> bool { - matches!(self, TransactionStatus::Pending) - } - - pub fn is_queued(&self) -> bool { - matches!(self, TransactionStatus::Queued) - } - - pub fn is_dropped(&self) -> bool { - matches!(self, TransactionStatus::Dropped) - } -} - -pub fn get_available_port() -> u16 { - static CLAIMED_PORTS: LazyLock>> = - LazyLock::new(|| Mutex::new(HashSet::new())); - loop { - let port: u16 = rand::random_range(1000..20000); - if TcpListener::bind(("127.0.0.1", port)).is_ok() && CLAIMED_PORTS.lock().insert(port) { - return port; - } - } -} - -#[derive(Debug)] -pub enum IntegrationError { - SpawnError, - BinaryNotFound, - SetupError, - LogError, - ServiceAlreadyRunning, -} - -struct IntegrationFramework { - test_dir: PathBuf, - services: Vec, -} - -impl IntegrationFramework { - pub fn new(test_name: &str) -> Result { - let dt: OffsetDateTime = SystemTime::now().into(); - let format = format_description::parse("[year]_[month]_[day]_[hour]_[minute]_[second]") - .map_err(|_| IntegrationError::SetupError)?; - - let date_format = dt - .format(&format) - .map_err(|_| IntegrationError::SetupError)?; - - let mut test_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_dir.push("../../integration_logs"); - test_dir.push(format!("{date_format}_{test_name}")); - - std::fs::create_dir_all(&test_dir).map_err(|_| IntegrationError::SetupError)?; - - Ok(Self { - test_dir, - services: Vec::new(), - }) - } - - pub async fn start( - &mut self, - name: &str, - config: &T, - ) -> Result<&mut ServiceInstance, service::Error> { - let service = self.create_service(name)?; - service.start_with_config(config).await?; - Ok(service) - } - - pub fn create_service(&mut self, name: &str) -> Result<&mut ServiceInstance, service::Error> { - let service = ServiceInstance::new(name.to_string(), self.test_dir.clone()); - self.services.push(service); - Ok(self.services.last_mut().unwrap()) - } -} diff --git a/crates/op-rbuilder/src/tests/framework/instance.rs b/crates/op-rbuilder/src/tests/framework/instance.rs index 4966103a9..702b0ed55 100644 --- a/crates/op-rbuilder/src/tests/framework/instance.rs +++ b/crates/op-rbuilder/src/tests/framework/instance.rs @@ -1,8 +1,7 @@ -use crate::revert_protection::EthApiOverrideServer; use crate::{ args::OpRbuilderArgs, builders::{BuilderConfig, FlashblocksBuilder, PayloadBuilder, StandardBuilder}, - revert_protection::{EthApiExtServer, RevertProtectionExt}, + revert_protection::{EthApiExtServer, EthApiOverrideServer, RevertProtectionExt}, tests::{ framework::{driver::ChainDriver, BUILDER_PRIVATE_KEY}, ChainDriverExt, EngineApi, Ipc, TransactionPoolObserver, @@ -12,9 +11,9 @@ use crate::{ }; use alloy_provider::{Identity, ProviderBuilder, RootProvider}; use clap::Parser; -use core::future::Future; 
use core::{ any::Any, + future::Future, net::Ipv4Addr, pin::Pin, task::{Context, Poll}, @@ -36,8 +35,7 @@ use reth_optimism_node::{ node::{OpAddOnsBuilder, OpPoolBuilder}, OpNode, }; -use reth_transaction_pool::AllTransactionsEvents; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{AllTransactionsEvents, TransactionPool}; use std::sync::{Arc, LazyLock}; use tokio::sync::oneshot; @@ -178,7 +176,7 @@ impl LocalInstance { unreachable!() }; let instance = Self::new::(node_command.ext.clone()).await?; - let driver = ChainDriver::new(&instance).await?; + let driver = ChainDriver::::local(&instance).await?; driver.fund_default_accounts().await?; Ok(instance) } @@ -193,7 +191,7 @@ impl LocalInstance { node_command.ext.flashblocks.enabled = true; node_command.ext.flashblocks.flashblocks_port = 0; // use random os assigned port let instance = Self::new::(node_command.ext.clone()).await?; - let driver = ChainDriver::new(&instance).await?; + let driver = ChainDriver::::local(&instance).await?; driver.fund_default_accounts().await?; Ok(instance) } @@ -245,8 +243,8 @@ impl LocalInstance { &self.pool_observer } - pub async fn driver(&self) -> eyre::Result { - ChainDriver::new(self).await + pub async fn driver(&self) -> eyre::Result> { + ChainDriver::::local(self).await } pub async fn provider(&self) -> eyre::Result> { diff --git a/crates/op-rbuilder/src/tests/framework/mod.rs b/crates/op-rbuilder/src/tests/framework/mod.rs index cd52b1110..e8488d99b 100644 --- a/crates/op-rbuilder/src/tests/framework/mod.rs +++ b/crates/op-rbuilder/src/tests/framework/mod.rs @@ -1,20 +1,14 @@ mod apis; mod driver; mod external; -mod harness; mod instance; -mod op; -mod service; mod txs; mod utils; pub use apis::*; pub use driver::*; pub use external::*; -pub use harness::*; pub use instance::*; -pub use op::*; -pub use service::*; pub use txs::*; pub use utils::*; diff --git a/crates/op-rbuilder/src/tests/framework/op.rs b/crates/op-rbuilder/src/tests/framework/op.rs deleted file mode 100644 index f007b33d0..000000000 --- a/crates/op-rbuilder/src/tests/framework/op.rs +++ /dev/null @@ -1,325 +0,0 @@ -use std::{ - fs::File, - future::Future, - io::{ErrorKind, Read}, - path::{Path, PathBuf}, - process::Command, -}; - -use std::time::Duration; -use tokio::time::sleep; - -use super::{ - service::{self, Service}, - DEFAULT_JWT_TOKEN, -}; - -#[derive(Default, Debug)] -pub struct OpRbuilderConfig { - auth_rpc_port: Option, - jwt_secret_path: Option, - chain_config_path: Option, - data_dir: Option, - http_port: Option, - network_port: Option, - builder_private_key: Option, - flashblocks_port: Option, - chain_block_time: Option, - flashbots_block_time: Option, - with_revert_protection: Option, - namespaces: Option, - extra_params: Option, -} - -impl OpRbuilderConfig { - pub fn new() -> Self { - Self::default() - } - - pub fn auth_rpc_port(mut self, port: u16) -> Self { - self.auth_rpc_port = Some(port); - self - } - - pub fn chain_config_path>(mut self, path: P) -> Self { - self.chain_config_path = Some(path.into()); - self - } - - pub fn data_dir>(mut self, path: P) -> Self { - self.data_dir = Some(path.into()); - self - } - - pub fn network_port(mut self, port: u16) -> Self { - self.network_port = Some(port); - self - } - - pub fn http_port(mut self, port: u16) -> Self { - self.http_port = Some(port); - self - } - - pub fn with_builder_private_key(mut self, private_key: &str) -> Self { - self.builder_private_key = Some(private_key.to_string()); - self - } - - pub fn with_revert_protection(mut self, 
revert_protection: bool) -> Self { - self.with_revert_protection = Some(revert_protection); - self - } - - pub fn with_flashblocks_port(mut self, port: u16) -> Self { - self.flashblocks_port = Some(port); - self - } - - pub fn with_chain_block_time(mut self, time: u64) -> Self { - self.chain_block_time = Some(time); - self - } - - pub fn with_flashbots_block_time(mut self, time: u64) -> Self { - self.flashbots_block_time = Some(time); - self - } - - pub fn with_namespaces(mut self, namespaces: Option) -> Self { - self.namespaces = namespaces; - self - } - - pub fn with_extra_params(mut self, extra_params: Option) -> Self { - self.extra_params = extra_params; - self - } -} - -impl Service for OpRbuilderConfig { - fn command(&self) -> Command { - let mut bin_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - bin_path.push("../../target/debug/op-rbuilder"); - - let mut cmd = Command::new(bin_path); - let jwt_path = get_or_create_jwt_path(self.jwt_secret_path.as_ref()); - - cmd.arg("node") - .arg("--authrpc.port") - .arg( - self.auth_rpc_port - .expect("auth_rpc_port not set") - .to_string(), - ) - .arg("--authrpc.jwtsecret") - .arg( - jwt_path - .to_str() - .expect("Failed to convert jwt_path to string"), - ) - .arg("--chain") - .arg( - self.chain_config_path - .as_ref() - .expect("chain_config_path not set"), - ) - .arg("--datadir") - .arg(self.data_dir.as_ref().expect("data_dir not set")) - .arg("--disable-discovery") - .arg("--color") - .arg("never") - .arg("--builder.log-pool-transactions") - .arg("--port") - .arg(self.network_port.expect("network_port not set").to_string()) - .arg("--ipcdisable") - .arg("-vvvv"); - - if let Some(revert_protection) = self.with_revert_protection { - if revert_protection { - cmd.arg("--builder.enable-revert-protection"); - } - } - - if let Some(builder_private_key) = &self.builder_private_key { - cmd.arg("--rollup.builder-secret-key") - .arg(builder_private_key); - } - - if let Some(http_port) = self.http_port { - cmd.arg("--http") - .arg("--http.port") - .arg(http_port.to_string()); - } - - if let Some(flashblocks_port) = &self.flashblocks_port { - cmd.arg("--flashblocks.enabled"); - cmd.arg("--flashblocks.addr").arg("127.0.0.1"); - cmd.arg("--flashblocks.port") - .arg(flashblocks_port.to_string()); - } - - if let Some(chain_block_time) = self.chain_block_time { - cmd.arg("--rollup.chain-block-time") - .arg(chain_block_time.to_string()); - } - - if let Some(flashbots_block_time) = self.flashbots_block_time { - cmd.arg("--flashblocks.block-time") - .arg(flashbots_block_time.to_string()); - } - - if let Some(namespaces) = &self.namespaces { - cmd.arg("--http.api").arg(namespaces); - } - - if let Some(extra_params) = &self.extra_params { - cmd.args(extra_params.split_ascii_whitespace()); - } - - cmd - } - - #[allow(clippy::manual_async_fn)] - fn ready(&self, log_path: &Path) -> impl Future> + Send { - async move { - poll_logs( - log_path, - "Starting consensus engine", - Duration::from_millis(100), - Duration::from_secs(60), - ) - .await - } - } -} - -#[derive(Default, Debug)] -pub struct OpRethConfig { - auth_rpc_port: Option, - jwt_secret_path: Option, - chain_config_path: Option, - data_dir: Option, - http_port: Option, - network_port: Option, -} - -impl OpRethConfig { - pub fn new() -> Self { - Self::default() - } - - pub fn auth_rpc_port(mut self, port: u16) -> Self { - self.auth_rpc_port = Some(port); - self - } - - pub fn chain_config_path>(mut self, path: P) -> Self { - self.chain_config_path = Some(path.into()); - self - } - - pub fn data_dir>(mut 
self, path: P) -> Self { - self.data_dir = Some(path.into()); - self - } - - pub fn network_port(mut self, port: u16) -> Self { - self.network_port = Some(port); - self - } -} - -impl Service for OpRethConfig { - fn command(&self) -> Command { - let bin_path = PathBuf::from("op-reth"); - - let mut cmd = Command::new(bin_path); - let jwt_path = get_or_create_jwt_path(self.jwt_secret_path.as_ref()); - - cmd.arg("node") - .arg("--authrpc.port") - .arg( - self.auth_rpc_port - .expect("auth_rpc_port not set") - .to_string(), - ) - .arg("--authrpc.jwtsecret") - .arg( - jwt_path - .to_str() - .expect("Failed to convert jwt_path to string"), - ) - .arg("--chain") - .arg( - self.chain_config_path - .as_ref() - .expect("chain_config_path not set"), - ) - .arg("--datadir") - .arg(self.data_dir.as_ref().expect("data_dir not set")) - .arg("--disable-discovery") - .arg("--color") - .arg("never") - .arg("--port") - .arg(self.network_port.expect("network_port not set").to_string()) - .arg("--ipcdisable"); - - if let Some(http_port) = self.http_port { - cmd.arg("--http") - .arg("--http.port") - .arg(http_port.to_string()); - } - - cmd - } - - #[allow(clippy::manual_async_fn)] - fn ready(&self, log_path: &Path) -> impl Future> + Send { - async move { - poll_logs( - log_path, - "Starting consensus engine", - Duration::from_millis(100), - Duration::from_secs(60), - ) - .await - } - } -} - -fn get_or_create_jwt_path(jwt_path: Option<&PathBuf>) -> PathBuf { - jwt_path.cloned().unwrap_or_else(|| { - let tmp_dir = std::env::temp_dir(); - let jwt_path = tmp_dir.join("jwt.hex"); - std::fs::write(&jwt_path, DEFAULT_JWT_TOKEN).expect("Failed to write JWT secret file"); - jwt_path - }) -} - -/// Helper function to poll logs periodically -pub async fn poll_logs( - log_path: &Path, - pattern: &str, - interval: Duration, - timeout: Duration, -) -> Result<(), service::Error> { - let start = std::time::Instant::now(); - - loop { - if start.elapsed() > timeout { - return Err(service::Error::Spawn(ErrorKind::TimedOut)); - } - - let mut file = File::open(log_path).map_err(|_| service::Error::Logs)?; - let mut contents = String::new(); - file.read_to_string(&mut contents) - .map_err(|_| service::Error::Logs)?; - - if contents.contains(pattern) { - return Ok(()); - } - - sleep(interval).await; - } -} diff --git a/crates/op-rbuilder/src/tests/framework/service.rs b/crates/op-rbuilder/src/tests/framework/service.rs deleted file mode 100644 index 6bc587c4c..000000000 --- a/crates/op-rbuilder/src/tests/framework/service.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::{ - fs::{File, OpenOptions}, - future::Future, - io::{ErrorKind, Read}, - path::{Path, PathBuf}, - process::{Child, Command}, -}; -use thiserror::Error; - -#[derive(Debug, Error)] -pub enum Error { - #[error("Binary not found")] - BinaryNotFound, - - #[error("Failed to spawn process")] - Spawn(ErrorKind), - - #[error("Failed initialize log streams")] - Logs, - - #[error("Service is already running")] - ServiceAlreadyRunning, -} - -pub struct ServiceInstance { - process: Option, - pub log_path: PathBuf, -} - -impl ServiceInstance { - pub fn new(name: String, test_dir: PathBuf) -> Self { - let log_path = test_dir.join(format!("{name}.log")); - Self { - process: None, - log_path, - } - } - - pub fn start(&mut self, command: Command) -> Result<(), Error> { - if self.process.is_some() { - return Err(Error::ServiceAlreadyRunning); - } - - let log = open_log_file(&self.log_path)?; - let stdout = log.try_clone().map_err(|_| Error::Logs)?; - let stderr = 
log.try_clone().map_err(|_| Error::Logs)?; - - let mut cmd = command; - cmd.stdout(stdout).stderr(stderr); - - let child = match cmd.spawn() { - Ok(child) => Ok(child), - Err(e) => match e.kind() { - ErrorKind::NotFound => Err(Error::BinaryNotFound), - e => Err(Error::Spawn(e)), - }, - }?; - - self.process = Some(child); - Ok(()) - } - - pub fn stop(&mut self) -> Result<(), Error> { - if let Some(mut process) = self.process.take() { - return process.kill().map_err(|e| Error::Spawn(e.kind())); - } - Ok(()) - } - - /// Start a service using its configuration and wait for it to be ready - pub async fn start_with_config(&mut self, config: &T) -> Result<(), Error> { - self.start(config.command())?; - config.ready(&self.log_path).await?; - Ok(()) - } - - pub async fn find_log_line(&self, pattern: &str) -> eyre::Result<()> { - let mut file = - File::open(&self.log_path).map_err(|_| eyre::eyre!("Failed to open log file"))?; - let mut contents = String::new(); - file.read_to_string(&mut contents) - .map_err(|_| eyre::eyre!("Failed to read log file"))?; - - if contents.contains(pattern) { - Ok(()) - } else { - Err(eyre::eyre!("Pattern not found in log file: {}", pattern)) - } - } -} - -pub struct IntegrationFramework; - -pub trait Service { - /// Configure and return the command to run the service - fn command(&self) -> Command; - - /// Return a future that resolves when the service is ready - fn ready(&self, log_path: &Path) -> impl Future> + Send; -} - -fn open_log_file(path: &PathBuf) -> Result { - let prefix = path.parent().unwrap(); - std::fs::create_dir_all(prefix).map_err(|_| Error::Logs)?; - - OpenOptions::new() - .append(true) - .create(true) - .open(path) - .map_err(|_| Error::Logs) -} diff --git a/crates/op-rbuilder/src/tests/framework/txs.rs b/crates/op-rbuilder/src/tests/framework/txs.rs index 51fb57cf5..95a869fa0 100644 --- a/crates/op-rbuilder/src/tests/framework/txs.rs +++ b/crates/op-rbuilder/src/tests/framework/txs.rs @@ -188,7 +188,7 @@ impl TransactionBuilder { let bundle = Bundle { transactions: vec![transaction_encoded.into()], reverting_hashes: if with_reverted_hash { - Some(vec![txn_hash.into()]) + Some(vec![txn_hash]) } else { None }, diff --git a/crates/op-rbuilder/src/tests/framework/utils.rs b/crates/op-rbuilder/src/tests/framework/utils.rs index 39434cab1..b9efbb0c8 100644 --- a/crates/op-rbuilder/src/tests/framework/utils.rs +++ b/crates/op-rbuilder/src/tests/framework/utils.rs @@ -1,12 +1,12 @@ use alloy_eips::Encodable2718; -use alloy_primitives::{hex, Address, TxKind, B256, U256}; +use alloy_primitives::{hex, Address, BlockHash, TxKind, B256, U256}; use alloy_rpc_types_eth::{Block, BlockTransactionHashes}; use core::future::Future; use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; use op_alloy_rpc_types::Transaction; use crate::{ - tests::{framework::driver::ChainDriver, ONE_ETH}, + tests::{framework::driver::ChainDriver, Protocol, ONE_ETH}, tx_signer::Signer, }; @@ -33,8 +33,9 @@ pub trait ChainDriverExt { &self, addresses: Vec
<Address>,
         amount: u128,
-    ) -> impl Future<Output = eyre::Result<()>>;
-    fn fund(&self, address: Address, amount: u128) -> impl Future<Output = eyre::Result<()>>;
+    ) -> impl Future<Output = eyre::Result<BlockHash>>;
+    fn fund(&self, address: Address, amount: u128)
+        -> impl Future<Output = eyre::Result<BlockHash>>;
     fn first_funded_address(&self) -> Address {
         FUNDED_PRIVATE_KEYS[0]
             .parse()
             .expect("Failed to parse funded address")
     }
@@ -55,7 +56,7 @@ pub trait ChainDriverExt {
     }
 }
 
-impl ChainDriverExt for ChainDriver {
+impl<P: Protocol> ChainDriverExt for ChainDriver<P> {
     async fn fund_default_accounts(&self) -> eyre::Result<()> {
         for key in FUNDED_PRIVATE_KEYS {
             let signer: Signer = key.parse()?;
@@ -64,7 +65,7 @@ impl ChainDriverExt for ChainDriver {
         Ok(())
     }
 
-    async fn fund_many(&self, addresses: Vec<Address>, amount: u128) -> eyre::Result<()> {
+    async fn fund_many(&self, addresses: Vec<Address>
, amount: u128) -> eyre::Result<()> { + async fn fund_many(&self, addresses: Vec
, amount: u128) -> eyre::Result { let mut txs = Vec::with_capacity(addresses.len()); for address in addresses { @@ -85,11 +86,10 @@ impl ChainDriverExt for ChainDriver { txs.push(signed_tx_rlp.into()); } - self.build_new_block_with_txs(txs).await?; - Ok(()) + Ok(self.build_new_block_with_txs(txs).await?.header.hash) } - async fn fund(&self, address: Address, amount: u128) -> eyre::Result<()> { + async fn fund(&self, address: Address, amount: u128) -> eyre::Result { let deposit = TxDeposit { source_hash: B256::default(), from: address, // Set the sender to the address of the account to seed @@ -104,9 +104,11 @@ impl ChainDriverExt for ChainDriver { let signer = Signer::random(); let signed_tx = signer.sign_tx(OpTypedTransaction::Deposit(deposit))?; let signed_tx_rlp = signed_tx.encoded_2718(); - self.build_new_block_with_txs(vec![signed_tx_rlp.into()]) - .await?; - Ok(()) + Ok(self + .build_new_block_with_txs(vec![signed_tx_rlp.into()]) + .await? + .header + .hash) } } diff --git a/crates/op-rbuilder/src/tests/vanilla/data_availability.rs b/crates/op-rbuilder/src/tests/vanilla/data_availability.rs index 03fb17f14..eb8fe1dd8 100644 --- a/crates/op-rbuilder/src/tests/vanilla/data_availability.rs +++ b/crates/op-rbuilder/src/tests/vanilla/data_availability.rs @@ -1,6 +1,4 @@ -use crate::tests::{ - framework::TestHarnessBuilder, BlockTransactionsExt, LocalInstance, TransactionBuilderExt, -}; +use crate::tests::{BlockTransactionsExt, LocalInstance, TransactionBuilderExt}; use alloy_provider::Provider; /// This test ensures that the transaction size limit is respected. diff --git a/crates/op-rbuilder/src/tests/vanilla/mod.rs b/crates/op-rbuilder/src/tests/vanilla/mod.rs index 976a3dc33..5eb1364e1 100644 --- a/crates/op-rbuilder/src/tests/vanilla/mod.rs +++ b/crates/op-rbuilder/src/tests/vanilla/mod.rs @@ -5,5 +5,3 @@ mod ordering; mod revert; mod smoke; mod txpool; - -use super::*; diff --git a/crates/op-rbuilder/src/tests/vanilla/ordering.rs b/crates/op-rbuilder/src/tests/vanilla/ordering.rs index b2a15c342..74fe89677 100644 --- a/crates/op-rbuilder/src/tests/vanilla/ordering.rs +++ b/crates/op-rbuilder/src/tests/vanilla/ordering.rs @@ -1,7 +1,4 @@ -use crate::tests::{ - framework::{TestHarnessBuilder, ONE_ETH}, - ChainDriverExt, LocalInstance, -}; +use crate::tests::{framework::ONE_ETH, ChainDriverExt, LocalInstance}; use alloy_consensus::Transaction; use futures::{future::join_all, stream, StreamExt}; diff --git a/crates/op-rbuilder/src/tests/vanilla/revert.rs b/crates/op-rbuilder/src/tests/vanilla/revert.rs index 49375608a..17dccf5a3 100644 --- a/crates/op-rbuilder/src/tests/vanilla/revert.rs +++ b/crates/op-rbuilder/src/tests/vanilla/revert.rs @@ -6,8 +6,8 @@ use crate::{ builders::StandardBuilder, primitives::bundle::MAX_BLOCK_RANGE_BLOCKS, tests::{ - BlockTransactionsExt, BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, TestHarness, - TestHarnessBuilder, TransactionBuilderExt, ONE_ETH, + BlockTransactionsExt, BundleOpts, ChainDriver, ChainDriverExt, LocalInstance, + TransactionBuilderExt, ONE_ETH, }, }; @@ -22,7 +22,7 @@ async fn monitor_transaction_gc() -> eyre::Result<()> { }) .await?; - let driver = ChainDriver::new(&rbuilder).await?; + let driver = rbuilder.driver().await?; let accounts = driver.fund_accounts(10, ONE_ETH).await?; let latest_block_number = driver.latest().await?.header.number; diff --git a/crates/op-rbuilder/src/tests/vanilla/smoke.rs b/crates/op-rbuilder/src/tests/vanilla/smoke.rs index 157852696..178b99c03 100644 --- 
a/crates/op-rbuilder/src/tests/vanilla/smoke.rs +++ b/crates/op-rbuilder/src/tests/vanilla/smoke.rs @@ -1,4 +1,4 @@ -use crate::tests::{ChainDriver, ExternalNode, LocalInstance, TransactionBuilderExt}; +use crate::tests::{ExternalNode, LocalInstance, TransactionBuilderExt}; use alloy_primitives::TxHash; use core::{ sync::atomic::{AtomicBool, Ordering}, @@ -16,7 +16,8 @@ use tracing::info; #[tokio::test] async fn chain_produces_blocks() -> eyre::Result<()> { let rbuilder = LocalInstance::standard().await?; - let driver = ChainDriver::new(&rbuilder) + let driver = rbuilder + .driver() .await? .with_validation_node(ExternalNode::reth().await?) .await?; @@ -80,7 +81,7 @@ async fn chain_produces_blocks() -> eyre::Result<()> { #[tokio::test(flavor = "multi_thread")] async fn produces_blocks_under_load_within_deadline() -> eyre::Result<()> { let rbuilder = LocalInstance::standard().await?; - let driver = ChainDriver::new(&rbuilder).await?.with_gas_limit(10_00_000); + let driver = rbuilder.driver().await?.with_gas_limit(10_00_000); let done = AtomicBool::new(false); diff --git a/crates/op-rbuilder/src/tests/vanilla/txpool.rs b/crates/op-rbuilder/src/tests/vanilla/txpool.rs index edabf8bf2..09eb0f22a 100644 --- a/crates/op-rbuilder/src/tests/vanilla/txpool.rs +++ b/crates/op-rbuilder/src/tests/vanilla/txpool.rs @@ -1,9 +1,6 @@ use crate::{ builders::StandardBuilder, - tests::{ - default_node_config, BlockTransactionsExt, ChainDriver, ChainDriverExt, LocalInstance, - ONE_ETH, - }, + tests::{default_node_config, BlockTransactionsExt, ChainDriverExt, LocalInstance, ONE_ETH}, }; use reth::args::TxPoolArgs; use reth_node_builder::NodeConfig; @@ -24,7 +21,7 @@ async fn pending_pool_limit() -> eyre::Result<()> { ) .await?; - let driver = ChainDriver::new(&rbuilder).await?; + let driver = rbuilder.driver().await?; let accounts = driver.fund_accounts(50, ONE_ETH).await?; // Send 50 txs from different addrs From 213b7fff47ad6668b12eb5fefdc985552b51824a Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 14:40:30 +0000 Subject: [PATCH 07/23] lint --- crates/op-rbuilder/src/tests/framework/instance.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/op-rbuilder/src/tests/framework/instance.rs b/crates/op-rbuilder/src/tests/framework/instance.rs index 0e711855f..7c482c939 100644 --- a/crates/op-rbuilder/src/tests/framework/instance.rs +++ b/crates/op-rbuilder/src/tests/framework/instance.rs @@ -1,8 +1,14 @@ use crate::{ - args::OpRbuilderArgs, builders::{BuilderConfig, FlashblocksBuilder, PayloadBuilder, StandardBuilder}, primitives::reth::engine_api_builder::OpEngineApiBuilder, revert_protection::{EthApiExtServer, EthApiOverrideServer, RevertProtectionExt}, tests::{ + args::OpRbuilderArgs, + builders::{BuilderConfig, FlashblocksBuilder, PayloadBuilder, StandardBuilder}, + primitives::reth::engine_api_builder::OpEngineApiBuilder, + revert_protection::{EthApiExtServer, EthApiOverrideServer, RevertProtectionExt}, + tests::{ framework::{driver::ChainDriver, BUILDER_PRIVATE_KEY}, ChainDriverExt, EngineApi, Ipc, TransactionPoolObserver, - }, tx::FBPooledTransaction, tx_signer::Signer + }, + tx::FBPooledTransaction, + tx_signer::Signer, }; use alloy_provider::{Identity, ProviderBuilder, RootProvider}; use clap::Parser; From de85b603580ed7719b04de6831f5dec6c20574f1 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 15:01:44 +0000 Subject: [PATCH 08/23] Github Action login to GHCR.io --- .github/workflows/checks.yaml | 7 +++++++ 1 file changed, 7 insertions(+) 
diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 0be051938..cb4104c8b 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -69,6 +69,13 @@ jobs: - name: Lint run: make lint + + - name: Log in to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Test run: make test From 4417ae1aeb786a9fa3f3fe98a2da81c4c643f7cd Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 15:12:52 +0000 Subject: [PATCH 09/23] testing Github Actions ghcr access --- .github/workflows/checks.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index cb4104c8b..649a8efb8 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -77,6 +77,9 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Try pulling the image manually + run: docker pull ghcr.io/paradigmxyz/op-reth:latest + - name: Test run: make test env: From 0b8938ec0c23b5c65b3c5ba15ac3baa1114e5f5f Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Fri, 6 Jun 2025 15:14:19 +0000 Subject: [PATCH 10/23] testing Github Actions ghcr access --- .github/workflows/checks.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 649a8efb8..00bb24eb4 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -28,6 +28,9 @@ jobs: - name: Checkout sources uses: actions/checkout@v4 + - name: Try pulling the image manually + run: docker pull ghcr.io/paradigmxyz/op-reth:latest + # https://github.com/dtolnay/rust-toolchain - name: Setup rust toolchain uses: dtolnay/rust-toolchain@stable @@ -77,8 +80,6 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Try pulling the image manually - run: docker pull ghcr.io/paradigmxyz/op-reth:latest - name: Test run: make test From ba56dbafc158515100b2cc8550920de4cfdd5e97 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Mon, 9 Jun 2025 09:25:04 +0000 Subject: [PATCH 11/23] lint --- crates/op-rbuilder/src/tests/vanilla/revert.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/revert.rs b/crates/op-rbuilder/src/tests/vanilla/revert.rs index 246d3bfd6..2947c5c1e 100644 --- a/crates/op-rbuilder/src/tests/vanilla/revert.rs +++ b/crates/op-rbuilder/src/tests/vanilla/revert.rs @@ -309,7 +309,6 @@ async fn bundle_range_limits() -> eyre::Result<()> { .await .is_err()); - Ok(()) } From 320b2d6d0b15cce0d82a62e41932f936855e1225 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 08:33:13 +0000 Subject: [PATCH 12/23] Closes issue 133 --- .github/workflows/checks.yaml | 11 -- .../src/tests/framework/external.rs | 108 +++++++++++------- 2 files changed, 67 insertions(+), 52 deletions(-) diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 00bb24eb4..0be051938 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -28,9 +28,6 @@ jobs: - name: Checkout sources uses: actions/checkout@v4 - - name: Try pulling the image manually - run: docker pull ghcr.io/paradigmxyz/op-reth:latest - # https://github.com/dtolnay/rust-toolchain - name: Setup rust toolchain uses: dtolnay/rust-toolchain@stable @@ -72,14 +69,6 @@ jobs: - name: Lint run: make lint - - - name: Log in to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - 
username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Test run: make test diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index 3b8a3756d..d969bd640 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -8,16 +8,16 @@ use alloy_rpc_types_engine::{ use bollard::{ exec::{CreateExecOptions, StartExecResults}, query_parameters::{ - AttachContainerOptions, CreateContainerOptions, RemoveContainerOptions, + AttachContainerOptions, CreateContainerOptions, CreateImageOptions, RemoveContainerOptions, StartContainerOptions, StopContainerOptions, }, - secret::{ContainerCreateBody, HostConfig}, + secret::{ContainerCreateBody, ContainerCreateResponse, HostConfig}, Docker, }; -use futures::StreamExt; +use futures::{StreamExt, TryStreamExt}; use op_alloy_network::Optimism; use op_alloy_rpc_types_engine::OpExecutionPayloadV4; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use tracing::debug; use crate::tests::{EngineApi, Ipc}; @@ -65,43 +65,8 @@ impl ExternalNode { ) .map_err(|_| eyre::eyre!("Failed to write genesis file"))?; - let host_config = HostConfig { - binds: Some(vec![format!( - "{}:/home/op-reth-shared:rw", - tempdir.display() - )]), - ..Default::default() - }; - - // Don't expose any ports, as we will only use IPC for communication. - let container_config = ContainerCreateBody { - image: Some(format!("ghcr.io/paradigmxyz/op-reth:{version_tag}")), - entrypoint: Some(vec!["op-reth".to_string()]), - cmd: Some( - vec![ - "node", - "--chain=/home/op-reth-shared/genesis.json", - "--auth-ipc", - &format!("--auth-ipc.path={AUTH_CONTAINER_IPC_PATH}"), - &format!("--ipcpath={RPC_CONTAINER_IPC_PATH}"), - "--disable-discovery", - "--no-persist-peers", - "--max-outbound-peers=0", - "--max-inbound-peers=0", - "--trusted-only", - ] - .into_iter() - .map(String::from) - .collect(), - ), - host_config: Some(host_config), - ..Default::default() - }; - - // Create the container - let container = docker - .create_container(Some(CreateContainerOptions::default()), container_config) - .await?; + // Create Docker container with reth EL client + let container = create_container(&tempdir, &docker, version_tag).await?; docker .start_container(&container.id, None::) @@ -303,6 +268,67 @@ impl Drop for ExternalNode { } } +async fn create_container( + tempdir: &Path, + docker: &Docker, + version_tag: &str, +) -> eyre::Result { + let host_config = HostConfig { + binds: Some(vec![format!( + "{}:/home/op-reth-shared:rw", + tempdir.display() + )]), + ..Default::default() + }; + + // first pull the image locally + let mut pull_stream = docker.create_image( + Some(CreateImageOptions { + from_image: Some("ghcr.io/paradigmxyz/op-reth".to_string()), + tag: Some(version_tag.into()), + ..Default::default() + }), + None, + None, + ); + + while let Some(pull_result) = pull_stream.try_next().await? { + debug!( + "Pulling 'ghcr.io/paradigmxyz/op-reth:{version_tag}' locally: {:?}", + pull_result + ); + } + + // Don't expose any ports, as we will only use IPC for communication. 
+ let container_config = ContainerCreateBody { + image: Some(format!("ghcr.io/paradigmxyz/op-reth:{version_tag}")), + entrypoint: Some(vec!["op-reth".to_string()]), + cmd: Some( + vec![ + "node", + "--chain=/home/op-reth-shared/genesis.json", + "--auth-ipc", + &format!("--auth-ipc.path={AUTH_CONTAINER_IPC_PATH}"), + &format!("--ipcpath={RPC_CONTAINER_IPC_PATH}"), + "--disable-discovery", + "--no-persist-peers", + "--max-outbound-peers=0", + "--max-inbound-peers=0", + "--trusted-only", + ] + .into_iter() + .map(String::from) + .collect(), + ), + host_config: Some(host_config), + ..Default::default() + }; + + Ok(docker + .create_container(Some(CreateContainerOptions::default()), container_config) + .await?) +} + async fn relax_permissions(docker: &Docker, container: &str, path: &str) -> eyre::Result<()> { let exec = docker .create_exec( From dd046c06d3c88e4d2556dd6bb5e15adb359e6ddb Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 08:53:38 +0000 Subject: [PATCH 13/23] review feedback --- crates/op-rbuilder/src/tests/framework/external.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index d969bd640..107039c54 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -239,11 +239,13 @@ impl ExternalNode { impl Drop for ExternalNode { fn drop(&mut self) { - // Clean up the temporary directory - if self.tempdir.exists() { - std::fs::remove_dir_all(&self.tempdir).expect("Failed to remove temporary directory"); + if !self.tempdir.exists() { + return; // If the tempdir does not exist, there's nothing to clean up. } + // Clean up the temporary directory + std::fs::remove_dir_all(&self.tempdir).expect("Failed to remove temporary directory"); + // Block on cleaning up the container let docker = self.docker.clone(); let container_id = self.container_id.clone(); From 600b119ce2eefa986352666cd2ceacf1a6e71cf5 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 08:55:15 +0000 Subject: [PATCH 14/23] review feedback --- crates/op-rbuilder/src/tests/framework/external.rs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index 107039c54..c4b19d80b 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -18,7 +18,7 @@ use futures::{StreamExt, TryStreamExt}; use op_alloy_network::Optimism; use op_alloy_rpc_types_engine::OpExecutionPayloadV4; use std::path::{Path, PathBuf}; -use tracing::debug; +use tracing::{debug, warn}; use crate::tests::{EngineApi, Ipc}; @@ -251,12 +251,14 @@ impl Drop for ExternalNode { let container_id = self.container_id.clone(); tokio::spawn(async move { - docker + if let Err(e) = docker .stop_container(&container_id, None::) .await - .expect("Failed to stop container"); + { + warn!("Failed to stop container {}: {}", container_id, e); + } - docker + if let Err(e) = docker .remove_container( &container_id, Some(RemoveContainerOptions { @@ -265,7 +267,9 @@ impl Drop for ExternalNode { }), ) .await - .expect("Failed to remove container"); + { + warn!("Failed to remove container {}: {}", container_id, e); + } }); } } From 8bb45d215d2380d87e743e41a0087159e2c3e696 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 09:07:57 +0000 Subject: [PATCH 15/23] Review 
feedback - ctrl-c --- .../src/tests/framework/external.rs | 80 ++++++++++++------- 1 file changed, 53 insertions(+), 27 deletions(-) diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index c4b19d80b..0b8db15c6 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -18,6 +18,7 @@ use futures::{StreamExt, TryStreamExt}; use op_alloy_network::Optimism; use op_alloy_rpc_types_engine::OpExecutionPayloadV4; use std::path::{Path, PathBuf}; +use tokio::signal; use tracing::{debug, warn}; use crate::tests::{EngineApi, Ipc}; @@ -86,6 +87,19 @@ impl ExternalNode { .connect_ipc(rpc_ipc.into()) .await?; + // spin up a task that will clean up the container on ctrl-c + tokio::spawn({ + let docker = docker.clone(); + let container_id = container.id.clone(); + let tempdir = tempdir.clone(); + + async move { + if signal::ctrl_c().await.is_ok() { + cleanup(tempdir.clone(), docker.clone(), container_id.clone()).await; + } + } + }); + Ok(Self { engine_api, provider, @@ -239,37 +253,12 @@ impl ExternalNode { impl Drop for ExternalNode { fn drop(&mut self) { - if !self.tempdir.exists() { - return; // If the tempdir does not exist, there's nothing to clean up. - } - - // Clean up the temporary directory - std::fs::remove_dir_all(&self.tempdir).expect("Failed to remove temporary directory"); - // Block on cleaning up the container let docker = self.docker.clone(); let container_id = self.container_id.clone(); - + let tempdir = self.tempdir.clone(); tokio::spawn(async move { - if let Err(e) = docker - .stop_container(&container_id, None::) - .await - { - warn!("Failed to stop container {}: {}", container_id, e); - } - - if let Err(e) = docker - .remove_container( - &container_id, - Some(RemoveContainerOptions { - force: true, - ..Default::default() - }), - ) - .await - { - warn!("Failed to remove container {}: {}", container_id, e); - } + cleanup(tempdir, docker, container_id).await; }); } } @@ -421,6 +410,43 @@ async fn await_ipc_readiness(docker: &Docker, container: &str) -> eyre::Result<( Ok(()) } +async fn cleanup(tempdir: PathBuf, docker: Docker, container_id: String) { + // This is a no-op function that will be spawned to clean up the container on ctrl-c + // or Drop. + debug!( + "Cleaning up external node resources at {} [{container_id}]...", + tempdir.display() + ); + + if !tempdir.exists() { + return; // If the tempdir does not exist, there's nothing to clean up. 
+    }
+
+    // Clean up the temporary directory
+    std::fs::remove_dir_all(&tempdir).expect("Failed to remove temporary directory");
+
+    // Block on cleaning up the container
+    if let Err(e) = docker
+        .stop_container(&container_id, None::<StopContainerOptions>)
+        .await
+    {
+        warn!("Failed to stop container {}: {}", container_id, e);
+    }
+
+    if let Err(e) = docker
+        .remove_container(
+            &container_id,
+            Some(RemoveContainerOptions {
+                force: true,
+                ..Default::default()
+            }),
+        )
+        .await
+    {
+        warn!("Failed to remove container {}: {}", container_id, e);
+    }
+}
+
 trait OptimismProviderExt {
     async fn hash_at_height(&self, height: u64) -> eyre::Result<B256>;
     async fn latest_block_hash_and_number(&self) -> eyre::Result<(B256, u64)>;
 
From 0cfa9dbd3cb0802ff1d6b840f50510a46c965dad Mon Sep 17 00:00:00 2001
From: Karim Agha
Date: Tue, 10 Jun 2025 09:18:05 +0000
Subject: [PATCH 16/23] Review feedback

---
 .../op-rbuilder/src/tests/framework/utils.rs  | 33 ++++++++++++++++++-
 .../src/tests/vanilla/data_availability.rs    | 12 +++----
 2 files changed, 36 insertions(+), 9 deletions(-)

diff --git a/crates/op-rbuilder/src/tests/framework/utils.rs b/crates/op-rbuilder/src/tests/framework/utils.rs
index b9efbb0c8..c4ecc5091 100644
--- a/crates/op-rbuilder/src/tests/framework/utils.rs
+++ b/crates/op-rbuilder/src/tests/framework/utils.rs
@@ -1,5 +1,5 @@
 use alloy_eips::Encodable2718;
-use alloy_primitives::{hex, Address, BlockHash, TxKind, B256, U256};
+use alloy_primitives::{hex, Address, BlockHash, TxHash, TxKind, B256, U256};
 use alloy_rpc_types_eth::{Block, BlockTransactionHashes};
 use core::future::Future;
 use op_alloy_consensus::{OpTypedTransaction, TxDeposit};
@@ -54,6 +54,14 @@ pub trait ChainDriverExt {
         Ok(accounts)
     }
 }
+
+    fn build_new_block_with_valid_transaction(
+        &self,
+    ) -> impl Future<Output = eyre::Result<(TxHash, Block<Transaction>)>>;
+
+    fn build_new_block_with_reverrting_transaction(
+        &self,
+    ) -> impl Future<Output = eyre::Result<(TxHash, Block<Transaction>)>>;
 }
 
 impl<P: Protocol> ChainDriverExt for ChainDriver<P> {
@@ -110,6 +118,29 @@ impl<P: Protocol> ChainDriverExt for ChainDriver<P>
{ .header .hash) } + + async fn build_new_block_with_valid_transaction( + &self, + ) -> eyre::Result<(TxHash, Block)> { + let tx = self + .create_transaction() + .random_valid_transfer() + .send() + .await?; + Ok((*tx.tx_hash(), self.build_new_block().await?)) + } + + async fn build_new_block_with_reverrting_transaction( + &self, + ) -> eyre::Result<(TxHash, Block)> { + let tx = self + .create_transaction() + .random_reverting_transaction() + .send() + .await?; + + Ok((*tx.tx_hash(), self.build_new_block().await?)) + } } pub trait BlockTransactionsExt { diff --git a/crates/op-rbuilder/src/tests/vanilla/data_availability.rs b/crates/op-rbuilder/src/tests/vanilla/data_availability.rs index eb8fe1dd8..745f10b6e 100644 --- a/crates/op-rbuilder/src/tests/vanilla/data_availability.rs +++ b/crates/op-rbuilder/src/tests/vanilla/data_availability.rs @@ -1,4 +1,4 @@ -use crate::tests::{BlockTransactionsExt, LocalInstance, TransactionBuilderExt}; +use crate::tests::{BlockTransactionsExt, ChainDriverExt, LocalInstance}; use alloy_provider::Provider; /// This test ensures that the transaction size limit is respected. @@ -45,15 +45,11 @@ async fn block_size_limit() -> eyre::Result<()> { .await?; assert!(call, "miner_setMaxDASize should be executed successfully"); - let unfit_tx = driver - .create_transaction() - .random_valid_transfer() - .send() - .await?; - let block = driver.build_new_block().await?; + let (unfit_tx, block) = driver.build_new_block_with_valid_transaction().await?; + // tx should not be included because we set the tx_size_limit to 1 assert!( - !block.includes(unfit_tx.tx_hash()), + !block.includes(&unfit_tx), "transaction should not be included in the block" ); From 0216e3b14384b5f6f1ab69b2cc3afbde436edf89 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 09:21:07 +0000 Subject: [PATCH 17/23] review feedback --- crates/op-rbuilder/src/tests/vanilla/revert.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/revert.rs b/crates/op-rbuilder/src/tests/vanilla/revert.rs index 2947c5c1e..d137c1cc4 100644 --- a/crates/op-rbuilder/src/tests/vanilla/revert.rs +++ b/crates/op-rbuilder/src/tests/vanilla/revert.rs @@ -84,14 +84,8 @@ async fn disabled() -> eyre::Result<()> { .await?; let block = driver.build_new_block().await?; - assert!(block - .transactions - .hashes() - .any(|hash| hash == *valid_tx.tx_hash())); - assert!(block - .transactions - .hashes() - .any(|hash| hash == *reverting_tx.tx_hash())); + assert!(block.includes(valid_tx.tx_hash())); + assert!(block.includes(reverting_tx.tx_hash())); } Ok(()) From 39d99b228f7990655fc698ddeef9a6c8fdfe9403 Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 09:35:56 +0000 Subject: [PATCH 18/23] Review feedback --- .../op-rbuilder/src/tests/framework/utils.rs | 32 +++++++++++++++---- .../op-rbuilder/src/tests/vanilla/txpool.rs | 2 +- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/crates/op-rbuilder/src/tests/framework/utils.rs b/crates/op-rbuilder/src/tests/framework/utils.rs index c4ecc5091..ceba8434f 100644 --- a/crates/op-rbuilder/src/tests/framework/utils.rs +++ b/crates/op-rbuilder/src/tests/framework/utils.rs @@ -144,19 +144,23 @@ impl ChainDriverExt for ChainDriver

{
 }
 
 pub trait BlockTransactionsExt {
-    fn includes(&self, tx_hash: &B256) -> bool;
+    fn includes(&self, txs: &impl AsTxs) -> bool;
 }
 
 impl BlockTransactionsExt for Block<Transaction> {
-    fn includes(&self, tx_hash: &B256) -> bool {
-        self.transactions.hashes().any(|tx| tx == *tx_hash)
+    fn includes(&self, txs: &impl AsTxs) -> bool {
+        txs.as_txs()
+            .into_iter()
+            .all(|tx| self.transactions.hashes().any(|included| included == tx))
     }
 }
 
 impl BlockTransactionsExt for BlockTransactionHashes<'_, Transaction> {
-    fn includes(&self, tx_hash: &B256) -> bool {
-        let mut iter = self.clone();
-        iter.any(|tx| tx == *tx_hash)
+    fn includes(&self, txs: &impl AsTxs) -> bool {
+        let mut included_tx_iter = self.clone();
+        txs.as_txs()
+            .iter()
+            .all(|tx| included_tx_iter.any(|included| included == *tx))
     }
 }
 
@@ -171,3 +175,19 @@ impl OpRbuilderArgsTestExt for crate::args::OpRbuilderArgs {
         default
     }
 }
+
+pub trait AsTxs {
+    fn as_txs(&self) -> Vec<TxHash>;
+}
+
+impl AsTxs for TxHash {
+    fn as_txs(&self) -> Vec<TxHash> {
+        vec![*self]
+    }
+}
+
+impl AsTxs for Vec<TxHash> {
+    fn as_txs(&self) -> Vec<TxHash> {
+        self.clone()
+    }
+}
diff --git a/crates/op-rbuilder/src/tests/vanilla/txpool.rs b/crates/op-rbuilder/src/tests/vanilla/txpool.rs
index 09eb0f22a..be8aae121 100644
--- a/crates/op-rbuilder/src/tests/vanilla/txpool.rs
+++ b/crates/op-rbuilder/src/tests/vanilla/txpool.rs
@@ -66,7 +66,7 @@ async fn pending_pool_limit() -> eyre::Result<()> {
     let block = driver.build_new_block().await?;
 
     // Ensure that 10 extra txs got included
-    assert!(txs.into_iter().all(|tx| block.includes(&tx)));
+    assert!(block.includes(&txs));
 
     Ok(())
 }

From dae17164c8385388e8a9852dace0bd5927dad61b Mon Sep 17 00:00:00 2001
From: Karim Agha
Date: Tue, 10 Jun 2025 09:55:20 +0000
Subject: [PATCH 19/23] review feedback

---
 crates/op-rbuilder/src/tests/framework/external.rs | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs
index 0b8db15c6..8ea2b1cfe 100644
--- a/crates/op-rbuilder/src/tests/framework/external.rs
+++ b/crates/op-rbuilder/src/tests/framework/external.rs
@@ -422,9 +422,6 @@ async fn cleanup(tempdir: PathBuf, docker: Docker, container_id: String) {
         return; // If the tempdir does not exist, there's nothing to clean up.
} - // Clean up the temporary directory - std::fs::remove_dir_all(&tempdir).expect("Failed to remove temporary directory"); - // Block on cleaning up the container if let Err(e) = docker .stop_container(&container_id, None::) @@ -445,6 +442,10 @@ async fn cleanup(tempdir: PathBuf, docker: Docker, container_id: String) { { warn!("Failed to remove container {}: {}", container_id, e); } + + + // Clean up the temporary directory + std::fs::remove_dir_all(&tempdir).expect("Failed to remove temporary directory"); } trait OptimismProviderExt { From f4d327c7a744cbc4c2a4b60164c5ff5513ba374c Mon Sep 17 00:00:00 2001 From: Karim Agha Date: Tue, 10 Jun 2025 10:16:40 +0000 Subject: [PATCH 20/23] lint --- crates/op-rbuilder/src/tests/framework/external.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/op-rbuilder/src/tests/framework/external.rs b/crates/op-rbuilder/src/tests/framework/external.rs index 8ea2b1cfe..e67fcfcc3 100644 --- a/crates/op-rbuilder/src/tests/framework/external.rs +++ b/crates/op-rbuilder/src/tests/framework/external.rs @@ -443,7 +443,6 @@ async fn cleanup(tempdir: PathBuf, docker: Docker, container_id: String) { warn!("Failed to remove container {}: {}", container_id, e); } - // Clean up the temporary directory std::fs::remove_dir_all(&tempdir).expect("Failed to remove temporary directory"); } From 998d7f2e88dfe499f8f046795944fe54274f5084 Mon Sep 17 00:00:00 2001 From: karim Date: Tue, 10 Jun 2025 20:18:41 +0200 Subject: [PATCH 21/23] macos compatibility --- crates/op-rbuilder/src/tests/vanilla/smoke.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/smoke.rs b/crates/op-rbuilder/src/tests/vanilla/smoke.rs index 178b99c03..e472d847a 100644 --- a/crates/op-rbuilder/src/tests/vanilla/smoke.rs +++ b/crates/op-rbuilder/src/tests/vanilla/smoke.rs @@ -1,4 +1,4 @@ -use crate::tests::{ExternalNode, LocalInstance, TransactionBuilderExt}; +use crate::tests::{LocalInstance, TransactionBuilderExt}; use alloy_primitives::TxHash; use core::{ sync::atomic::{AtomicBool, Ordering}, @@ -18,8 +18,11 @@ async fn chain_produces_blocks() -> eyre::Result<()> { let rbuilder = LocalInstance::standard().await?; let driver = rbuilder .driver() - .await? - .with_validation_node(ExternalNode::reth().await?) + .await?; + + #[cfg(target_os = "linux")] + let driver = driver + .with_validation_node(super::ExternalNode::reth().await?) 
.await?; const SAMPLE_SIZE: usize = 10; From 68e43c0324d0c68e2c4a2e1ebc6192730453396a Mon Sep 17 00:00:00 2001 From: karim Date: Tue, 10 Jun 2025 20:32:13 +0200 Subject: [PATCH 22/23] lint --- crates/op-rbuilder/src/tests/vanilla/smoke.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/smoke.rs b/crates/op-rbuilder/src/tests/vanilla/smoke.rs index e472d847a..b3a0c010f 100644 --- a/crates/op-rbuilder/src/tests/vanilla/smoke.rs +++ b/crates/op-rbuilder/src/tests/vanilla/smoke.rs @@ -16,9 +16,7 @@ use tracing::info; #[tokio::test] async fn chain_produces_blocks() -> eyre::Result<()> { let rbuilder = LocalInstance::standard().await?; - let driver = rbuilder - .driver() - .await?; + let driver = rbuilder.driver().await?; #[cfg(target_os = "linux")] let driver = driver From 8b39740b9dd859b73b7a9f6ea6a538b38874fdea Mon Sep 17 00:00:00 2001 From: karim Date: Tue, 10 Jun 2025 20:39:33 +0200 Subject: [PATCH 23/23] lint --- crates/op-rbuilder/src/tests/vanilla/smoke.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/op-rbuilder/src/tests/vanilla/smoke.rs b/crates/op-rbuilder/src/tests/vanilla/smoke.rs index b3a0c010f..8fd537e25 100644 --- a/crates/op-rbuilder/src/tests/vanilla/smoke.rs +++ b/crates/op-rbuilder/src/tests/vanilla/smoke.rs @@ -20,7 +20,7 @@ async fn chain_produces_blocks() -> eyre::Result<()> { #[cfg(target_os = "linux")] let driver = driver - .with_validation_node(super::ExternalNode::reth().await?) + .with_validation_node(crate::tests::ExternalNode::reth().await?) .await?; const SAMPLE_SIZE: usize = 10;