From ad729dec42e31b2fb6a60d3e52954611270049f4 Mon Sep 17 00:00:00 2001 From: msheth-circle Date: Wed, 8 Apr 2026 18:32:21 +0000 Subject: [PATCH] chore: sync public-repo from internal@74a80c0 - remove ADR 0005 - bump trivy-action to v0.35.0 (compromised v0.34.2) - update cloudsmith namespace to circle/arc-network - remove internal-only CLAUDE.md and atlantis.yaml - sync latest from master (consensus, config, docs updates) --- .../src/handlers/consensus_ready.rs | 31 ++- .../src/handlers/process_synced_value.rs | 162 +++++++++++ crates/malachite-app/src/state.rs | 13 + crates/malachite-cli/src/cmd/start.rs | 4 +- crates/types/src/config.rs | 2 +- docs/installation.md | 74 +++-- docs/running-an-arc-node.md | 261 +++++++++++++----- 7 files changed, 443 insertions(+), 104 deletions(-) diff --git a/crates/malachite-app/src/handlers/consensus_ready.rs b/crates/malachite-app/src/handlers/consensus_ready.rs index a98ee43..240a282 100644 --- a/crates/malachite-app/src/handlers/consensus_ready.rs +++ b/crates/malachite-app/src/handlers/consensus_ready.rs @@ -47,25 +47,28 @@ pub async fn handle( engine: &Engine, reply: Reply<(Height, HeightParams)>, ) -> eyre::Result<()> { + // Create and attach the persistence meter before borrowing state fields, + // since set_persistence_meter requires &mut self. 
+ { + let execution_config = &state.config().execution; + let meter = persistence_meter::create_with_fallback( + execution_config.persistence_backpressure, + engine.subscription_endpoint(), + execution_config.persistence_backpressure_threshold, + ) + .await; + + persistence_meter::seed_from_latest_block(meter.as_ref(), engine.eth.as_ref()).await; + + state.set_persistence_meter(meter); + } + let (store, stats, metrics) = (state.store(), state.stats(), state.metrics()); let max_pending_proposals = max_pending_proposals(&state.config().value_sync); let payload_validator = EnginePayloadValidator::new(engine, metrics); let block_finalizer = EngineBlockFinalizer::new(engine, stats, metrics); - let execution_config = &state.config().execution; - let persistence_meter = persistence_meter::create_with_fallback( - execution_config.persistence_backpressure, - engine.subscription_endpoint(), - execution_config.persistence_backpressure_threshold, - ) - .await; - - // Seed with the current EL height. Safe at startup since all blocks are - // persisted before the node begins replaying. - persistence_meter::seed_from_latest_block(persistence_meter.as_ref(), engine.eth.as_ref()) - .await; - let (next_height, next_validator_set, next_consensus_params, previous_block) = on_consensus_ready( metrics, @@ -76,7 +79,7 @@ pub async fn handle( block_finalizer, engine.api.as_ref(), engine.eth.as_ref(), - persistence_meter.as_ref(), + state.persistence_meter(), max_pending_proposals, ) .await?; diff --git a/crates/malachite-app/src/handlers/process_synced_value.rs b/crates/malachite-app/src/handlers/process_synced_value.rs index 88227dc..b8794e6 100644 --- a/crates/malachite-app/src/handlers/process_synced_value.rs +++ b/crates/malachite-app/src/handlers/process_synced_value.rs @@ -14,6 +14,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::time::Duration; + use bytes::Bytes; use eyre::Context; use ssz::Decode; @@ -26,6 +28,7 @@ use malachitebft_app_channel::Reply; use alloy_rpc_types_engine::ExecutionPayloadV3; use arc_consensus_types::{Address, ArcContext, Height}; use arc_eth_engine::engine::Engine; +use arc_eth_engine::persistence_meter::PersistenceMeter; use malachitebft_app_channel::app::types::core::Validity; @@ -35,6 +38,9 @@ use crate::state::State; use crate::store::repositories::{InvalidPayloadsRepository, UndecidedBlocksRepository}; use arc_consensus_db::invalid_payloads::InvalidPayload; +/// Timeout when blocked waiting for EL persistence to catch up during sync. +const SYNC_PERSISTENCE_WAIT_TIMEOUT: Duration = Duration::from_secs(30); + /// Handles the `ProcessSyncedValue` message from the consensus engine. /// /// This is called when the consensus engine has received a value via sync for a given height and round. @@ -56,6 +62,7 @@ pub async fn handle( EnginePayloadValidator::new(engine, state.metrics()), state.store(), state.store(), + state.persistence_meter(), height, round, proposer, @@ -98,10 +105,12 @@ pub async fn handle( /// /// Returns `Ok(None)` when the raw bytes cannot be SSZ-decoded (the error is logged /// but not propagated). 
+#[allow(clippy::too_many_arguments)] async fn on_process_synced_value( engine: impl PayloadValidator, undecided_blocks_repo: impl UndecidedBlocksRepository, invalid_payloads_repo: impl InvalidPayloadsRepository, + persistence_meter: impl PersistenceMeter, height: Height, round: Round, proposer: Address, @@ -169,6 +178,19 @@ async fn on_process_synced_value( ) })?; + if validity.is_valid() { + if let Err(e) = persistence_meter + .wait_for_persisted_block(height.as_u64(), SYNC_PERSISTENCE_WAIT_TIMEOUT) + .await + { + error!( + block_number = height.as_u64(), + %e, + "ProcessSyncedValue: persistence backpressure timed out, proceeding" + ); + } + } + Ok(Some(proposal)) } @@ -182,6 +204,8 @@ mod tests { }; use arbitrary::{Arbitrary, Unstructured}; + use arc_eth_engine::mocks::MockPersistenceMeter; + use arc_eth_engine::persistence_meter::NoopPersistenceMeter; use bytes::Bytes; use malachitebft_core_types::Validity; use mockall::predicate::*; @@ -226,6 +250,7 @@ mod tests { engine, undecided, invalid, + NoopPersistenceMeter, height, round, proposer, @@ -277,6 +302,7 @@ mod tests { engine, undecided, invalid, + NoopPersistenceMeter, height, round, proposer, @@ -317,6 +343,7 @@ mod tests { engine, undecided, invalid, + NoopPersistenceMeter, height, round, proposer, @@ -350,6 +377,7 @@ mod tests { engine, undecided, invalid, + NoopPersistenceMeter, height, round, proposer, @@ -388,6 +416,7 @@ mod tests { engine, undecided, invalid, + NoopPersistenceMeter, height, round, proposer, @@ -398,4 +427,137 @@ mod tests { assert!(result.is_err()); assert!(result.unwrap_err().downcast_ref::().is_some()); } + + #[tokio::test] + async fn on_process_synced_value_calls_persistence_meter_for_valid_payload() { + let mut u = Unstructured::new(&[0u8; 512]); + + let height = Height::new(42); + let round = Round::new(0); + let proposer = Address::new([0u8; 20]); + let payload = ExecutionPayloadV3::arbitrary(&mut u).unwrap(); + let value_bytes = Bytes::from(payload.as_ssz_bytes()); + + let mut 
engine = MockPayloadValidator::new(); + engine + .expect_validate_payload() + .returning(|_| Ok(PayloadValidationResult::Valid)); + + let mut undecided = MockUndecidedBlocksRepository::new(); + undecided.expect_store().times(1).returning(|_| Ok(())); + + let mut invalid = MockInvalidPayloadsRepository::new(); + invalid.expect_append().times(0); + + let mut persistence_meter = MockPersistenceMeter::new(); + persistence_meter + .expect_wait_for_persisted_block() + .withf(|&block, _| block == 42) + .times(1) + .return_once(|_, _| Ok(())); + + let proposal = on_process_synced_value( + engine, + undecided, + invalid, + persistence_meter, + height, + round, + proposer, + value_bytes, + ) + .await + .expect("should succeed"); + + assert!(proposal.is_some()); + assert_eq!(proposal.unwrap().validity, Validity::Valid); + } + + #[tokio::test] + async fn on_process_synced_value_skips_persistence_meter_for_invalid_payload() { + let mut u = Unstructured::new(&[0u8; 512]); + + let height = Height::new(42); + let round = Round::new(0); + let proposer = Address::new([0u8; 20]); + let payload = ExecutionPayloadV3::arbitrary(&mut u).unwrap(); + let value_bytes = Bytes::from(payload.as_ssz_bytes()); + + let mut engine = MockPayloadValidator::new(); + engine.expect_validate_payload().returning(|_| { + Ok(PayloadValidationResult::Invalid { + reason: "bad".into(), + }) + }); + + let mut undecided = MockUndecidedBlocksRepository::new(); + undecided.expect_store().times(1).returning(|_| Ok(())); + + let mut invalid = MockInvalidPayloadsRepository::new(); + invalid.expect_append().times(1).returning(|_| Ok(())); + + let mut persistence_meter = MockPersistenceMeter::new(); + persistence_meter.expect_wait_for_persisted_block().times(0); + + let proposal = on_process_synced_value( + engine, + undecided, + invalid, + persistence_meter, + height, + round, + proposer, + value_bytes, + ) + .await + .expect("should succeed"); + + assert!(proposal.is_some()); + assert_eq!(proposal.unwrap().validity, 
Validity::Invalid); + } + + #[tokio::test] + async fn on_process_synced_value_proceeds_when_persistence_meter_fails() { + let mut u = Unstructured::new(&[0u8; 512]); + + let height = Height::new(7); + let round = Round::new(0); + let proposer = Address::new([0u8; 20]); + let payload = ExecutionPayloadV3::arbitrary(&mut u).unwrap(); + let value_bytes = Bytes::from(payload.as_ssz_bytes()); + + let mut engine = MockPayloadValidator::new(); + engine + .expect_validate_payload() + .returning(|_| Ok(PayloadValidationResult::Valid)); + + let mut undecided = MockUndecidedBlocksRepository::new(); + undecided.expect_store().times(1).returning(|_| Ok(())); + + let mut invalid = MockInvalidPayloadsRepository::new(); + invalid.expect_append().times(0); + + let mut persistence_meter = MockPersistenceMeter::new(); + persistence_meter + .expect_wait_for_persisted_block() + .withf(|&block, _| block == 7) + .times(1) + .return_once(|_, _| Err(eyre::eyre!("persistence meter timeout"))); + + let proposal = on_process_synced_value( + engine, + undecided, + invalid, + persistence_meter, + height, + round, + proposer, + value_bytes, + ) + .await + .expect("should succeed even when meter fails"); + + assert!(proposal.is_some()); + assert_eq!(proposal.unwrap().validity, Validity::Valid); + } } diff --git a/crates/malachite-app/src/state.rs b/crates/malachite-app/src/state.rs index 885f4c0..ad32dcc 100644 --- a/crates/malachite-app/src/state.rs +++ b/crates/malachite-app/src/state.rs @@ -31,6 +31,7 @@ use arc_consensus_types::{ Address, AlloyAddress, ArcContext, BlockHash, Config, ConsensusParams, Height, ValidatorSet, }; use arc_eth_engine::json_structures::ExecutionBlock; +use arc_eth_engine::persistence_meter::{NoopPersistenceMeter, PersistenceMeter}; use arc_signer::ArcSigningProvider; use malachitebft_core_types::HeightParams; @@ -135,6 +136,9 @@ pub struct State { /// Timestamps of heights that received a synced value via ProcessSyncedValue. 
synced_heights: HashMap, + /// Meters EL block persistence to apply backpressure during sync catch-up. + persistence_meter: Box, + /// Consensus-layer chain spec (fork activation by height/time). #[allow(dead_code)] pub spec: ConsensusSpec, @@ -199,6 +203,7 @@ impl State { consensus_params: ConsensusParams::default(), proposal_monitor: None, synced_heights: HashMap::new(), + persistence_meter: Box::new(NoopPersistenceMeter), spec, metrics, } @@ -273,6 +278,14 @@ impl State { self.consensus_params = consensus_params; } + pub fn persistence_meter(&self) -> &dyn PersistenceMeter { + self.persistence_meter.as_ref() + } + + pub fn set_persistence_meter(&mut self, meter: Box) { + self.persistence_meter = meter; + } + /// Get mutable reference to the streams map pub fn streams_map_mut(&mut self) -> &mut PartStreamsMap { &mut self.streams_map diff --git a/crates/malachite-cli/src/cmd/start.rs b/crates/malachite-cli/src/cmd/start.rs index b43eaab..420113d 100644 --- a/crates/malachite-cli/src/cmd/start.rs +++ b/crates/malachite-cli/src/cmd/start.rs @@ -187,7 +187,7 @@ pub struct StartCmd { #[clap( long = "execution-persistence-backpressure-threshold", value_name = "BLOCKS", - default_value = "100" + default_value = "16" )] pub execution_persistence_backpressure_threshold: u64, @@ -401,7 +401,7 @@ impl Default for StartCmd { execution_endpoint: None, execution_ws_endpoint: None, execution_persistence_backpressure: false, - execution_persistence_backpressure_threshold: 100, + execution_persistence_backpressure_threshold: 16, execution_jwt: None, metrics: None, rpc_addr: None, diff --git a/crates/types/src/config.rs b/crates/types/src/config.rs index 2056cb1..033b04d 100644 --- a/crates/types/src/config.rs +++ b/crates/types/src/config.rs @@ -186,7 +186,7 @@ pub struct ExecutionConfig { impl ExecutionConfig { const fn default_persistence_backpressure_threshold() -> u64 { - 100 + 16 } } diff --git a/docs/installation.md b/docs/installation.md index 9399624..1fe5307 100644 --- 
a/docs/installation.md +++ b/docs/installation.md @@ -1,32 +1,46 @@ -# Installation +# Install -Arc node can be installed in two ways: a pre-built binary via `arcup` or building from source. +The Arc node binaries can be installed in two ways: +downloading pre-built binaries via [`arcup`](#pre-built-binary), +or by building them from [source](#build-from-source). + +After the installation, refer to [Running an Arc Node](./running-an-arc-node.md) +for how to run an Arc node. > **Docker:** Container images and Docker Compose instructions are coming soon. ## Versions -Versions across networks may not be compatible. Consult the table below to confirm which version to run for each network. +Versions of the Arc node across networks may not be compatible. +Consult the table below to confirm which version to run for each network. -| Network | Version | -|---------|---------| -| Arc Testnet | v0.6.0 | +| Network | Version | +|-------------|---------| +| Arc Testnet | v0.6.0 | ## Pre-built Binary -`arcup` installs `arc-node-execution`, `arc-node-consensus`, and `arc-snapshots` to `~/.arc/bin`. +This repository includes `arcup`, a script that installs Arc node binaries +into `$ARC_BIN_DIR` directory, defaulting to `~/.arc/bin`: ```sh curl -L https://raw.githubusercontent.com/circlefin/arc-node/main/arcup/install | bash ``` -After installing, restart your shell or run: +More precisely, the [configured paths](./running-an-arc-node.md#configure-paths) +for Arc nodes are based on the `$ARC_HOME` variable, with `~/.arc` as default value. +If `$ARC_BIN_DIR` is not set, its default value is `$ARC_HOME/bin`, defaulting +to `~/.arc/bin`. +`$ARC_BIN_DIR` must be part of the system `PATH`. 
+ +To be sure that the binaries installed under `$ARC_BIN_DIR` are available in +the `PATH`, load the produced environment file: ```sh -source ~/.arc/env +source $ARC_HOME/env ``` -Verify the installation: +Next, verify that the three Arc binaries are installed: ```sh arc-snapshots --version @@ -34,7 +48,8 @@ arc-node-execution --version arc-node-consensus --version ``` -To update in the future, run: +The `arcup` script should also be in the `PATH` +and can be used to update Arc binaries: ```sh arcup @@ -42,25 +57,35 @@ arcup ## Build from Source -**1. Install Rust:** +The Arc node source code is available in the +https://github.com/circlefin/arc-node repository: -```sh -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -source $HOME/.cargo/env -``` - -**2. Clone the repository:** +**1. Clone `arc-node`** ```sh git clone https://github.com/circlefin/arc-node.git cd arc-node -git checkout v0.6.0 +git checkout $VERSION git submodule update --init --recursive ``` +`$VERSION` is a tag for a released version. +Refer to the [Versions](#versions) section to find out which one to use. + +**2. Install Rust:** + +Make sure that you have [rust](https://rust-lang.org/tools/install/) installed. +If not, it can be installed with the following commands: + +```sh +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +source ~/.cargo/env +``` + **3. Build and install:** -By default, `cargo install` places binaries in `~/.cargo/bin`, which is added to `PATH` by the Rust installer. Pass `--root ` to install into `/bin` instead (e.g. `--root /usr/local`). +The following commands produce three Arc node binaries: +`arc-node-execution`, `arc-node-consensus`, and `arc-snapshots`: ```sh cargo install --path crates/node @@ -68,7 +93,13 @@ cargo install --path crates/malachite-app cargo install --path crates/snapshots ``` -Verify: +`cargo install` places compiled binaries into `~/.cargo/bin`, which is added +to `PATH` by loading `~/.cargo/env`. 
+Include the parameter `--root $BASE_DIR` to install the compiled binaries into +`$BASE_DIR/bin` instead (for instance, `--root /usr/local`). + +In either case, Arc node binaries should be in the `PATH`. +Verify by calling them: ```sh arc-snapshots --version @@ -76,4 +107,3 @@ arc-node-execution --version arc-node-consensus --version ``` -See [Running an Arc Node](./running-an-arc-node.md) for how to run the node after installation. diff --git a/docs/running-an-arc-node.md b/docs/running-an-arc-node.md index c49f0db..ebf76ff 100644 --- a/docs/running-an-arc-node.md +++ b/docs/running-an-arc-node.md @@ -4,80 +4,148 @@ Arc is an open, EVM-compatible Layer-1 blockchain. Anyone can run an Arc node ## What Your Node Does -- **Verifies every block** — Every block is cryptographically verified against the signatures of the validator set before it is accepted. Your node independently confirms that validators finalized each block -- **Executes every transaction** — Every transaction is re-executed locally through the EVM. Your node maintains its own copy of the complete blockchain state -- **Exposes a local RPC endpoint** — Your node provides a standard Ethereum JSON-RPC API (`http://localhost:8545`) for querying blocks, balances, and transactions, and for submitting calls directly against your own verified state +- **Verifies every block** — Every block is cryptographically verified against the signatures of the validator set before it is accepted. Your node independently confirms that validators finalized each block; +- **Executes every transaction** — Every transaction is re-executed locally through the EVM. Your node maintains its own copy of the complete blockchain state; +- **Exposes a local RPC endpoint** — Your node provides a standard Ethereum JSON-RPC API (`http://localhost:8545`) for querying blocks, balances, and transactions, and for submitting calls directly against your own verified state. 
## Quick Start +An Arc node is composed of two processes: + +- **Execution Layer (EL)**: executes finalized transactions and maintains the state of the blockchain; +- **Consensus Layer (CL)**: fetches finalized blocks, verifies their cryptographic signatures, and passes them to the EL for execution. + +Refer to the [installation](installation.md) instructions to install +`arc-node-execution` (EL) and `arc-node-consensus` (CL). + > **Docker:** Container images and Docker Compose instructions are coming soon. -An Arc node runs two processes: the Execution Layer (EL) and the Consensus Layer (CL). The EL executes transactions and maintains blockchain state. The CL fetches blocks from the network, verifies their cryptographic signatures, and passes them to the EL for execution. +### Configure paths + +This guide adopts the following variables to define paths of Arc components: -See [installation](installation.md) for instructions on how to install the binaries on your machine. +| Variable | Meaning | Default | +|-----------------|----------------------------------------------------------------------------|-----------------------| +| `ARC_HOME` | Base directory of installation. Base location of data directories. | `~/.arc` | +| `ARC_EXECUTION` | Data directory for the Execution layer (EL) | `$ARC_HOME/execution` | +| `ARC_CONSENSUS` | Data directory for the Consensus layer (CL) | `$ARC_HOME/consensus` | +| `ARC_BIN_DIR` | Directory where Arc binaries are installed. Must be included in the `PATH` | `$ARC_HOME/bin` | +| `ARC_RUN` | Runtime directory for both Execution (EL) and Consensus (CL) layers. | `/run/arc` | -**0. 
Create data directories** (one-time setup): +In a simplified version, define `$ARC_HOME` and `$ARC_RUN` variables once, +then use the derived variables in the remaining of this guide: ```sh -mkdir -p ~/.arc/execution ~/.arc/consensus -sudo install -d -o $USER /run/arc +# Base directory for Arc node data (default: ~/.arc) +ARC_HOME="${ARC_HOME:-$HOME/.arc}" + +# Linux runtime directory: +ARC_RUN="/run/arc" +# Mac OS runtime directory: +#ARC_RUN="$ARC_HOME/run" + +ARC_EXECUTION=$ARC_HOME/execution +ARC_CONSENSUS=$ARC_HOME/consensus ``` -> **macOS:** `/run` does not exist on macOS. Use a user-local directory instead (e.g. `mkdir -p ~/.arc/run`) and adjust the `--ipcpath`, `--auth-ipc.path`, `--eth-socket`, and `--execution-socket` flags in the commands below accordingly. +### Setup directories -When running as a systemd service, `RuntimeDirectory=arc` creates `/run/arc` automatically — skip the second command. +The standard installation sets up `$ARC_HOME=~/.arc` as base directory. +Create the **data directories** for the execution and consensus layers: -**1. Download snapshots** (required). Syncing from genesis is not currently supported -- a snapshot is needed to bootstrap the node. +```sh +mkdir -p $ARC_EXECUTION $ARC_CONSENSUS +``` + +To set up the **runtime directory** in a **Linux** environment: ```sh -arc-snapshots download --chain=arc-testnet +sudo install -d -o $USER "$ARC_RUN" ``` -This command fetches the latest snapshot URLs from https://snapshots.arc.network, downloads the snapshots, and extracts them into `~/.arc/execution` and `~/.arc/consensus` respectively. +> When running Arc as a systemd service, `RuntimeDirectory=arc` +> sets up `/run/arc` automatically — the last command is not needed. -**2. 
Start the Execution Layer:** +To set up the **runtime directory** in a **MacOS** environment, +uncomment the `ARC_RUN="$ARC_HOME/run"` line above and run: ```sh -arc-node-execution node \ - --chain arc-testnet \ - --datadir ~/.arc/execution \ - --disable-discovery \ - --ipcpath /run/arc/reth.ipc \ - --auth-ipc \ - --auth-ipc.path /run/arc/auth.ipc \ - --http \ - --http.addr 127.0.0.1 \ - --http.port 8545 \ - --http.api eth,net,web3,txpool,trace,debug \ - --metrics 127.0.0.1:9001 \ - --full \ - --enable-arc-rpc \ - --rpc.forwarder https://rpc.quicknode.testnet.arc.network/ +mkdir -p "$ARC_RUN" ``` -> `--chain arc-testnet` uses the genesis configuration bundled in the binary. Replace with `--chain /path/to/genesis.json` if you have a custom genesis file. +### Download snapshots + +Syncing a new Arc node from genesis is currently not supported. +A **snapshot** is needed to bootstrap the node: + +```sh +arc-snapshots download \ + --chain=arc-testnet \ + --execution-path "$ARC_EXECUTION" \ + --consensus-path "$ARC_CONSENSUS" +``` + +The `arc-snapshots` binary is part of the Arc node installation. +The command above fetches the latest snapshots for `arc-testnet` chain from +https://snapshots.arc.network and extracts them into the +`$ARC_CONSENSUS` and `$ARC_EXECUTION` data directories. + +> **Download sizes:** At the time of writing, the most recent snapshot sizes +> (tagged `20260408`) are: **~68 GB** for EL and **~16 GB** for CL. +> These are the sizes of the downloaded compressed snapshots; when extracted, +> the sizes are ~103 GB for EL and ~36 GB for CL. +> +> On a fast connection (~100 Mbps) the download takes roughly 10-15 minutes; +> on slower or metered connections it can take hours. + +### Initialize consensus layer + +This is a one-time setup, producing the private key file used as network identity: -> `--http` / `--http.port` expose the JSON-RPC API on localhost. `--rpc.forwarder` routes transactions to an RPC node. 
+```sh +arc-node-consensus init --home $ARC_CONSENSUS +``` -See [reth node](https://reth.rs/cli/reth/node/) for additional flags. +### Start execution layer -**3. Initialize the Consensus Layer** (one-time setup): +The Execution Layer (EL) is deployed by the `arc-node-execution` binary and started as follows: ```sh -arc-node-consensus init --home ~/.arc/consensus +arc-node-execution node \ + --chain arc-testnet \ + --datadir $ARC_EXECUTION \ + --ipcpath $ARC_RUN/reth.ipc \ + --auth-ipc --auth-ipc.path $ARC_RUN/auth.ipc \ + --http --http.addr 127.0.0.1 --http.port 8545 \ + --http.api eth,net,web3,txpool,trace,debug \ + --rpc.forwarder https://rpc.quicknode.testnet.arc.network/ \ + --metrics 127.0.0.1:9001 \ + --disable-discovery \ + --enable-arc-rpc ``` -This generates a private key file used for P2P network identity. +The `--chain` parameter configures the genesis file. +By using `--chain arc-testnet`, the genesis configuration bundled in the binary is adopted. +Replace with `--chain /path/to/genesis.json` if you have a custom genesis file. + +The `--http`, `--http.addr`, and `--http.port` parameters expose a standard Ethereum +[JSON-RPC API](https://reth.rs/jsonrpc/intro). +The `--http.api` parameter defines the available RPC endpoints. +The `--rpc.forwarder` parameter routes requests not served locally to an existing RPC node. -**4. Start the Consensus Layer** (in a separate terminal): +The `arc-node-execution` binary accepts all parameters of a `reth` node. +Refer to its [documentation](https://reth.rs/cli/reth/node/) for details. 
+ +### Start consensus layer + +After starting the [execution layer](#start-execution-layer), in a different terminal, start the consensus layer: ```sh arc-node-consensus start \ - --home ~/.arc/consensus \ - --eth-socket /run/arc/reth.ipc \ - --execution-socket /run/arc/auth.ipc \ + --home $ARC_CONSENSUS \ + --eth-socket $ARC_RUN/reth.ipc \ + --execution-socket $ARC_RUN/auth.ipc \ --rpc.addr 127.0.0.1:31000 \ - --full \ --follow \ --follow.endpoint https://rpc.drpc.testnet.arc.network,wss=rpc.drpc.testnet.arc.network \ --follow.endpoint https://rpc.quicknode.testnet.arc.network,wss=rpc.quicknode.testnet.arc.network \ @@ -85,57 +153,101 @@ arc-node-consensus start \ --metrics 127.0.0.1:29000 ``` -> **Note:** Start the Execution Layer first. The Consensus Layer connects to it on startup and will fail if the EL is not running. +The consensus layer attempts to connect to the execution layer via the provided +`--eth-socket`. +For this reason, always start the execution layer first. +Otherwise, the consensus layer may fail to start, if it fails to connect to the +companion execution layer. -> **Note:** The Blockdaemon endpoint does not currently support WebSocket connections. The node will log retry warnings for this endpoint but still syncs correctly via the other two endpoints. HTTP block fetching from Blockdaemon works normally. +The consensus layer operates in the **follow** mode. +We provide three endpoints from which the node retrieves finalized blocks. -**5. Verify the node is syncing:** +> **Note:** The Blockdaemon endpoint currently does not support WebSocket +> connections. The node will log retry warnings for this endpoint but still +> syncs correctly via the other two endpoints. HTTP block fetching from +> Blockdaemon works normally. -Wait about 30 seconds, then check the block height: +### Verify operation + +After starting both the consensus and execution layer, wait about 30 seconds. 
+Then, check the latest block height: ```sh curl -s -X POST http://localhost:8545 \ -H "Content-Type: application/json" \ - -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' + -d '{ "jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}' ``` -The `result` field should be a hex block number that increases over time. If it stays at `0x0`, check the Consensus Layer logs for connection errors. +The produced output is in JSON format. +The `result` field represents the next block height, in hexadecimal +(you can use `printf "%0d"` to translate it into decimal). +It should increase over time. +If it remains `0x0`, check the logs of the consensus layer for errors. + +> Notice that this command queries the execution layer's HTTP server offering +> a local JSON-RPC API. +> If the address and port of the HTTP endpoint are configured differently than +> the above example, adapt the command accordingly. + +## Separated hosts -### EL ↔ CL Communication +The [Quick Start](#quick-start) section describes the setup of the execution +(EL) and consensus (CL) layers running in the same host. +The two processes interact via Inter-Process Communication (IPC), +namely using local sockets to which both processes have read and write access. -The Quick Start above uses IPC sockets, which require EL and CL to run on the same host. If they are on separate hosts, use RPC instead. +To run EL and CL in separated hosts, the two processes must instead interact +using the Remote Procedure Call (RPC) protocol. -**Generate a JWT secret** (one-time setup). The EL and CL use this to authenticate with each other: +### Authentication + +To authenticate the connection between EL and CL, a JSON Web Token (JWT) is employed: ```sh -openssl rand -hex 32 | tr -d "\n" > ~/.arc/jwtsecret -chmod 600 ~/.arc/jwtsecret +openssl rand -hex 32 | tr -d "\n" > "$ARC_HOME/jwtsecret" +chmod 600 "$ARC_HOME/jwtsecret" ``` +Notice that both hosts must have access to this random token file. 
+Generate it in one host and securely copy it into the other host. -**EL flags (RPC):** +### Execution layer -Remove the IPC flags and add: +From the [Start execution layer](#start-execution-layer) instructions, two changes are required: +1. Remove all flags related to IPC communication: `--ipcpath`, `--auth-ipc`, `--auth-ipc.path`; +2. Add the following parameters to configure the RPC interaction: ```sh ---authrpc.addr 0.0.0.0 \ ---authrpc.port 8551 \ ---authrpc.jwtsecret ~/.arc/jwtsecret + --authrpc.addr 0.0.0.0 \ + --authrpc.port 8551 \ + --authrpc.jwtsecret "$ARC_HOME/jwtsecret" ``` -> **Security:** When using `--authrpc.addr 0.0.0.0`, restrict access to the Engine API port (8551) using firewall rules or a private network. The Engine API controls block production — do not expose it to the public internet. +**Important:** with this setup, port 8551 is exposed via all network +interfaces (`0.0.0.0`). +Make sure to configure the firewall to restrict the access to this port to the +consensus layer's host. +The Engine API controls block production — do not expose it to the public internet. -**CL flags (RPC):** +### Consensus layer -Remove `--eth-socket` and `--execution-socket`, and add: +From the [Start consensus layer](#start-consensus-layer) instructions, two changes are required: +1. Remove all flags related to IPC communication: `--eth-socket` and `--execution-socket`; +2. Add the following parameters to configure the RPC interaction: ```sh ---eth-rpc-endpoint http://:8545 \ ---execution-endpoint http://:8551 \ ---execution-jwt ~/.arc/jwtsecret + --eth-rpc-endpoint http://$EL_ADDR:8545 \ + --execution-endpoint http://$EL_ADDR:8551 \ + --execution-jwt "$ARC_HOME/jwtsecret" ``` -> IPC and RPC are mutually exclusive. Both processes must have read/write access to the IPC socket directory when using IPC. +Where `EL_ADDR` is the network address (IP or hostname) of the host running the execution layer. 
+
+The `--eth-rpc-endpoint` parameter refers to the EL's HTTP server exposing a
+standard and open Ethereum [JSON-RPC API](https://reth.rs/jsonrpc/intro).
+
+The `--execution-endpoint` parameter should match the EL's `--authrpc`
+address and port, exposing the _protected_ RPC endpoint.
 
 ---
 
@@ -153,6 +265,23 @@ Remove `--eth-socket` and `--execution-socket`, and add:
 Check out [reth system requirements](https://reth.rs/run/system-requirements/)
 for more info on EL configuration.
 
+**Note**: during periods of sustained high load, such as during startup or extended sync if the node is far behind, the execution layer memory may surge on some hardware. This should not be an issue if running with the suggested System Requirements. However, if you do observe this, you can enable backpressure to throttle the pace of execution according to the speed of disk writes, which will constrain memory growth.
+
+To enable this, the `reth_` namespace should be enabled on the **execution layer**:
+
+```sh
+--http.api eth,net,web3,txpool,trace,debug,reth
+```
+
+And on the **consensus layer** backpressure must be activated:
+
+```sh
+--execution-persistence-backpressure \
+--execution-persistence-backpressure-threshold=10
+```
+
+Note: arc-node is alpha software and this performance issue is actively being worked on.
+
 ### Production Deployment
 
 For production, run both processes as systemd services.
@@ -187,7 +316,6 @@ ExecStart=/usr/local/bin/arc-node-execution node \ --http.port 8545 \ --http.api eth,net,web3,txpool,trace,debug \ --metrics 127.0.0.1:9001 \ - --full \ --enable-arc-rpc \ --rpc.forwarder https://rpc.quicknode.testnet.arc.network/ @@ -225,7 +353,6 @@ ExecStart=/usr/local/bin/arc-node-consensus start \ --eth-socket /run/arc/reth.ipc \ --execution-socket /run/arc/auth.ipc \ --rpc.addr 127.0.0.1:31000 \ - --full \ --follow \ --follow.endpoint https://rpc.drpc.testnet.arc.network,wss=rpc.drpc.testnet.arc.network \ --follow.endpoint https://rpc.quicknode.testnet.arc.network,wss=rpc.quicknode.testnet.arc.network \ @@ -280,3 +407,7 @@ For production monitoring, scrape the Prometheus metrics endpoints with Grafana: |----------|-------------| | `localhost:9001/metrics` | Execution Layer metrics | | `localhost:29000/metrics` | Consensus Layer metrics | + +### Pruning + +The `--full` flag is accepted by both the CL and EL and will enable pruning. However, EL pruning is currently considered unstable and is not recommended at this time.