Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 7 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,13 @@ docker-build: ## 🐳 Build the Docker image
@echo

# 2026-04-29
# NOTE(type1-type2): an attempted bump to anshalshukla/leanSpec@0ab09dd ("dummy
# type 1 and type 2 aggregation with block proofs") was reverted because the
# testing harness in that branch still imports `AttestationSignatures`, which
# the same commit removed — the fixture generator fails to load. We stay on
# the canonical commit and skip the affected SSZ-spec and signature-spec test
# cases until the upstream refactor lands together with matching testing-side
# updates.
LEAN_SPEC_COMMIT_HASH:=18fe71fee49f8865a5c8a4cb8b1787b0cbc9e25b

leanSpec:
Expand Down
2 changes: 2 additions & 0 deletions crates/blockchain/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ ethlambda-crypto.workspace = true
ethlambda-metrics.workspace = true
ethlambda-types.workspace = true

libssz.workspace = true

spawned-concurrency.workspace = true

tokio.workspace = true
Expand Down
30 changes: 19 additions & 11 deletions crates/blockchain/src/aggregation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ use ethlambda_crypto::aggregate_mixed;
use ethlambda_storage::Store;
use ethlambda_types::{
attestation::{AggregationBits, HashedAttestationData},
block::{AggregatedSignatureProof, ByteListMiB},
block::{ByteListMiB, BytecodeClaim, TypeOneInfo, TypeOneMultiSignature},
primitives::H256,
signature::{ValidatorPublicKey, ValidatorSignature},
state::Validator,
Expand Down Expand Up @@ -65,7 +65,7 @@ pub struct AggregationSnapshot {
/// as a message payload so the store can be updated and gossip publish fired.
pub struct AggregatedGroupOutput {
pub(crate) hashed: HashedAttestationData,
pub(crate) proof: AggregatedSignatureProof,
pub(crate) proof: TypeOneMultiSignature,
pub(crate) participants: Vec<u64>,
pub(crate) keys_to_delete: Vec<(u64, H256)>,
}
Expand Down Expand Up @@ -232,7 +232,7 @@ fn build_job(
/// can't be fully resolved (passing fewer pubkeys than the proof expects would
/// produce an invalid aggregate).
fn resolve_child_pubkeys(
child_proofs: &[AggregatedSignatureProof],
child_proofs: &[TypeOneMultiSignature],
validators: &[Validator],
) -> (Vec<(Vec<ValidatorPublicKey>, ByteListMiB)>, Vec<u64>) {
let mut children = Vec::with_capacity(child_proofs.len());
Expand All @@ -253,7 +253,7 @@ fn resolve_child_pubkeys(
continue;
}
accepted_child_ids.extend(&participant_ids);
children.push((child_pubkeys, proof.proof_data.clone()));
children.push((child_pubkeys, proof.proof.clone()));
}

(children, accepted_child_ids)
Expand Down Expand Up @@ -290,8 +290,16 @@ pub fn aggregate_job(job: AggregationJob) -> Option<AggregatedGroupOutput> {
participants.dedup();

let aggregation_bits = aggregation_bits_from_validator_indices(&participants);
let proof = AggregatedSignatureProof::new(aggregation_bits, proof_data);
metrics::observe_aggregated_proof_size(proof.proof_data.len());
let proof = TypeOneMultiSignature {
info: TypeOneInfo {
message: data_root,
slot: job.slot,
participants: aggregation_bits,
bytecode_claim: BytecodeClaim::ZERO,
},
proof: proof_data,
};
metrics::observe_aggregated_proof_size(proof.proof.len());

Some(AggregatedGroupOutput {
hashed: job.hashed,
Expand Down Expand Up @@ -328,14 +336,14 @@ pub fn finalize_aggregation_session(store: &Store) {
/// no proof adds new coverage. This keeps the number of children minimal
/// while maximizing the validators we can skip re-aggregating from scratch.
fn select_proofs_greedily(
new_proofs: &[AggregatedSignatureProof],
known_proofs: &[AggregatedSignatureProof],
) -> (Vec<AggregatedSignatureProof>, HashSet<u64>) {
let mut selected: Vec<AggregatedSignatureProof> = Vec::new();
new_proofs: &[TypeOneMultiSignature],
known_proofs: &[TypeOneMultiSignature],
) -> (Vec<TypeOneMultiSignature>, HashSet<u64>) {
let mut selected: Vec<TypeOneMultiSignature> = Vec::new();
let mut covered: HashSet<u64> = HashSet::new();

for proof_set in [new_proofs, known_proofs] {
let mut remaining: Vec<&AggregatedSignatureProof> = proof_set.iter().collect();
let mut remaining: Vec<&TypeOneMultiSignature> = proof_set.iter().collect();

while !remaining.is_empty() {
let best_idx = remaining
Expand Down
34 changes: 21 additions & 13 deletions crates/blockchain/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,10 @@ use ethlambda_types::{
ShortRoot,
aggregator::AggregatorController,
attestation::{SignedAggregatedAttestation, SignedAttestation},
block::{BlockSignatures, SignedBlock},
block::{ByteListMiB, SignedBlock, TypeOneMultiSignature, TypeTwoMultiSignature},
primitives::{H256, HashTreeRoot as _},
};
use libssz::SszEncode as _;

use crate::aggregation::{
AGGREGATION_DEADLINE, AggregateProduced, AggregationDeadline, AggregationDone,
Expand Down Expand Up @@ -42,10 +43,7 @@ pub const MILLISECONDS_PER_INTERVAL: u64 = 800;
pub const INTERVALS_PER_SLOT: u64 = 5;
/// Milliseconds in a slot (derived from interval duration and count).
pub const MILLISECONDS_PER_SLOT: u64 = MILLISECONDS_PER_INTERVAL * INTERVALS_PER_SLOT;
/// Maximum number of distinct AttestationData entries per block.
///
/// See: leanSpec commit 0c9528a (PR #536).
pub const MAX_ATTESTATIONS_DATA: usize = 16;
pub use ethlambda_types::block::MAX_ATTESTATIONS_DATA;
/// Future-slot tolerance for gossip attestations, expressed in intervals.
///
/// Bounds the clock skew the time check is willing to absorb when admitting a
Expand Down Expand Up @@ -318,7 +316,7 @@ impl BlockChainServer {
let _timing = metrics::time_block_building();

// Build the block with attestation signatures
let Ok((block, attestation_signatures, _post_checkpoints)) =
let Ok((block, type_one_proofs, _post_checkpoints)) =
store::produce_block_with_signatures(&mut self.store, slot, validator_id)
.inspect_err(|err| error!(%slot, %validator_id, %err, "Failed to build block"))
else {
Expand All @@ -337,15 +335,25 @@ impl BlockChainServer {
return;
};

// Assemble SignedBlock
// Assemble SignedBlock: wrap the proposer's XMSS signature as a
// singleton Type-1 and fold every attestation Type-1 plus the
// proposer Type-1 into the block's single merged Type-2 proof.
let proposer_proof_bytes = ByteListMiB::try_from(proposer_signature.to_vec())
.expect("XMSS signature fits in ByteListMiB");
let proposer_t1 = TypeOneMultiSignature::for_proposer(
validator_id,
proposer_proof_bytes,
block_root,
slot,
);
let mut all_proofs = type_one_proofs;
all_proofs.push(proposer_t1);
let merged = TypeTwoMultiSignature::from_type_1s(all_proofs);
let proof_bytes = ByteListMiB::try_from(merged.to_ssz())
.expect("merged Type-2 proof fits in ByteListMiB");
let signed_block = SignedBlock {
message: block,
signature: BlockSignatures {
proposer_signature,
attestation_signatures: attestation_signatures
.try_into()
.expect("attestation signatures within limit"),
},
proof: proof_bytes,
};

// Process the block locally before publishing
Expand Down
Loading