From 036ca3a94d0edbe06818de3837d1a901bd641200 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 16:28:57 +0200 Subject: [PATCH 1/9] refactor(forks): move verify_signatures body into the spec class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moves the full XMSS signature verification logic from SignedBlock.verify_signatures into LstarSpec.verify_signatures. SignedBlock becomes a pure SSZ data container. Internal callers that still hold a Store (notably Store.on_block, which itself moves to the spec class in a follow-up) reach the verification path via a deferred import of LstarSpec to sidestep the spec ↔ store module-load cycle. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../forks/lstar/containers/block/block.py | 95 +------------------ src/lean_spec/forks/lstar/spec.py | 91 +++++++++++++++++- src/lean_spec/forks/lstar/store.py | 7 +- 3 files changed, 94 insertions(+), 99 deletions(-) diff --git a/src/lean_spec/forks/lstar/containers/block/block.py b/src/lean_spec/forks/lstar/containers/block/block.py index b8244008..8880f9c0 100644 --- a/src/lean_spec/forks/lstar/containers/block/block.py +++ b/src/lean_spec/forks/lstar/containers/block/block.py @@ -6,12 +6,8 @@ The proposer is determined by slot assignment. """ -from lean_spec.forks.lstar.containers.validator import Validators -from lean_spec.subspecs.ssz.hash import hash_tree_root -from lean_spec.subspecs.xmss.aggregation import AggregationError from lean_spec.subspecs.xmss.containers import Signature -from lean_spec.subspecs.xmss.interface import TARGET_SIGNATURE_SCHEME, GeneralizedXmssScheme -from lean_spec.types import Bytes32, Slot, Uint64, ValidatorIndex +from lean_spec.types import Bytes32, Slot, ValidatorIndex from lean_spec.types.container import Container from .types import ( @@ -92,92 +88,3 @@ class SignedBlock(Container): signature: BlockSignatures """Aggregated signature payload for the block.""" - - def verify_signatures( - self, - validators: Validators, - scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, - ) -> bool: - """ - Verify all XMSS signatures in this signed block. - - Checks that: - - - Each body attestation is signed by participating validators - - The proposer signed the block root with the proposal key - - Args: - validators: Validator registry providing public keys for verification. - scheme: XMSS signature scheme for verification. - - Returns: - True if all signatures are valid. - - Raises: - AssertionError: On verification failure. - """ - block = self.block - signatures = self.signature - aggregated_attestations = self.block.body.attestations - attestation_signatures = signatures.attestation_signatures - - # Each attestation in the body must have a corresponding signature entry. - assert len(aggregated_attestations) == len(attestation_signatures), ( - "Attestation signature groups must align with block body attestations" - ) - - # Attestations and signatures are parallel arrays. - # - Each attestation says "validators X, Y, Z voted for this data". - # - Each signature proves those validators actually signed. - for aggregated_attestation, aggregated_signature in zip( - aggregated_attestations, attestation_signatures, strict=True - ): - # Extract which validators participated in this attestation. - # The aggregation bits encode validator indices as a bitfield. 
- validator_ids = aggregated_attestation.aggregation_bits.to_validator_indices() - - # The signed message is the attestation data root. - # All validators in this group signed this exact data. - attestation_data_root = hash_tree_root(aggregated_attestation.data) - - for validator_id in validator_ids: - num_validators = Uint64(len(validators)) - assert validator_id.is_valid(num_validators), "Validator index out of range" - - # Collect attestation public keys for all participating validators. - # Order matters: must match the order in the aggregated signature. - public_keys = [validators[vid].get_attestation_pubkey() for vid in validator_ids] - - try: - aggregated_signature.verify( - public_keys=public_keys, - message=attestation_data_root, - slot=aggregated_attestation.data.slot, - ) - except AggregationError as exc: - raise AssertionError( - f"Attestation aggregated signature verification failed: {exc}" - ) from exc - - # Verify the proposer's signature over the block root. - # - # The proposer signs hash_tree_root(block) with their proposal key. - # This proves the proposer endorsed this specific block. - proposer_index = block.proposer_index - assert proposer_index.is_valid(Uint64(len(validators))), "Proposer index out of range" - - proposer = validators[proposer_index] - block_root = hash_tree_root(block) - - try: - valid = scheme.verify( - proposer.get_proposal_pubkey(), - block.slot, - block_root, - signatures.proposer_signature, - ) - except (ValueError, IndexError): - valid = False - assert valid, "Proposer block signature verification failed" - - return True diff --git a/src/lean_spec/forks/lstar/spec.py b/src/lean_spec/forks/lstar/spec.py index c4c377b2..16cb27da 100644 --- a/src/lean_spec/forks/lstar/spec.py +++ b/src/lean_spec/forks/lstar/spec.py @@ -25,9 +25,10 @@ from lean_spec.forks.lstar.containers.state import State from lean_spec.forks.lstar.containers.validator import Validators from lean_spec.subspecs.chain.clock import Interval -from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof +from lean_spec.subspecs.ssz.hash import hash_tree_root +from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof, AggregationError from lean_spec.subspecs.xmss.interface import TARGET_SIGNATURE_SCHEME, GeneralizedXmssScheme -from lean_spec.types import Bytes32, Slot, ValidatorIndex +from lean_spec.types import Bytes32, Slot, Uint64, ValidatorIndex from ..protocol import ForkProtocol, SpecStateType from .store import Store @@ -127,8 +128,90 @@ def verify_signatures( validators: Validators, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> bool: - """Check that every signature carried by a signed block is valid.""" - return signed_block.verify_signatures(validators, scheme) + """ + Verify all XMSS signatures in this signed block. + + Checks that: + + - Each body attestation is signed by participating validators + - The proposer signed the block root with the proposal key + + Args: + signed_block: The signed block whose signatures are checked. + validators: Validator registry providing public keys for verification. + scheme: XMSS signature scheme for verification. + + Returns: + True if all signatures are valid. + + Raises: + AssertionError: On verification failure. + """ + block = signed_block.block + signatures = signed_block.signature + aggregated_attestations = block.body.attestations + attestation_signatures = signatures.attestation_signatures + + # Each attestation in the body must have a corresponding signature entry. 
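+        #
+        # For example, a body carrying three aggregates must also carry three
+        # signature entries, index-aligned:
+        #
+        #     aggregated_attestations = [a0, a1, a2]
+        #     attestation_signatures  = [s0, s1, s2]
+        #
+        # where each s_i proves that the participants declared by a_i signed
+        # hash_tree_root(a_i.data).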
+ assert len(aggregated_attestations) == len(attestation_signatures), ( + "Attestation signature groups must align with block body attestations" + ) + + # Attestations and signatures are parallel arrays. + # - Each attestation says "validators X, Y, Z voted for this data". + # - Each signature proves those validators actually signed. + for aggregated_attestation, aggregated_signature in zip( + aggregated_attestations, attestation_signatures, strict=True + ): + # Extract which validators participated in this attestation. + # The aggregation bits encode validator indices as a bitfield. + validator_ids = aggregated_attestation.aggregation_bits.to_validator_indices() + + # The signed message is the attestation data root. + # All validators in this group signed this exact data. + attestation_data_root = hash_tree_root(aggregated_attestation.data) + + for validator_id in validator_ids: + num_validators = Uint64(len(validators)) + assert validator_id.is_valid(num_validators), "Validator index out of range" + + # Collect attestation public keys for all participating validators. + # Order matters: must match the order in the aggregated signature. + public_keys = [validators[vid].get_attestation_pubkey() for vid in validator_ids] + + try: + aggregated_signature.verify( + public_keys=public_keys, + message=attestation_data_root, + slot=aggregated_attestation.data.slot, + ) + except AggregationError as exc: + raise AssertionError( + f"Attestation aggregated signature verification failed: {exc}" + ) from exc + + # Verify the proposer's signature over the block root. + # + # The proposer signs hash_tree_root(block) with their proposal key. + # This proves the proposer endorsed this specific block. + proposer_index = block.proposer_index + assert proposer_index.is_valid(Uint64(len(validators))), "Proposer index out of range" + + proposer = validators[proposer_index] + block_root = hash_tree_root(block) + + try: + valid = scheme.verify( + proposer.get_proposal_pubkey(), + block.slot, + block_root, + signatures.proposer_signature, + ) + except (ValueError, IndexError): + valid = False + assert valid, "Proposer block signature verification failed" + + return True def on_block( self, diff --git a/src/lean_spec/forks/lstar/store.py b/src/lean_spec/forks/lstar/store.py index fab5b2a5..36fe53bf 100644 --- a/src/lean_spec/forks/lstar/store.py +++ b/src/lean_spec/forks/lstar/store.py @@ -516,7 +516,12 @@ def on_block( ) # Validate cryptographic signatures - valid_signatures = signed_block.verify_signatures(parent_state.validators, scheme) + # Deferred import breaks the spec ↔ store cycle. + from lean_spec.forks.lstar.spec import LstarSpec + + valid_signatures = LstarSpec().verify_signatures( + signed_block, parent_state.validators, scheme + ) # Execute state transition function to compute post-block state post_state = parent_state.state_transition(block, valid_signatures) From 6d05f1b5f781210c443f7a82ff9031ce30477e2b Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 16:52:32 +0200 Subject: [PATCH 2/9] refactor(forks): move State and Store bodies into the spec class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Migrates every State and Store method body into LstarSpec. Containers (State, Store, SignedBlock) become thin Pydantic data classes whose methods are one-line forwarders to the active fork spec, reached via a deferred import that breaks the spec ↔ container module-load cycle. 
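In sketch form, the pattern each container now carries (the concrete version
is in the state.py hunk below):

    _LAZY_SPEC: object = None

    def _spec() -> object:
        # Deferred import: by the time any method body runs, both modules
        # are fully loaded, so the import cycle never materializes.
        global _LAZY_SPEC
        if _LAZY_SPEC is None:
            from lean_spec.forks.lstar.spec import LstarSpec

            _LAZY_SPEC = LstarSpec()
        return _LAZY_SPEC

    class State(Container):
        def process_slots(self, target_slot: Slot) -> State:
            """Advance the state through empty slots up to, but not including, target_slot."""
            return _spec().process_slots(self, target_slot)  # type: ignore[attr-defined]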
Inside the moved bodies, every literal Block(...), BlockBody(...),
BlockHeader(...), Config(...), AggregatedAttestations(...), and other
container constructor is now self.<container>_class(...) (e.g. Block(...)
becomes self.block_class(...)). An inheriting fork that swaps a single
container type therefore receives the parent fork's logic for free.

Observability hooks (observe_state_transition, observe_on_block,
observe_on_attestation) ride along with the bodies, preserving the metrics
surface.

The obsolete delegator-forwarding test file is removed; behavioural coverage
now lives in the existing state-transition, fork-choice, and block-production
test suites.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .../forks/lstar/containers/state/state.py |  735 +------
 src/lean_spec/forks/lstar/spec.py         | 1726 ++++++++++++++++-
 src/lean_spec/forks/lstar/store.py        | 1194 +-----------
 .../forks/test_lstar_spec_delegators.py   |  235 ---
 4 files changed, 1765 insertions(+), 2125 deletions(-)
 delete mode 100644 tests/lean_spec/forks/test_lstar_spec_delegators.py

diff --git a/src/lean_spec/forks/lstar/containers/state/state.py b/src/lean_spec/forks/lstar/containers/state/state.py
index e6d6fff5..624a3443 100644
--- a/src/lean_spec/forks/lstar/containers/state/state.py
+++ b/src/lean_spec/forks/lstar/containers/state/state.py
@@ -6,8 +6,7 @@
 from collections.abc import Set as AbstractSet
 
 from lean_spec.forks.lstar.containers.attestation import AggregatedAttestation, AttestationData
-from lean_spec.forks.lstar.containers.block import Block, BlockBody, BlockHeader
-from lean_spec.forks.lstar.containers.block.types import AggregatedAttestations
+from lean_spec.forks.lstar.containers.block import Block, BlockHeader
 from lean_spec.forks.lstar.containers.config import Config
 from lean_spec.forks.lstar.containers.state.types import (
     HistoricalBlockHashes,
@@ -16,21 +15,20 @@
     JustifiedSlots,
 )
 from lean_spec.forks.lstar.containers.validator import Validators
-from lean_spec.subspecs.chain.config import MAX_ATTESTATIONS_DATA
-from lean_spec.subspecs.observability import observe_state_transition
-from lean_spec.subspecs.ssz.hash import hash_tree_root
 from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof
-from lean_spec.types import (
-    ZERO_HASH,
-    Boolean,
-    Bytes32,
-    Checkpoint,
-    Container,
-    Slot,
-    Uint8,
-    Uint64,
-    ValidatorIndex,
-)
+from lean_spec.types import Bytes32, Checkpoint, Container, Slot, Uint64, ValidatorIndex
+
+_LAZY_SPEC: object = None
+
+
+def _spec() -> object:
+    """Return the lstar fork spec; deferred import breaks the spec ↔ state cycle."""
+    global _LAZY_SPEC
+    if _LAZY_SPEC is None:
+        from lean_spec.forks.lstar.spec import LstarSpec
+
+        _LAZY_SPEC = LstarSpec()
+    return _LAZY_SPEC
 
 
 class State(Container):
@@ -73,560 +71,33 @@
 
     @classmethod
     def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> State:
-        """
-        Generate a genesis state with empty history and proper initial values.
-
-        Args:
-            genesis_time: The genesis timestamp.
-            validators: The list of validators in the genesis state.
-
-        Returns:
-            A properly initialized genesis state.
-        """
-        # Configure the genesis state.
-        genesis_config = Config(
-            genesis_time=genesis_time,
-        )
-
-        # Build the genesis block header for the state.
-        genesis_header = BlockHeader(
-            slot=Slot(0),
-            proposer_index=ValidatorIndex(0),
-            parent_root=Bytes32.zero(),
-            state_root=Bytes32.zero(),
-            body_root=hash_tree_root(BlockBody(attestations=AggregatedAttestations(data=[]))),
-        )
-
-        # Assemble and return the full genesis state.
- return cls( - config=genesis_config, - slot=Slot(0), - latest_block_header=genesis_header, - latest_justified=Checkpoint(root=Bytes32.zero(), slot=Slot(0)), - latest_finalized=Checkpoint(root=Bytes32.zero(), slot=Slot(0)), - historical_block_hashes=HistoricalBlockHashes(data=[]), - justified_slots=JustifiedSlots(data=[]), - validators=validators, - justifications_roots=JustificationRoots(data=[]), - justifications_validators=JustificationValidators(data=[]), - ) + """Generate a genesis state with empty history and proper initial values.""" + return _spec().generate_genesis(genesis_time, validators) # type: ignore[attr-defined] def process_slots(self, target_slot: Slot) -> State: - """ - Advance the state through empty slots up to, but not including, target_slot. - - The loop: - - Performs per-slot maintenance (e.g., state root caching). - - Increments the slot counter after each call. - The function returns a new state with slot == target_slot. - - Args: - target_slot: The slot to reach by processing empty slots. - - Returns: - A new state that has progressed to target_slot. - - Raises: - AssertionError: If target_slot is not in the future. - """ - # The target must be strictly greater than the current slot. - assert self.slot < target_slot, "Target slot must be in the future" - - # Work on a local variable. Do not mutate self. - state = self - - # Step through each missing slot. - while state.slot < target_slot: - # Per-Slot Housekeeping & Slot Increment - # - # This single statement performs two tasks for each empty slot - # in a single, immutable update: - # - # 1. State Root Caching (Conditional): - # It checks if the latest block header has an empty state root. - # This is true only for the *first* empty slot immediately - # following a block. - # - # - If it is empty, we must cache the pre-block state root - # (the hash of the state *before* this slot increment) into that - # header. We do this by: - # a) Computing the root of the current (pre-block) state. - # b) Creating a *new* header object with this computed state root - # to be included in the update. - # - # - If the state root is *not* empty, it means we are in a - # sequence of empty slots, and we simply use the existing header. - # - # 2. Slot Increment: - # It always increments the slot number by one. - needs_state_root = state.latest_block_header.state_root == Bytes32.zero() - cached_state_root = ( - hash_tree_root(state) if needs_state_root else state.latest_block_header.state_root - ) - - state = state.model_copy( - update={ - "latest_block_header": ( - state.latest_block_header.model_copy( - update={"state_root": cached_state_root} - ) - if needs_state_root - else state.latest_block_header - ), - "slot": Slot(state.slot + Slot(1)), - } - ) - - # Reached the target slot. Return the advanced state. - return state + """Advance the state through empty slots up to, but not including, target_slot.""" + return _spec().process_slots(self, target_slot) # type: ignore[attr-defined] def process_block_header(self, block: Block) -> State: - """ - Validate the block header and update header-linked state. - - Checks: - - The block slot equals the current state slot. - - The block slot is newer than the latest header slot. - - The proposer index matches the round-robin selection. - - The parent root matches the hash of the latest block header. - - Updates: - - For the first post-genesis block, mark genesis as justified/finalized. - - Append the parent root to historical hashes. 
- - Append the justified bit for the parent (true only for genesis). - - Insert ZERO_HASH entries for any skipped empty slots. - - Set latest_block_header for the new block with an empty state_root. - - Args: - block: The block whose header is being processed. - - Returns: - A new state with header-related fields updated. - - Raises: - AssertionError: If any header check fails. - """ - # Validation - # - # - Retrieve the header of the previous block (the parent). - # - Compute the parent root hash. - parent_header = self.latest_block_header - parent_root = hash_tree_root(parent_header) - - # Consensus checks - - # Verify the block corresponds to the current state slot. - # - # To move to this slot, we have processed any intermediate slots before. - assert block.slot == self.slot, "Block slot mismatch" - - # The block must be newer than the current latest header. - assert block.slot > parent_header.slot, "Block is older than latest header" - - # Verify the block proposer. - # - # Ensures the block was proposed by the assigned validator for this round. - assert block.proposer_index.is_proposer_for( - slot=self.slot, - num_validators=Uint64(len(self.validators)), - ), "Incorrect block proposer" - - # Verify the chain link. - # - # The block must cryptographically point to the known parent. - assert block.parent_root == parent_root, "Block parent root mismatch" - - # Checkpoint Updates - - # Detect if we are transitioning from the genesis block. - # - # This flag is True only when processing the very first block of the chain. - # This means the parent is the Genesis block (Slot 0). - is_genesis_parent = parent_header.slot == Slot(0) - - # Update the consensus checkpoints. - # - # This logic acts as the trust anchor for the chain: - # - # - If the parent is the Genesis block: It cannot receive votes as it - # precedes the start of the chain. Therefore, we explicitly force it - # to be Justified and Finalized immediately. - # - # - For all other blocks: We retain the existing checkpoints. Future - # updates rely entirely on validator attestations which are processed - # later in the block body. - if is_genesis_parent: - new_latest_justified = self.latest_justified.model_copy(update={"root": parent_root}) - new_latest_finalized = self.latest_finalized.model_copy(update={"root": parent_root}) - else: - new_latest_justified = self.latest_justified - new_latest_finalized = self.latest_finalized - - # Historical Data Management - - # Calculate the gap between the parent and the current block. - # - # If slots were skipped (missed proposals), we must record them. - # - # Formula: (Current - Parent - 1). Adjacent blocks have a gap of 0. - num_empty_slots = int(block.slot - parent_header.slot - Slot(1)) - - # Update the list of historical block roots. - # - # Structure: [Existing history] + [Parent root] + [Zero hash for gaps] - new_historical_hashes_data = ( - self.historical_block_hashes + [parent_root] + [ZERO_HASH] * num_empty_slots - ) - - # Update the list of justified slot flags. - # - # IMPORTANT: This list is stored relative to the finalized boundary. - # - # The first entry corresponds to the slot immediately following the - # latest finalized checkpoint. - # - # Here, we extend the storage capacity to ensure the range from the - # finalized boundary up to the last materialized slot is fully tracked - # and addressable. The current block's slot is not materialized until - # its header is fully processed, so we stop at slot (block.slot - 1). 
- last_materialized_slot = block.slot - Slot(1) - new_justified_slots_data = self.justified_slots.extend_to_slot( - self.latest_finalized.slot, - last_materialized_slot, - ) - - # Construct the new latest block header. - # - # The new header object represents the tip of the chain. - # - # Leave state root empty. - # It is not computed until the block body is fully processed or the next slot begins. - new_header = BlockHeader( - slot=block.slot, - proposer_index=block.proposer_index, - parent_root=block.parent_root, - body_root=hash_tree_root(block.body), - state_root=Bytes32.zero(), - ) - - # Final Immutable Copy - # - # Return a new immutable state instance. - # All calculated updates are applied atomically here. - return self.model_copy( - update={ - "latest_justified": new_latest_justified, - "latest_finalized": new_latest_finalized, - "historical_block_hashes": new_historical_hashes_data, - "justified_slots": new_justified_slots_data, - "latest_block_header": new_header, - } - ) + """Validate the block header and update header-linked state.""" + return _spec().process_block_header(self, block) # type: ignore[attr-defined] def process_block(self, block: Block) -> State: - """ - Apply full block processing including header and body. - - Args: - block: The block to process. - - Returns: - A new state with the processed block. - - Raises: - AssertionError: If block contains duplicate aggregated attestations - with no unique participant. - """ - # First process the block header. - state = self.process_block_header(block) - - return state.process_attestations(block.body.attestations) + """Apply full block processing including header and body.""" + return _spec().process_block(self, block) # type: ignore[attr-defined] def process_attestations( self, attestations: Iterable[AggregatedAttestation], ) -> State: - """ - Apply attestations and update justification/finalization - according to the Lean Consensus 3SF-mini rules. - - This simplified consensus mechanism: - 1. Processes each attestation - 2. Updates justified status for target checkpoints - 3. Applies finalization rules based on justified status - - Args: - attestations: The aggregated attestations to process. - - Returns: - A new state with updated justification/finalization. - """ - # Reconstruct the vote-tracking structure - # - # The state stores justification data in a compact SSZ layout: - # - # - A list of block roots that are currently being tracked. - # - One long flat list containing validator vote flags. - # - # For each tracked block, there is a consecutive segment of vote flags. - # Every segment has the same length: the number of validators. - # - # Conceptually, we want to recover a more natural view: - # - # "For each block root, here is the list of votes from all validators." - # - # We rebuild this intuitive structure by slicing the flat vote list back - # into its individual segments. Each slice corresponds to one tracked block. - # - # This gives us a mapping: - # - # (block root) → [vote flags for validators 0..N-1] - # - # which makes the rest of the logic easier to express and understand. 
- assert not any(root == ZERO_HASH for root in self.justifications_roots), ( - "zero hash is not allowed in justifications roots" - ) - justifications = ( - { - root: self.justifications_validators[ - i * len(self.validators) : (i + 1) * len(self.validators) - ] - for i, root in enumerate(self.justifications_roots) - } - if self.justifications_roots - else {} - ) - - # Track state changes to be applied at the end - latest_justified = self.latest_justified - latest_finalized = self.latest_finalized - finalized_slot = latest_finalized.slot - justified_slots = self.justified_slots - - # Map roots to their latest slot for pruning. - # - # Votes for zero hash are ignored, so we only need the most recent slot - # where a root appears to decide whether it is still unfinalized. - start_slot = int(finalized_slot) + 1 - root_to_slot: dict[Bytes32, Slot] = {} - for i in range(start_slot, len(self.historical_block_hashes)): - root_to_slot[self.historical_block_hashes[i]] = Slot(i) - - # Process each attestation independently. - # - # Every attestation is a claim: - # "I vote to extend the chain from SOURCE to TARGET." - # - # The rules below filter out invalid or irrelevant votes. - for attestation in attestations: - source = attestation.data.source - target = attestation.data.target - - # Check that the source is already trusted. - # - # A vote may only originate from a point in history that is already justified. - # A source that lacks existing justification cannot be used to anchor a new vote. - if not justified_slots.is_slot_justified(finalized_slot, source.slot): - continue - - # Ignore votes for targets that have already reached consensus. - # - # If a block is already justified, additional votes do not change anything. - # We simply skip them. - if justified_slots.is_slot_justified(finalized_slot, target.slot): - continue - - # Ignore votes that reference zero-hash slots. - if source.root == ZERO_HASH or target.root == ZERO_HASH: - continue - - # Ensure the vote refers to blocks that actually exist on our chain. - # - # The attestation must match our canonical chain. - # Both the source root and target root must equal the recorded block roots - # stored for those slots in history. - # - # This prevents votes about unknown or conflicting forks. - source_slot_int = int(source.slot) - target_slot_int = int(target.slot) - source_matches = ( - source.root == self.historical_block_hashes[source_slot_int] - if source_slot_int < len(self.historical_block_hashes) - else False - ) - target_matches = ( - target.root == self.historical_block_hashes[target_slot_int] - if target_slot_int < len(self.historical_block_hashes) - else False - ) - - if not source_matches or not target_matches: - continue - - # Ensure time flows forward. - # - # A target must always lie strictly after its source slot. - # Otherwise the vote makes no chronological sense. - if target.slot <= source.slot: - continue - - # Ensure the target falls on a slot that can be justified after the finalized one. - # - # In 3SF-mini, justification does not advance freely through time. - # - # Only certain positions beyond the finalized slot are allowed to - # receive new votes. These positions form a small, structured set: - # - # - the immediate steps right after finalization, - # - the square-number distances, - # - and the pronic-number distances. - # - # Any target outside this pattern is not eligible for justification, - # so votes for it are simply ignored. 
- if not target.slot.is_justifiable_after(finalized_slot): - continue - - # Record the vote. - # - # If this is the first vote for the target block, create a fresh tally sheet: - # - one boolean per validator, all initially False. - if target.root not in justifications: - justifications[target.root] = [Boolean(False)] * len(self.validators) - - # Mark that each validator in this aggregation has voted for the target. - # - # A vote is represented as a boolean flag. - # If it was previously absent, flip it to True. - for validator_id in attestation.aggregation_bits.to_validator_indices(): - if not justifications[target.root][validator_id]: - justifications[target.root][validator_id] = Boolean(True) - - # Check whether the vote count crosses the supermajority threshold. - # - # A block becomes justified when at least two-thirds of validators - # have voted for it. - # - # We compare integers to avoid floating-point division: - # - # 3 * (number of votes) ≥ 2 * (total validators) - count = sum(bool(justified) for justified in justifications[target.root]) - - if 3 * count >= (2 * len(self.validators)): - # The block becomes justified - # - # The chain now considers this block part of its safe head. - latest_justified = target - justified_slots = justified_slots.with_justified( - finalized_slot, - target.slot, - Boolean(True), - ) - - # There is no longer any need to track individual votes for this block. - del justifications[target.root] - - # Consider whether finalization can advance - # - # Finalization requires a continuous chain of trust from the - # previously finalized checkpoint up to the new justified point. - # - # If every slot in between is justifiable relative to the old - # finalized point, then the earlier source checkpoint becomes finalized. - # - # In short: - # - # If there is no break in the chain, advance finalization. - if not any( - Slot(slot).is_justifiable_after(finalized_slot) - for slot in range(source.slot + Slot(1), target.slot) - ): - old_finalized_slot = finalized_slot - latest_finalized = source - finalized_slot = latest_finalized.slot - - # Rebase/prune justification tracking across the new finalized boundary. - # - # The state stores justified slot flags starting at (finalized_slot + 1), - # so when finalization advances by `delta`, we drop the first `delta` bits. - # - # We also prune any pending justifications whose latest slot - # is now finalized (latest <= finalized_slot). - delta = int(finalized_slot - old_finalized_slot) - if delta > 0: - justified_slots = justified_slots.shift_window(delta) - assert all(root in root_to_slot for root in justifications), ( - "Justification root missing from root_to_slot" - ) - justifications = { - root: votes - for root, votes in justifications.items() - if root_to_slot[root] > finalized_slot - } - - # Convert the vote structure back into SSZ format - # - # Internally, we used a mapping: - # - # block root → list of votes - # - # SSZ requires: - # - # - a sorted list of block roots - # - a single flat list of votes (all roots concatenated in sorted order) - # - # Sorting ensures that every node produces identical state representation. 
- sorted_roots = sorted(justifications.keys()) - - # Construct and return the updated state - return self.model_copy( - update={ - "justifications_roots": JustificationRoots(data=sorted_roots), - "justifications_validators": JustificationValidators( - data=[vote for root in sorted_roots for vote in justifications[root]] - ), - "justified_slots": justified_slots, - "latest_justified": latest_justified, - "latest_finalized": latest_finalized, - } - ) + """Apply attestations and update justification/finalization.""" + return _spec().process_attestations(self, attestations) # type: ignore[attr-defined] def state_transition(self, block: Block, valid_signatures: bool = True) -> State: - """ - Apply the complete state transition function for a block. - - This method represents the full state transition function: - 1. Validate signatures if required - 2. Process slots up to the block's slot - 3. Process the block header and body - 4. Validate the computed state root - - Args: - block: The block to apply to the state. - valid_signatures: Whether to validate block signatures. Defaults to True. - - Returns: - A new state after applying the block. - - Raises: - AssertionError: If signature validation fails or state root is invalid. - """ - # Validate signatures if required - if not valid_signatures: - raise AssertionError("Block signatures must be valid") - - with observe_state_transition(): - # First, process any intermediate slots. - state = self.process_slots(block.slot) - - # Process the block itself. - new_state = state.process_block(block) - - # Validate that the block's state root matches the computed state - computed_state_root = hash_tree_root(new_state) - if block.state_root != computed_state_root: - raise AssertionError("Invalid block state root") - - return new_state + """Apply the complete state transition function for a block.""" + return _spec().state_transition( # type: ignore[attr-defined] + self, block, valid_signatures + ) def build_block( self, @@ -636,152 +107,12 @@ def build_block( known_block_roots: AbstractSet[Bytes32], aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] | None = None, ) -> tuple[Block, State, list[AggregatedAttestation], list[AggregatedSignatureProof]]: - """ - Build a valid block on top of this state. - - Computes the post-state and creates a block with the correct state root. - - Uses a fixed-point algorithm: finds attestation_data entries whose source - matches the current justified checkpoint, greedily selects proofs maximizing - new validator coverage, then applies the STF. If justification advances, - repeats with the new checkpoint. - - Args: - slot: Target slot for the block. - proposer_index: Validator index of the proposer. - parent_root: Root of the parent block. - known_block_roots: Set of known block roots for attestation validation. - aggregated_payloads: Aggregated signature payloads keyed by attestation data. - - Returns: - Tuple of (Block, post-State, collected attestations, signatures). - """ - aggregated_attestations: list[AggregatedAttestation] = [] - aggregated_signatures: list[AggregatedSignatureProof] = [] - - if aggregated_payloads: - # Fixed-point loop: find attestation_data entries matching the current - # justified checkpoint and greedily select proofs. Processing attestations - # may advance justification, unlocking more entries. - # When building on top of genesis (slot 0), process_block_header - # updates the justified root to parent_root. Apply the same - # derivation here so attestation sources match. 
- if self.latest_block_header.slot == Slot(0): - current_justified = self.latest_justified.model_copy(update={"root": parent_root}) - else: - current_justified = self.latest_justified - - processed_att_data: set[AttestationData] = set() - - while True: - found_entries = False - - for att_data, proofs in sorted( - aggregated_payloads.items(), key=lambda item: item[0].target.slot - ): - if ( - Uint8(len(processed_att_data)) >= MAX_ATTESTATIONS_DATA - and att_data not in processed_att_data - ): - break - - if att_data.head.root not in known_block_roots: - continue - - if att_data.source != current_justified: - continue - - if att_data in processed_att_data: - continue - processed_att_data.add(att_data) - - found_entries = True - - selected, _ = AggregatedSignatureProof.select_greedily(proofs) - aggregated_signatures.extend(selected) - for proof in selected: - aggregated_attestations.append( - AggregatedAttestation( - aggregation_bits=proof.participants, - data=att_data, - ) - ) - - if not found_entries: - break - - # Build candidate block and check if justification changed. - candidate_block = Block( - slot=slot, - proposer_index=proposer_index, - parent_root=parent_root, - state_root=Bytes32.zero(), - body=BlockBody( - attestations=AggregatedAttestations(data=list(aggregated_attestations)) - ), - ) - post_state = self.process_slots(slot).process_block(candidate_block) - - if post_state.latest_justified != current_justified: - current_justified = post_state.latest_justified - continue - - break - - # Compact: merge all proofs sharing the same AttestationData into one - # using recursive children aggregation. - # - # During the fixed-point loop above, multiple proofs may have been - # selected for the same AttestationData across iterations. Group them - # and merge each group into a single recursive proof. - proof_groups: dict[AttestationData, list[AggregatedSignatureProof]] = {} - for att, sig in zip(aggregated_attestations, aggregated_signatures, strict=True): - proof_groups.setdefault(att.data, []).append(sig) - - aggregated_attestations = [] - aggregated_signatures = [] - for att_data, proofs in proof_groups.items(): - if len(proofs) == 1: - sig = proofs[0] - else: - # Multiple proofs for the same data were aggregated separately. - # Merge them into one recursive proof using children-only - # aggregation (no new raw signatures). - children = [ - ( - proof, - [ - self.validators[vid].get_attestation_pubkey() - for vid in proof.participants.to_validator_indices() - ], - ) - for proof in proofs - ] - sig = AggregatedSignatureProof.aggregate( - xmss_participants=None, - children=children, - raw_xmss=[], - message=hash_tree_root(att_data), - slot=att_data.slot, - ) - aggregated_signatures.append(sig) - aggregated_attestations.append( - AggregatedAttestation(aggregation_bits=sig.participants, data=att_data) - ) - - # Create the final block with selected attestations. - final_block = Block( + """Build a valid block on top of this state.""" + return _spec().build_block( # type: ignore[attr-defined] + self, slot=slot, proposer_index=proposer_index, parent_root=parent_root, - state_root=Bytes32.zero(), - body=BlockBody( - attestations=AggregatedAttestations(data=aggregated_attestations), - ), + known_block_roots=known_block_roots, + aggregated_payloads=aggregated_payloads, ) - - # Recompute state from the final block. 
- post_state = self.process_slots(slot).process_block(final_block) - final_block = final_block.model_copy(update={"state_root": hash_tree_root(post_state)}) - - return final_block, post_state, aggregated_attestations, aggregated_signatures diff --git a/src/lean_spec/forks/lstar/spec.py b/src/lean_spec/forks/lstar/spec.py index 16cb27da..11509994 100644 --- a/src/lean_spec/forks/lstar/spec.py +++ b/src/lean_spec/forks/lstar/spec.py @@ -1,8 +1,9 @@ """Lstar fork — identity and construction facade.""" +from collections import defaultdict from collections.abc import Iterable from collections.abc import Set as AbstractSet -from typing import ClassVar +from typing import Any, ClassVar, cast from lean_spec.forks.lstar.containers import ( AggregatedAttestation, @@ -23,15 +24,43 @@ AttestationSignatures, ) from lean_spec.forks.lstar.containers.state import State +from lean_spec.forks.lstar.containers.state.types import ( + HistoricalBlockHashes, + JustificationRoots, + JustificationValidators, + JustifiedSlots, +) from lean_spec.forks.lstar.containers.validator import Validators from lean_spec.subspecs.chain.clock import Interval +from lean_spec.subspecs.chain.config import ( + GOSSIP_DISPARITY_INTERVALS, + INTERVALS_PER_SLOT, + JUSTIFICATION_LOOKBACK_SLOTS, + MAX_ATTESTATIONS_DATA, +) +from lean_spec.subspecs.observability import ( + observe_on_attestation, + observe_on_block, + observe_state_transition, +) from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof, AggregationError from lean_spec.subspecs.xmss.interface import TARGET_SIGNATURE_SCHEME, GeneralizedXmssScheme -from lean_spec.types import Bytes32, Slot, Uint64, ValidatorIndex +from lean_spec.types import ( + ZERO_HASH, + Boolean, + Bytes32, + Checkpoint, + Slot, + SSZList, + Uint8, + Uint64, + ValidatorIndex, + ValidatorIndices, +) -from ..protocol import ForkProtocol, SpecStateType -from .store import Store +from ..protocol import ForkProtocol, SpecBlockType, SpecStateType, SpecStoreType +from .store import AttestationSignatureEntry, Store class LstarSpec(ForkProtocol): @@ -75,34 +104,528 @@ def upgrade_state(self, state: SpecStateType) -> State: assert isinstance(state, State) return state - def state_transition( - self, - state: State, - block: Block, - valid_signatures: bool = True, - ) -> State: - """Compute the post-state obtained by applying a block to a pre-state.""" - return state.state_transition(block, valid_signatures) + def generate_genesis(self, genesis_time: Uint64, validators: SSZList[Any]) -> State: + """Generate a genesis state with empty history and proper initial values.""" + assert isinstance(validators, Validators) + + # Configure the genesis state. + genesis_config = self.config_class( + genesis_time=genesis_time, + ) + + # Build the genesis block header for the state. + genesis_header = self.block_header_class( + slot=Slot(0), + proposer_index=ValidatorIndex(0), + parent_root=Bytes32.zero(), + state_root=Bytes32.zero(), + body_root=hash_tree_root( + self.block_body_class(attestations=self.aggregated_attestations_class(data=[])) + ), + ) + + # Assemble and return the full genesis state. 
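+        #
+        # Both checkpoints start at the zero root: the genesis block cannot
+        # be voted for, so process_block_header later rewrites them to the
+        # genesis block root when the first post-genesis block arrives
+        # (see is_genesis_parent below).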
+ return self.state_class( + config=genesis_config, + slot=Slot(0), + latest_block_header=genesis_header, + latest_justified=Checkpoint(root=Bytes32.zero(), slot=Slot(0)), + latest_finalized=Checkpoint(root=Bytes32.zero(), slot=Slot(0)), + historical_block_hashes=HistoricalBlockHashes(data=[]), + justified_slots=JustifiedSlots(data=[]), + validators=validators, + justifications_roots=JustificationRoots(data=[]), + justifications_validators=JustificationValidators(data=[]), + ) def process_slots(self, state: State, target_slot: Slot) -> State: - """Advance the state through empty slots up to a target slot.""" - return state.process_slots(target_slot) + """ + Advance the state through empty slots up to, but not including, target_slot. - def process_block(self, state: State, block: Block) -> State: - """Apply a full block (header and body) to the state.""" - return state.process_block(block) + The loop: + - Performs per-slot maintenance (e.g., state root caching). + - Increments the slot counter after each call. + The function returns a new state with slot == target_slot. + + Raises: + AssertionError: If target_slot is not in the future. + """ + # The target must be strictly greater than the current slot. + assert state.slot < target_slot, "Target slot must be in the future" + + # Step through each missing slot. + while state.slot < target_slot: + # Per-Slot Housekeeping & Slot Increment + # + # This single statement performs two tasks for each empty slot + # in a single, immutable update: + # + # 1. State Root Caching (Conditional): + # It checks if the latest block header has an empty state root. + # This is true only for the *first* empty slot immediately + # following a block. + # + # - If it is empty, we must cache the pre-block state root + # (the hash of the state *before* this slot increment) into that + # header. We do this by: + # a) Computing the root of the current (pre-block) state. + # b) Creating a *new* header object with this computed state root + # to be included in the update. + # + # - If the state root is *not* empty, it means we are in a + # sequence of empty slots, and we simply use the existing header. + # + # 2. Slot Increment: + # It always increments the slot number by one. + needs_state_root = state.latest_block_header.state_root == Bytes32.zero() + cached_state_root = ( + hash_tree_root(state) if needs_state_root else state.latest_block_header.state_root + ) + + state = state.model_copy( + update={ + "latest_block_header": ( + state.latest_block_header.model_copy( + update={"state_root": cached_state_root} + ) + if needs_state_root + else state.latest_block_header + ), + "slot": Slot(state.slot + Slot(1)), + } + ) + + # Reached the target slot. Return the advanced state. + return state def process_block_header(self, state: State, block: Block) -> State: - """Apply only the header portion of a block to the state.""" - return state.process_block_header(block) + """ + Validate the block header and update header-linked state. + + Checks: + - The block slot equals the current state slot. + - The block slot is newer than the latest header slot. + - The proposer index matches the round-robin selection. + - The parent root matches the hash of the latest block header. + + Updates: + - For the first post-genesis block, mark genesis as justified/finalized. + - Append the parent root to historical hashes. + - Append the justified bit for the parent (true only for genesis). + - Insert ZERO_HASH entries for any skipped empty slots. 
+ - Set latest_block_header for the new block with an empty state_root. + + Raises: + AssertionError: If any header check fails. + """ + # Validation + # + # - Retrieve the header of the previous block (the parent). + # - Compute the parent root hash. + parent_header = state.latest_block_header + parent_root = hash_tree_root(parent_header) + + # Consensus checks + + # Verify the block corresponds to the current state slot. + # + # To move to this slot, we have processed any intermediate slots before. + assert block.slot == state.slot, "Block slot mismatch" + + # The block must be newer than the current latest header. + assert block.slot > parent_header.slot, "Block is older than latest header" + + # Verify the block proposer. + # + # Ensures the block was proposed by the assigned validator for this round. + assert block.proposer_index.is_proposer_for( + slot=state.slot, + num_validators=Uint64(len(state.validators)), + ), "Incorrect block proposer" + + # Verify the chain link. + # + # The block must cryptographically point to the known parent. + assert block.parent_root == parent_root, "Block parent root mismatch" + + # Checkpoint Updates + + # Detect if we are transitioning from the genesis block. + # + # This flag is True only when processing the very first block of the chain. + # This means the parent is the Genesis block (Slot 0). + is_genesis_parent = parent_header.slot == Slot(0) + + # Update the consensus checkpoints. + # + # This logic acts as the trust anchor for the chain: + # + # - If the parent is the Genesis block: It cannot receive votes as it + # precedes the start of the chain. Therefore, we explicitly force it + # to be Justified and Finalized immediately. + # + # - For all other blocks: We retain the existing checkpoints. Future + # updates rely entirely on validator attestations which are processed + # later in the block body. + if is_genesis_parent: + new_latest_justified = state.latest_justified.model_copy(update={"root": parent_root}) + new_latest_finalized = state.latest_finalized.model_copy(update={"root": parent_root}) + else: + new_latest_justified = state.latest_justified + new_latest_finalized = state.latest_finalized + + # Historical Data Management + + # Calculate the gap between the parent and the current block. + # + # If slots were skipped (missed proposals), we must record them. + # + # Formula: (Current - Parent - 1). Adjacent blocks have a gap of 0. + num_empty_slots = int(block.slot - parent_header.slot - Slot(1)) + + # Update the list of historical block roots. + # + # Structure: [Existing history] + [Parent root] + [Zero hash for gaps] + new_historical_hashes_data = ( + state.historical_block_hashes + [parent_root] + [ZERO_HASH] * num_empty_slots + ) + + # Update the list of justified slot flags. + # + # IMPORTANT: This list is stored relative to the finalized boundary. + # + # The first entry corresponds to the slot immediately following the + # latest finalized checkpoint. + # + # Here, we extend the storage capacity to ensure the range from the + # finalized boundary up to the last materialized slot is fully tracked + # and addressable. The current block's slot is not materialized until + # its header is fully processed, so we stop at slot (block.slot - 1). + last_materialized_slot = block.slot - Slot(1) + new_justified_slots_data = state.justified_slots.extend_to_slot( + state.latest_finalized.slot, + last_materialized_slot, + ) + + # Construct the new latest block header. + # + # The new header object represents the tip of the chain. 
+ # + # Leave state root empty. + # It is not computed until the block body is fully processed or the next slot begins. + new_header = self.block_header_class( + slot=block.slot, + proposer_index=block.proposer_index, + parent_root=block.parent_root, + body_root=hash_tree_root(block.body), + state_root=Bytes32.zero(), + ) + + # Final Immutable Copy + # + # Return a new immutable state instance. + # All calculated updates are applied atomically here. + return state.model_copy( + update={ + "latest_justified": new_latest_justified, + "latest_finalized": new_latest_finalized, + "historical_block_hashes": new_historical_hashes_data, + "justified_slots": new_justified_slots_data, + "latest_block_header": new_header, + } + ) + + def process_block(self, state: State, block: Block) -> State: + """ + Apply full block processing including header and body. + + Raises: + AssertionError: If block contains duplicate aggregated attestations + with no unique participant. + """ + # First process the block header. + state = self.process_block_header(state, block) + + return self.process_attestations(state, block.body.attestations) def process_attestations( self, state: State, attestations: Iterable[AggregatedAttestation], ) -> State: - """Fold attestations into the state and update justification and finalization.""" - return state.process_attestations(attestations) + """ + Apply attestations and update justification/finalization + according to the Lean Consensus 3SF-mini rules. + + This simplified consensus mechanism: + 1. Processes each attestation + 2. Updates justified status for target checkpoints + 3. Applies finalization rules based on justified status + """ + # Reconstruct the vote-tracking structure + # + # The state stores justification data in a compact SSZ layout: + # + # - A list of block roots that are currently being tracked. + # - One long flat list containing validator vote flags. + # + # For each tracked block, there is a consecutive segment of vote flags. + # Every segment has the same length: the number of validators. + # + # Conceptually, we want to recover a more natural view: + # + # "For each block root, here is the list of votes from all validators." + # + # We rebuild this intuitive structure by slicing the flat vote list back + # into its individual segments. Each slice corresponds to one tracked block. + # + # This gives us a mapping: + # + # (block root) → [vote flags for validators 0..N-1] + # + # which makes the rest of the logic easier to express and understand. + assert not any(root == ZERO_HASH for root in state.justifications_roots), ( + "zero hash is not allowed in justifications roots" + ) + justifications = ( + { + root: state.justifications_validators[ + i * len(state.validators) : (i + 1) * len(state.validators) + ] + for i, root in enumerate(state.justifications_roots) + } + if state.justifications_roots + else {} + ) + + # Track state changes to be applied at the end + latest_justified = state.latest_justified + latest_finalized = state.latest_finalized + finalized_slot = latest_finalized.slot + justified_slots = state.justified_slots + + # Map roots to their latest slot for pruning. + # + # Votes for zero hash are ignored, so we only need the most recent slot + # where a root appears to decide whether it is still unfinalized. 
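+        #
+        # For example, with finalized_slot = 2 and historical hashes
+        # [g, a, b, c, ZERO_HASH, d], the loop below covers slots 3..5 and
+        # yields {c: 3, ZERO_HASH: 4, d: 5}; a repeated root (in practice
+        # only ZERO_HASH repeats) keeps its most recent slot.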
+ start_slot = int(finalized_slot) + 1 + root_to_slot: dict[Bytes32, Slot] = {} + for i in range(start_slot, len(state.historical_block_hashes)): + root_to_slot[state.historical_block_hashes[i]] = Slot(i) + + # Process each attestation independently. + # + # Every attestation is a claim: + # "I vote to extend the chain from SOURCE to TARGET." + # + # The rules below filter out invalid or irrelevant votes. + for attestation in attestations: + source = attestation.data.source + target = attestation.data.target + + # Check that the source is already trusted. + # + # A vote may only originate from a point in history that is already justified. + # A source that lacks existing justification cannot be used to anchor a new vote. + if not justified_slots.is_slot_justified(finalized_slot, source.slot): + continue + + # Ignore votes for targets that have already reached consensus. + # + # If a block is already justified, additional votes do not change anything. + # We simply skip them. + if justified_slots.is_slot_justified(finalized_slot, target.slot): + continue + + # Ignore votes that reference zero-hash slots. + if source.root == ZERO_HASH or target.root == ZERO_HASH: + continue + + # Ensure the vote refers to blocks that actually exist on our chain. + # + # The attestation must match our canonical chain. + # Both the source root and target root must equal the recorded block roots + # stored for those slots in history. + # + # This prevents votes about unknown or conflicting forks. + source_slot_int = int(source.slot) + target_slot_int = int(target.slot) + source_matches = ( + source.root == state.historical_block_hashes[source_slot_int] + if source_slot_int < len(state.historical_block_hashes) + else False + ) + target_matches = ( + target.root == state.historical_block_hashes[target_slot_int] + if target_slot_int < len(state.historical_block_hashes) + else False + ) + + if not source_matches or not target_matches: + continue + + # Ensure time flows forward. + # + # A target must always lie strictly after its source slot. + # Otherwise the vote makes no chronological sense. + if target.slot <= source.slot: + continue + + # Ensure the target falls on a slot that can be justified after the finalized one. + # + # In 3SF-mini, justification does not advance freely through time. + # + # Only certain positions beyond the finalized slot are allowed to + # receive new votes. These positions form a small, structured set: + # + # - the immediate steps right after finalization, + # - the square-number distances, + # - and the pronic-number distances. + # + # Any target outside this pattern is not eligible for justification, + # so votes for it are simply ignored. + if not target.slot.is_justifiable_after(finalized_slot): + continue + + # Record the vote. + # + # If this is the first vote for the target block, create a fresh tally sheet: + # - one boolean per validator, all initially False. + if target.root not in justifications: + justifications[target.root] = [Boolean(False)] * len(state.validators) + + # Mark that each validator in this aggregation has voted for the target. + # + # A vote is represented as a boolean flag. + # If it was previously absent, flip it to True. + for validator_id in attestation.aggregation_bits.to_validator_indices(): + if not justifications[target.root][validator_id]: + justifications[target.root][validator_id] = Boolean(True) + + # Check whether the vote count crosses the supermajority threshold. 
+ # + # A block becomes justified when at least two-thirds of validators + # have voted for it. + # + # We compare integers to avoid floating-point division: + # + # 3 * (number of votes) ≥ 2 * (total validators) + count = sum(bool(justified) for justified in justifications[target.root]) + + if 3 * count >= (2 * len(state.validators)): + # The block becomes justified + # + # The chain now considers this block part of its safe head. + latest_justified = target + justified_slots = justified_slots.with_justified( + finalized_slot, + target.slot, + Boolean(True), + ) + + # There is no longer any need to track individual votes for this block. + del justifications[target.root] + + # Consider whether finalization can advance + # + # Finalization requires a continuous chain of trust from the + # previously finalized checkpoint up to the new justified point. + # + # If every slot in between is justifiable relative to the old + # finalized point, then the earlier source checkpoint becomes finalized. + # + # In short: + # + # If there is no break in the chain, advance finalization. + if not any( + Slot(slot).is_justifiable_after(finalized_slot) + for slot in range(source.slot + Slot(1), target.slot) + ): + old_finalized_slot = finalized_slot + latest_finalized = source + finalized_slot = latest_finalized.slot + + # Rebase/prune justification tracking across the new finalized boundary. + # + # The state stores justified slot flags starting at (finalized_slot + 1), + # so when finalization advances by `delta`, we drop the first `delta` bits. + # + # We also prune any pending justifications whose latest slot + # is now finalized (latest <= finalized_slot). + delta = int(finalized_slot - old_finalized_slot) + if delta > 0: + justified_slots = justified_slots.shift_window(delta) + assert all(root in root_to_slot for root in justifications), ( + "Justification root missing from root_to_slot" + ) + justifications = { + root: votes + for root, votes in justifications.items() + if root_to_slot[root] > finalized_slot + } + + # Convert the vote structure back into SSZ format + # + # Internally, we used a mapping: + # + # block root → list of votes + # + # SSZ requires: + # + # - a sorted list of block roots + # - a single flat list of votes (all roots concatenated in sorted order) + # + # Sorting ensures that every node produces identical state representation. + sorted_roots = sorted(justifications.keys()) + + # Construct and return the updated state + return state.model_copy( + update={ + "justifications_roots": JustificationRoots(data=sorted_roots), + "justifications_validators": JustificationValidators( + data=[vote for root in sorted_roots for vote in justifications[root]] + ), + "justified_slots": justified_slots, + "latest_justified": latest_justified, + "latest_finalized": latest_finalized, + } + ) + + def state_transition( + self, + state: State, + block: Block, + valid_signatures: bool = True, + ) -> State: + """ + Apply the complete state transition function for a block. + + This method represents the full state transition function: + 1. Validate signatures if required + 2. Process slots up to the block's slot + 3. Process the block header and body + 4. Validate the computed state root + + Raises: + AssertionError: If signature validation fails or state root is invalid. + """ + # Validate signatures if required + if not valid_signatures: + raise AssertionError("Block signatures must be valid") + + with observe_state_transition(): + # First, process any intermediate slots. 
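+            # (E.g. a slot-5 state receiving a slot-8 block advances through
+            # the empty slots 6 and 7 before the block itself is applied.)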
+            advanced = self.process_slots(state, block.slot)
+
+            # Process the block itself.
+            new_state = self.process_block(advanced, block)
+
+            # Validate that the block's state root matches the computed state root.
+            computed_state_root = hash_tree_root(new_state)
+            if block.state_root != computed_state_root:
+                raise AssertionError("Invalid block state root")
+
+            return new_state
 
     def build_block(
         self,
@@ -113,15 +636,150 @@ def build_block(
         known_block_roots: AbstractSet[Bytes32],
         aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] | None = None,
     ) -> tuple[Block, State, list[AggregatedAttestation], list[AggregatedSignatureProof]]:
-        """Assemble a valid block on top of the given pre-state."""
-        return state.build_block(
+        """
+        Build a valid block on top of the given pre-state.
+
+        Computes the post-state and creates a block with the correct state root.
+
+        Uses a fixed-point algorithm: finds attestation_data entries whose source
+        matches the current justified checkpoint, greedily selects proofs maximizing
+        new validator coverage, then applies the STF. If justification advances,
+        repeats with the new checkpoint.
+        """
+        aggregated_attestations: list[AggregatedAttestation] = []
+        aggregated_signatures: list[AggregatedSignatureProof] = []
+
+        if aggregated_payloads:
+            # Fixed-point loop: find attestation_data entries matching the current
+            # justified checkpoint and greedily select proofs. Processing attestations
+            # may advance justification, unlocking more entries.
+            #
+            # When building on top of genesis (slot 0), process_block_header
+            # updates the justified root to parent_root. Apply the same
+            # derivation here so attestation sources match.
+            if state.latest_block_header.slot == Slot(0):
+                current_justified = state.latest_justified.model_copy(update={"root": parent_root})
+            else:
+                current_justified = state.latest_justified
+
+            processed_att_data: set[AttestationData] = set()
+
+            while True:
+                found_entries = False
+
+                for att_data, proofs in sorted(
+                    aggregated_payloads.items(), key=lambda item: item[0].target.slot
+                ):
+                    if (
+                        Uint8(len(processed_att_data)) >= MAX_ATTESTATIONS_DATA
+                        and att_data not in processed_att_data
+                    ):
+                        break
+
+                    if att_data.head.root not in known_block_roots:
+                        continue
+
+                    if att_data.source != current_justified:
+                        continue
+
+                    if att_data in processed_att_data:
+                        continue
+                    processed_att_data.add(att_data)
+
+                    found_entries = True
+
+                    selected, _ = AggregatedSignatureProof.select_greedily(proofs)
+                    aggregated_signatures.extend(selected)
+                    for proof in selected:
+                        aggregated_attestations.append(
+                            self.aggregated_attestation_class(
+                                aggregation_bits=proof.participants,
+                                data=att_data,
+                            )
+                        )
+
+                if not found_entries:
+                    break
+
+                # Build candidate block and check if justification changed.
+                candidate_block = self.block_class(
+                    slot=slot,
+                    proposer_index=proposer_index,
+                    parent_root=parent_root,
+                    state_root=Bytes32.zero(),
+                    body=self.block_body_class(
+                        attestations=self.aggregated_attestations_class(
+                            data=list(aggregated_attestations)
+                        )
+                    ),
+                )
+                post_state = self.process_block(self.process_slots(state, slot), candidate_block)
+
+                if post_state.latest_justified != current_justified:
+                    current_justified = post_state.latest_justified
+                    continue
+
+                break
+
+        # Compact: merge all proofs sharing the same AttestationData into one
+        # using recursive children aggregation.
+        #
+        # During the fixed-point loop above, multiple proofs may have been
+        # selected for the same AttestationData across iterations.
Group them + # and merge each group into a single recursive proof. + proof_groups: dict[AttestationData, list[AggregatedSignatureProof]] = {} + for att, sig in zip(aggregated_attestations, aggregated_signatures, strict=True): + proof_groups.setdefault(att.data, []).append(sig) + + aggregated_attestations = [] + aggregated_signatures = [] + for att_data, proofs in proof_groups.items(): + if len(proofs) == 1: + sig = proofs[0] + else: + # Multiple proofs for the same data were aggregated separately. + # Merge them into one recursive proof using children-only + # aggregation (no new raw signatures). + children = [ + ( + proof, + [ + state.validators[vid].get_attestation_pubkey() + for vid in proof.participants.to_validator_indices() + ], + ) + for proof in proofs + ] + sig = AggregatedSignatureProof.aggregate( + xmss_participants=None, + children=children, + raw_xmss=[], + message=hash_tree_root(att_data), + slot=att_data.slot, + ) + aggregated_signatures.append(sig) + aggregated_attestations.append( + self.aggregated_attestation_class( + aggregation_bits=sig.participants, data=att_data + ) + ) + + # Create the final block with selected attestations. + final_block = self.block_class( slot=slot, proposer_index=proposer_index, parent_root=parent_root, - known_block_roots=known_block_roots, - aggregated_payloads=aggregated_payloads, + state_root=Bytes32.zero(), + body=self.block_body_class( + attestations=self.aggregated_attestations_class(data=aggregated_attestations), + ), ) + # Recompute state from the final block. + post_state = self.process_block(self.process_slots(state, slot), final_block) + final_block = final_block.model_copy(update={"state_root": hash_tree_root(post_state)}) + + return final_block, post_state, aggregated_attestations, aggregated_signatures + def verify_signatures( self, signed_block: SignedBlock, @@ -213,46 +871,929 @@ def verify_signatures( return True + def create_store( + self, + state: SpecStateType, + anchor_block: SpecBlockType, + validator_id: ValidatorIndex | None, + ) -> SpecStoreType: + """Initialize a forkchoice store from an anchor state and block. + + The anchor block and state form the starting point for fork choice. + Both are treated as justified and finalized. + + Raises: + AssertionError: + If the anchor block's state root does not match the hash + of the state. + """ + assert isinstance(state, State) + assert isinstance(anchor_block, Block) + + # Compute the SSZ root of this state. + # + # This is the canonical hash that should appear in the block's state root. + computed_state_root = hash_tree_root(state) + + # Check that the block actually points to this state. + # + # If this fails, the caller has supplied inconsistent inputs. + assert anchor_block.state_root == computed_state_root, ( + "Anchor block state root must match anchor state hash" + ) + + # Compute the SSZ root of the anchor block itself. + # + # This root will be used as: + # - the key in the blocks/states maps, + # - the initial head, + # - the root of the initial checkpoints. + anchor_root = hash_tree_root(anchor_block) + + # Read the slot at which the anchor block was proposed. + anchor_slot = anchor_block.slot + + # Seed both checkpoints from the anchor block itself. + # + # The store treats the anchor as the new genesis for fork choice: + # all history below it is pruned. The justified and finalized checkpoints + # therefore point at the anchor block with the anchor's own slot, + # regardless of what the anchor state's embedded checkpoints say. 
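+        #
+        # For example, anchoring at a checkpoint block proposed at slot 64
+        # seeds both latest_justified and latest_finalized with
+        # Checkpoint(root=anchor_root, slot=Slot(64)), even if the anchor
+        # state's own embedded checkpoints still point lower.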
+ anchor_checkpoint = Checkpoint(root=anchor_root, slot=anchor_slot) + + return cast( + SpecStoreType, + self.store_class( + time=Interval.from_slot(anchor_slot), + config=state.config, + head=anchor_root, + safe_target=anchor_root, + latest_justified=anchor_checkpoint, + latest_finalized=anchor_checkpoint, + blocks={anchor_root: anchor_block}, + states={anchor_root: state}, + validator_id=validator_id, + ), + ) + + def prune_stale_attestation_data(self, store: Store) -> Store: + """Remove attestation data that can no longer influence fork choice. + + An attestation becomes stale when its target checkpoint falls at or before + the finalized slot. Such attestations cannot affect chain selection since + the target is already finalized. + + Pruning removes all attestation-related data: + + - Attestation signatures + - Pending aggregated payloads + - Processed aggregated payloads + """ + # Filter out stale entries from all attestation-related mappings. + # + # Each mapping is keyed by attestation data, so we check membership by slot + # against the finalized slot. + return store.model_copy( + update={ + "attestation_signatures": { + attestation_data: sigs + for attestation_data, sigs in store.attestation_signatures.items() + if attestation_data.target.slot > store.latest_finalized.slot + }, + "latest_new_aggregated_payloads": { + attestation_data: proofs + for attestation_data, proofs in store.latest_new_aggregated_payloads.items() + if attestation_data.target.slot > store.latest_finalized.slot + }, + "latest_known_aggregated_payloads": { + attestation_data: proofs + for attestation_data, proofs in store.latest_known_aggregated_payloads.items() + if attestation_data.target.slot > store.latest_finalized.slot + }, + } + ) + + def validate_attestation(self, store: Store, attestation_data: AttestationData) -> None: + """Validate incoming attestation before processing. + + Ensures the vote respects the basic laws of time and topology: + 1. The blocks voted for must exist in our store. + 2. A vote cannot span backwards in time (source > target). + 3. The head must be at least as recent as source and target. + 4. Checkpoint slots must match the actual block slots. + 5. The vote's slot must have started locally (a small disparity margin is allowed). + + Raises: + AssertionError: If attestation fails validation. + """ + data = attestation_data + + # Availability Check + # + # We cannot count a vote if we haven't seen the blocks involved. + assert data.source.root in store.blocks, f"Unknown source block: {data.source.root.hex()}" + assert data.target.root in store.blocks, f"Unknown target block: {data.target.root.hex()}" + assert data.head.root in store.blocks, f"Unknown head block: {data.head.root.hex()}" + + # Topology Check + # + # History is linear and monotonic: source <= target <= head. + # The second check implies head >= source by transitivity. + assert data.source.slot <= data.target.slot, "Source checkpoint slot must not exceed target" + assert data.head.slot >= data.target.slot, "Head checkpoint must not be older than target" + + # Consistency Check + # + # Validate checkpoint slots match block slots. 
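+        #
+        # For example, a checkpoint naming slot 9 must reference a block whose
+        # own slot field is 9; otherwise the vote is internally inconsistent.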
+        source_block = store.blocks[data.source.root]
+        target_block = store.blocks[data.target.root]
+        head_block = store.blocks[data.head.root]
+        assert source_block.slot == data.source.slot, "Source checkpoint slot mismatch"
+        assert target_block.slot == data.target.slot, "Target checkpoint slot mismatch"
+        assert head_block.slot == data.head.slot, "Head checkpoint slot mismatch"
+
+        # Time Check
+        #
+        # Honest validators emit votes only after their slot has begun.
+        # Allow a small disparity margin for clock skew between peers.
+        #
+        # The bound is in intervals, not slots: a whole-slot margin would
+        # let an adversary pre-publish next-slot aggregates ahead of any
+        # honest validator.
+        attestation_start_interval = Interval.from_slot(data.slot)
+        assert attestation_start_interval <= store.time + GOSSIP_DISPARITY_INTERVALS, (
+            "Attestation too far in future"
+        )
+
+    def on_gossip_attestation(
+        self,
+        store: Store,
+        signed_attestation: SignedAttestation,
+        scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME,
+        is_aggregator: bool = False,
+    ) -> Store:
+        """Process a signed attestation received via the gossip network.
+
+        This method:
+        1. Verifies the XMSS signature
+        2. Stores the signature when the node is in aggregator mode
+
+        Subnet filtering happens at the p2p subscription layer — only
+        attestations from subscribed subnets reach this method. No
+        additional subnet check is needed here.
+
+        Raises:
+            AssertionError: If attestation validation, validator lookup, or
+                signature verification fails.
+        """
+        with observe_on_attestation():
+            validator_id = signed_attestation.validator_id
+            attestation_data = signed_attestation.data
+            signature = signed_attestation.signature
+
+            # Validate the attestation first so unknown blocks are rejected cleanly
+            # (instead of raising a raw KeyError when state is missing).
+            self.validate_attestation(store, attestation_data)
+
+            key_state = store.states.get(attestation_data.target.root)
+            assert key_state is not None, (
+                f"No state available to verify attestation signature for target block "
+                f"{attestation_data.target.root.hex()}"
+            )
+            assert validator_id.is_valid(Uint64(len(key_state.validators))), (
+                f"Validator {validator_id} not found in state {attestation_data.target.root.hex()}"
+            )
+            public_key = key_state.validators[validator_id].get_attestation_pubkey()
+
+            assert scheme.verify(
+                public_key, attestation_data.slot, hash_tree_root(attestation_data), signature
+            ), "Signature verification failed"
+
+            # Store signature and attestation data for later aggregation.
+            # Copy the inner sets so we can add to them without mutating the previous store.
+            new_committee_sigs = {k: set(v) for k, v in store.attestation_signatures.items()}
+
+            # Aggregators store all received gossip signatures.
+            # The p2p layer only delivers attestations from subscribed subnets,
+            # so subnet filtering happens at subscription time, not here.
+            # Non-aggregator nodes validate and drop — they never store gossip signatures.
+            if is_aggregator:
+                new_committee_sigs.setdefault(attestation_data, set()).add(
+                    AttestationSignatureEntry(validator_id, signature)
+                )
+
+            # Return store with updated signature map and attestation data
+            return store.model_copy(
+                update={
+                    "attestation_signatures": new_committee_sigs,
+                }
+            )
+
+    def on_gossip_aggregated_attestation(
+        self,
+        store: Store,
+        signed_attestation: SignedAggregatedAttestation,
+    ) -> Store:
+        """Process a signed aggregated attestation received via the aggregation topic.
+
+        This method:
+        1.
Verifies the aggregated attestation
+        2. Stores the aggregation in the aggregation_payloads map
+
+        Raises:
+            AssertionError: If attestation validation, validator lookup, or
+                signature verification fails.
+        """
+        data = signed_attestation.data
+        proof = signed_attestation.proof
+
+        self.validate_attestation(store, data)
+
+        # Get the validator IDs that participated in this aggregation
+        validator_ids = proof.participants.to_validator_indices()
+
+        # Retrieve the relevant state to look up public keys for verification.
+        key_state = store.states.get(data.target.root)
+        assert key_state is not None, (
+            f"No state available to verify committee aggregation for target "
+            f"{data.target.root.hex()}"
+        )
+
+        # Ensure all participants exist in the active set
+        validators = key_state.validators
+        for validator_id in validator_ids:
+            assert validator_id.is_valid(Uint64(len(validators))), (
+                f"Validator {validator_id} not found in state {data.target.root.hex()}"
+            )
+
+        # Prepare public keys for verification
+        public_keys = [validators[vid].get_attestation_pubkey() for vid in validator_ids]
+
+        # Verify the leanVM aggregated proof
+        try:
+            proof.verify(
+                public_keys=public_keys,
+                message=hash_tree_root(data),
+                slot=data.slot,
+            )
+        except AggregationError as exc:
+            raise AssertionError(
+                f"Committee aggregation signature verification failed: {exc}"
+            ) from exc
+
+        # Shallow-copy the dict and its inner sets to preserve immutability.
+        new_aggregated_payloads = {
+            k: set(v) for k, v in store.latest_new_aggregated_payloads.items()
+        }
+        new_aggregated_payloads.setdefault(data, set()).add(proof)
+
+        # Return store with updated aggregated payloads and attestation data
+        return store.model_copy(
+            update={
+                "latest_new_aggregated_payloads": new_aggregated_payloads,
+            }
+        )
+
     def on_block(
         self,
         store: Store,
         signed_block: SignedBlock,
         scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME,
     ) -> Store:
-        """Incorporate a newly received block into the forkchoice view."""
-        return store.on_block(signed_block, scheme)
+        """Process a new block and update the forkchoice state.
 
-    def on_tick(
+        This method integrates a block into the forkchoice store by:
+        1. Validating the block's parent exists
+        2. Computing the post-state via the state transition function
+        3. Processing attestations included in the block body (on-chain)
+        4. Updating the forkchoice head
+
+        Raises:
+            AssertionError: If the parent block/state is not found in the store.
+        """
+        with observe_on_block():
+            block = signed_block.block
+            block_root = hash_tree_root(block)
+
+            # Skip duplicate blocks (idempotent operation)
+            if block_root in store.blocks:
+                return store
+
+            # Capture the finalized slot before any updates so we can decide
+            # at the end whether finalization advanced and pruning is needed.
+            previous_finalized_slot = store.latest_finalized.slot
+
+            # Verify parent chain is available
+            #
+            # The parent state must exist before processing this block.
+            # If missing, the node must sync the parent chain first.
+            parent_state = store.states.get(block.parent_root)
+            assert parent_state is not None, (
+                f"Parent state not found (root={block.parent_root.hex()}). "
+                f"Sync parent chain before processing block at slot {block.slot}."
+ ) + + # Validate cryptographic signatures + valid_signatures = self.verify_signatures(signed_block, parent_state.validators, scheme) + + # Execute state transition function to compute post-block state + post_state = self.state_transition(parent_state, block, valid_signatures) + + # Propagate checkpoint advances from the post-state. + # + # Keep the checkpoint with the higher slot. + # On slot ties, prefer the store's own checkpoint. + # + # The store's checkpoint is pinned to the anchor at init and only + # moves forward via real justification/finalization events. + # On ties the store's view is authoritative. + latest_justified = max(store.latest_justified, post_state.latest_justified) + latest_finalized = max(store.latest_finalized, post_state.latest_finalized) + + store = store.model_copy( + update={ + "blocks": store.blocks | {block_root: block}, + "states": store.states | {block_root: post_state}, + "latest_justified": latest_justified, + "latest_finalized": latest_finalized, + } + ) + + # Process block body attestations and their signatures + # Block attestations go directly to "known" payloads + aggregated_attestations = block.body.attestations + attestation_signatures = signed_block.signature.attestation_signatures + + assert len(aggregated_attestations) == len(attestation_signatures), ( + "Attestation signature groups must match aggregated attestations" + ) + + # Each unique AttestationData must appear at most once per block. + att_data_set = {att.data for att in aggregated_attestations} + assert len(att_data_set) == len(aggregated_attestations), ( + "Block contains duplicate AttestationData entries; " + "each AttestationData must appear at most once" + ) + assert Uint8(len(att_data_set)) <= MAX_ATTESTATIONS_DATA, ( + f"Block contains {len(att_data_set)} distinct AttestationData entries; " + f"maximum is {MAX_ATTESTATIONS_DATA}" + ) + + # Copy the aggregated proof map for updates + # Shallow-copy the dict and its inner sets to preserve immutability + # Block attestations go directly to "known" payloads + # (like is_from_block=True in the spec) + block_proofs: dict[AttestationData, set[AggregatedSignatureProof]] = { + k: set(v) for k, v in store.latest_known_aggregated_payloads.items() + } + + for att, proof in zip(aggregated_attestations, attestation_signatures, strict=True): + block_proofs.setdefault(att.data, set()).add(proof) + + # Update store with new aggregated proofs and attestation data + store = store.model_copy(update={"latest_known_aggregated_payloads": block_proofs}) + + # Update forkchoice head based on new block and attestations + store = self.update_head(store) + + # Prune stale attestation data when finalization advances + if store.latest_finalized.slot > previous_finalized_slot: + store = self.prune_stale_attestation_data(store) + + return store + + def extract_attestations_from_aggregated_payloads( + self, + store: Store, + aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]], + ) -> dict[ValidatorIndex, AttestationData]: + """Extract attestations from aggregated payloads. + + Given a mapping of aggregated signature proofs, extract the attestation data + for each validator that participated in the aggregation. 
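+
+        A minimal sketch of the latest-wins rule, with hypothetical objects
+        (``spec`` is the fork spec; ``d5`` and ``d7`` are attestation data at
+        slots 5 and 7, and ``p5``/``p7`` are proofs in which validator 3
+        participates)::
+
+            # Both proofs cover validator 3; the slot-7 vote supersedes slot-5.
+            result = spec.extract_attestations_from_aggregated_payloads(
+                store, {d5: {p5}, d7: {p7}}
+            )
+            assert result[ValidatorIndex(3)] == d7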
+ """ + attestations: dict[ValidatorIndex, AttestationData] = {} + + for attestation_data, proofs in aggregated_payloads.items(): + for proof in proofs: + for validator_id in proof.participants.to_validator_indices(): + existing = attestations.get(validator_id) + if existing is None or existing.slot < attestation_data.slot: + attestations[validator_id] = attestation_data + return attestations + + def compute_block_weights(self, store: Store) -> dict[Bytes32, int]: + """Compute attestation-based weight for each block above the finalized slot. + + Walks backward from each validator's latest head vote, incrementing weight + for every ancestor above the finalized slot. + """ + attestations = self.extract_attestations_from_aggregated_payloads( + store, store.latest_known_aggregated_payloads + ) + + start_slot = store.latest_finalized.slot + + weights: dict[Bytes32, int] = defaultdict(int) + + for attestation_data in attestations.values(): + current_root = attestation_data.head.root + + while current_root in store.blocks and store.blocks[current_root].slot > start_slot: + weights[current_root] += 1 + current_root = store.blocks[current_root].parent_root + + return dict(weights) + + def _compute_lmd_ghost_head( + self, + store: Store, + start_root: Bytes32, + attestations: dict[ValidatorIndex, AttestationData], + min_score: int = 0, + ) -> Bytes32: + """Walk the block tree according to the LMD GHOST rule. + + The walk starts from a chosen root. + At each fork, the child subtree with the highest weight is taken. + The process stops when a leaf is reached. + That leaf is the chosen head. + + Weights are derived from votes as follows: + - Each validator contributes its full weight to its most recent head vote. + - The weight of that vote also flows to every ancestor of the voted block. + - The weight of a subtree is the sum of all such contributions inside it. + + An optional threshold can be applied: + - If a threshold is set, children below this threshold are ignored. + + When two branches have equal weight, the one with the lexicographically + larger hash is chosen to break ties. + """ + # If the starting point is not defined, choose the earliest known block. + # + # This ensures that the walk always has an anchor. + if start_root == ZERO_HASH: + start_root = min( + store.blocks.keys(), key=lambda block_hash: store.blocks[block_hash].slot + ) + + # Remember the slot of the anchor once and reuse it during the walk. + # + # This avoids repeated lookups inside the inner loop. + start_slot = store.blocks[start_root].slot + + # Prepare a table that will collect voting weight for each block. + # + # Each entry starts conceptually at zero and then accumulates contributions. + weights: dict[Bytes32, int] = defaultdict(int) + + # For every vote, follow the chosen head upward through its ancestors. + # + # Each visited block accumulates one unit of weight from that validator. + for attestation_data in attestations.values(): + current_root = attestation_data.head.root + + # Climb towards the anchor while staying inside the known tree. + # + # This naturally handles partial views and ongoing sync. + while current_root in store.blocks and store.blocks[current_root].slot > start_slot: + weights[current_root] += 1 + current_root = store.blocks[current_root].parent_root + + # Build the adjacency tree (parent -> children). + # + # We use a defaultdict to avoid checking if keys exist. + children_map: dict[Bytes32, list[Bytes32]] = defaultdict(list) + + for root, block in store.blocks.items(): + # 1. 
Structural check: skip blocks without parents (e.g., genesis or orphaned blocks)
+            if not block.parent_root:
+                continue
+
+            # 2. Heuristic check: prune branches early if they lack sufficient weight
+            if min_score > 0 and weights[root] < min_score:
+                continue
+
+            children_map[block.parent_root].append(root)
+
+        # Now perform the greedy walk.
+        #
+        # At each step, pick the child with the highest weight among the candidates.
+        head = start_root
+
+        # Descend the tree, choosing the heaviest branch at every fork.
+        while children := children_map.get(head):
+            # Choose best child: most attestations, then lexicographically highest hash
+            head = max(children, key=lambda x: (weights[x], x))
+
+        return head
+
+    def update_head(self, store: Store) -> Store:
+        """Compute an updated store with the new canonical head.
+
+        Selects the canonical chain head using:
+
+        1. Latest justified checkpoint as the starting root
+        2. LMD-GHOST fork choice rule (heaviest subtree by attestation weight)
+        """
+        # Extract attestations from known aggregated payloads
+        attestations = self.extract_attestations_from_aggregated_payloads(
+            store, store.latest_known_aggregated_payloads
+        )
+
+        # Run LMD-GHOST fork choice algorithm.
+        #
+        # Starts from the justified root and greedily descends to the heaviest
+        # leaf. The result is always a descendant of the justified root by
+        # construction: the walk only follows child edges within the subtree.
+        new_head = self._compute_lmd_ghost_head(
+            store,
+            start_root=store.latest_justified.root,
+            attestations=attestations,
+        )
+
+        return store.model_copy(
+            update={
+                "head": new_head,
+            }
+        )
+
+    def accept_new_attestations(self, store: Store) -> Store:
+        """Process pending aggregated payloads and update forkchoice head.
+
+        Moves aggregated payloads from latest_new_aggregated_payloads to
+        latest_known_aggregated_payloads, making them eligible to contribute to
+        fork choice weights. This migration happens at specific interval ticks.
+
+        The Interval Tick System
+        ------------------------
+        Aggregated payloads progress through intervals:
+        - Interval 0: Block proposal
+        - Interval 1: Validators cast attestations (enter "new")
+        - Interval 2: Aggregators create proofs & broadcast
+        - Interval 3: Safe target update
+        - Interval 4: Process accumulated attestations
+
+        This staged progression ensures proper timing and prevents premature
+        influence on fork choice decisions.
+        """
+        # Merge new aggregated payloads into known aggregated payloads
+        merged_aggregated_payloads = {
+            attestation_data: set(proofs)
+            for attestation_data, proofs in store.latest_known_aggregated_payloads.items()
+        }
+        for attestation_data, proofs in store.latest_new_aggregated_payloads.items():
+            merged_aggregated_payloads.setdefault(attestation_data, set()).update(proofs)
+
+        # Create store with migrated aggregated payloads
+        store = store.model_copy(
+            update={
+                "latest_known_aggregated_payloads": merged_aggregated_payloads,
+                "latest_new_aggregated_payloads": {},
+            }
+        )
+
+        # Update head with newly accepted aggregated payloads
+        return self.update_head(store)
+
+    def update_safe_target(self, store: Store) -> Store:
+        """Compute the deepest block that has 2/3+ supermajority attestation weight.
+
+        The safe target is the furthest-from-genesis block where enough validators
+        agree. Validators use it to decide which block is safe to attest to.
+        Only blocks meeting the supermajority threshold qualify.
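+
+        The threshold uses ceiling division so it always rounds up; a quick
+        sanity check on the integer trick, with assumed validator counts::
+
+            # -(-n * 2 // 3) is ceil(2 * n / 3) in pure integer arithmetic.
+            assert -(-100 * 2 // 3) == 67  # not 66
+            assert -(-3 * 2 // 3) == 2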
+ + This runs at interval 3 of the slot cycle: + + - Interval 0: Block proposal + - Interval 1: Validators cast attestation votes + - Interval 2: Aggregators create proofs, broadcast via gossip + - Interval 3: Safe target update (HERE) + - Interval 4: New attestations migrate to "known" pool + + Only the "new" pool counts. Migration into "known" runs at interval 4, + after this step, so safe target sees only votes received this slot. + + Safe target is an *availability* signal, not durable knowledge: + + - A block is safe when 2/3 of currently online validators vote for a descendant. + - "Known" carries block-included, previously migrated, and self-attestations. + - Those reflect historical knowledge, not current liveness. + - Counting them would advance safe target on stale evidence after a participation collapse. + """ + # Look up the post-state of the current head block. + # + # The validator registry in this state tells us how many active + # validators exist. We need that count to compute the threshold. + head_state = store.states[store.head] + num_validators = Uint64(len(head_state.validators)) + + # Compute the 2/3 supermajority threshold. + # + # A block needs at least this many attestation votes to be "safe". + # The ceiling division (negation trick) ensures we round UP. + # For example, 100 validators => threshold is 67, not 66. + min_target_score = -(-num_validators * 2 // 3) + + # Unpack "new" payloads into a flat validator -> vote mapping. + # "Known" is excluded by design. + attestations = self.extract_attestations_from_aggregated_payloads( + store, + store.latest_new_aggregated_payloads, + ) + + # Run LMD GHOST with the supermajority threshold. + # + # The walk starts from the latest justified checkpoint and descends + # through the block tree. At each fork, only children with at least + # `min_target_score` attestation weight are considered. The result + # is the deepest block that clears the 2/3 bar. + # + # If no child meets the threshold at some fork, the walk stops + # early. The safe target is then shallower than the actual head. + safe_target = self._compute_lmd_ghost_head( + store, + start_root=store.latest_justified.root, + attestations=attestations, + min_score=min_target_score, + ) + + # Return a new Store with only the safe target updated. + # + # The head and attestation pools remain unchanged. + return store.model_copy(update={"safe_target": safe_target}) + + def aggregate(self, store: Store) -> tuple[Store, list[SignedAggregatedAttestation]]: + """Turn raw validator votes into compact aggregated attestations. + + Validators cast individual signatures over gossip. Before those + votes can influence fork choice or be included in a block, they + must be combined into compact cryptographic proofs. + + The store holds three pools of attestation evidence: + + - **Gossip signatures**: individual validator votes arriving in real-time. + - **New payloads**: aggregated proofs from the current round, not yet + committed to the chain. + - **Known payloads**: previously accepted proofs, reusable as building + blocks for deeper aggregation. + + For each unique piece of attestation data the algorithm proceeds in three phases: + + 1. **Select** — greedily pick existing proofs that maximize + validator coverage (new before known). + 2. **Fill** — collect raw gossip signatures for any validators + not yet covered. + 3. **Aggregate** — delegate to the XMSS subspec to produce a + single cryptographic proof. 
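+
+        A sketch of the Select phase with hypothetical proofs (``p01`` covers
+        validators 0-1 and ``p2`` covers validator 2, both new; ``p12`` covers
+        validators 1-2 and is known)::
+
+            # New proofs are preferred, so p12 would add no extra coverage.
+            child_proofs, covered = AggregatedSignatureProof.select_greedily(
+                {p01, p2}, {p12}
+            )
+            # covered now spans validators 0, 1, and 2; anyone still missing
+            # would be filled from raw gossip signatures in the Fill phase.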
+ + After aggregation the store is updated: + + - Consumed gossip signatures are removed. + - Newly produced proofs are recorded for future reuse. + """ + validators = store.states[store.head].validators + gossip_sigs = store.attestation_signatures + new = store.latest_new_aggregated_payloads + known = store.latest_known_aggregated_payloads + + new_aggregates: list[SignedAggregatedAttestation] = [] + + # Only attestation data with a new payload or a raw gossip signature + # can trigger aggregation. Known payloads alone cannot — they exist + # only to help extend coverage when combined with fresh evidence. + for data in new.keys() | gossip_sigs.keys(): + # Phase 1: Select + # + # Start with the cheapest option: reuse proofs that already + # cover many validators. + # + # Child proofs are aggregated signatures from prior rounds. + # Selecting them first keeps the final proof tree shallow + # and avoids redundant cryptographic work. + # + # New payloads go first because they represent uncommitted + # work — known payloads fill remaining gaps. + child_proofs, covered = AggregatedSignatureProof.select_greedily( + new.get(data), known.get(data) + ) + + # Phase 2: Fill + # + # For every validator not yet covered by a child proof, + # include its individual gossip signature. + # + # Sorting by validator index guarantees deterministic proof + # construction regardless of network arrival order. + raw_entries = [ + ( + e.validator_id, + validators[e.validator_id].get_attestation_pubkey(), + e.signature, + ) + for e in sorted(gossip_sigs.get(data, set()), key=lambda e: e.validator_id) + if e.validator_id not in covered + ] + + # The XMSS layer enforces a minimum: either at least one raw + # signature, or at least two child proofs to merge. + # + # A lone child proof is already a valid proof — nothing to do. + if not raw_entries and len(child_proofs) < 2: + continue + + # Encode the set of raw signers as a compact bitfield. + xmss_participants = ValidatorIndices( + data=[vid for vid, _, _ in raw_entries] + ).to_aggregation_bits() + raw_xmss = [(pk, sig) for _, pk, sig in raw_entries] + + # Phase 3: Aggregate + # + # Build the recursive proof tree. + # + # Each child proof needs its participants' public keys so + # the XMSS prover can verify inner proofs while constructing + # the outer one. + children = [ + ( + child, + [ + validators[vid].get_attestation_pubkey() + for vid in child.participants.to_validator_indices() + ], + ) + for child in child_proofs + ] + + # Hand everything to the XMSS subspec. + # Out comes a single proof covering all selected validators. + proof = AggregatedSignatureProof.aggregate( + xmss_participants=xmss_participants, + children=children, + raw_xmss=raw_xmss, + message=hash_tree_root(data), + slot=data.slot, + ) + new_aggregates.append(SignedAggregatedAttestation(data=data, proof=proof)) + + # ── Store bookkeeping ──────────────────────────────────────── + # + # Record freshly produced proofs so future rounds can reuse them. + # Remove gossip signatures that were consumed by this aggregation. 
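+        #
+        # Removal is keyed by attestation data: once a fresh proof exists for
+        # a given data, every gossip signature stored under that data is
+        # treated as consumed, including any that arrived too late to be
+        # selected.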
+ new_aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] = {} + for signed_att in new_aggregates: + new_aggregated_payloads.setdefault(signed_att.data, set()).add(signed_att.proof) + + remaining_attestation_signatures = { + data: sigs + for data, sigs in store.attestation_signatures.items() + if data not in new_aggregated_payloads + } + + return store.model_copy( + update={ + "latest_new_aggregated_payloads": new_aggregated_payloads, + "attestation_signatures": remaining_attestation_signatures, + } + ), new_aggregates + + def tick_interval( self, store: Store, - target_interval: Interval, has_proposal: bool, is_aggregator: bool = False, ) -> tuple[Store, list[SignedAggregatedAttestation]]: - """Advance forkchoice time to a target interval and emit any due aggregates.""" - return store.on_tick(target_interval, has_proposal, is_aggregator) + """Advance store time by one interval and perform interval-specific actions. - def on_gossip_attestation( + Different actions are performed based on interval within slot: + - Interval 0: Process attestations if proposal exists + - Interval 1: Validator attesting period (no action) + - Interval 2: Aggregators create proofs & broadcast + - Interval 3: Update safe target (fast confirm) + - Interval 4: Process accumulated attestations + """ + # Advance time by one interval + store = store.model_copy(update={"time": store.time + Interval(1)}) + current_interval = store.time % INTERVALS_PER_SLOT + new_aggregates: list[SignedAggregatedAttestation] = [] + + if current_interval == Interval(0) and has_proposal: + store = self.accept_new_attestations(store) + elif current_interval == Interval(2) and is_aggregator: + store, new_aggregates = self.aggregate(store) + elif current_interval == Interval(3): + store = self.update_safe_target(store) + elif current_interval == Interval(4): + store = self.accept_new_attestations(store) + + return store, new_aggregates + + def on_tick( self, store: Store, - signed_attestation: SignedAttestation, - scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, + target_interval: Interval, + has_proposal: bool, is_aggregator: bool = False, - ) -> Store: - """Incorporate a single-validator attestation received from the network.""" - return store.on_gossip_attestation(signed_attestation, scheme, is_aggregator) + ) -> tuple[Store, list[SignedAggregatedAttestation]]: + """Advance forkchoice store time to given interval count. - def on_gossip_aggregated_attestation( - self, - store: Store, - signed_attestation: SignedAggregatedAttestation, - ) -> Store: - """Incorporate an aggregated attestation received from the network.""" - return store.on_gossip_aggregated_attestation(signed_attestation) + Ticks store forward interval by interval, performing appropriate + actions for each interval type. This method handles time progression + incrementally to ensure all interval-specific actions are performed. 
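+
+        For example (hypothetical values): advancing from ``Interval(7)`` to
+        ``Interval(10)`` ticks three times, and a pending proposal is
+        signaled only on the tick that lands on the target interval::
+
+            # store.time == Interval(7)
+            store, aggs = spec.on_tick(store, Interval(10), has_proposal=True)
+            # tick_interval ran for intervals 8, 9, and 10; has_proposal was
+            # forwarded only on the final tick.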
+ """ + all_new_aggregates: list[SignedAggregatedAttestation] = [] + + # Tick forward one interval at a time + while store.time < target_interval: + # Check if proposal should be signaled for next interval + next_interval = Interval(int(store.time) + 1) + should_signal_proposal = has_proposal and next_interval == target_interval + + # Advance by one interval with appropriate signaling + store, new_aggregates = self.tick_interval(store, should_signal_proposal, is_aggregator) + all_new_aggregates.extend(new_aggregates) + + return store, all_new_aggregates + + def get_proposal_head(self, store: Store, slot: Slot) -> tuple[Store, Bytes32]: + """Get the head for block proposal at given slot. + + Ensures store is up-to-date and processes any pending attestations + before returning the canonical head. This guarantees the proposer + builds on the most recent view of the chain. + """ + # Advance time to this slot's first interval + target_interval = Interval.from_slot(slot) + store, _ = self.on_tick(store, target_interval, True) + + # Process any pending attestations before proposal + store = self.accept_new_attestations(store) + + return store, store.head + + def get_attestation_target(self, store: Store) -> Checkpoint: + """Calculate target checkpoint for validator attestations. + + Determines appropriate attestation target based on head, safe target, + and finalization constraints. The algorithm balances between advancing + the chain head and maintaining safety guarantees. + + The walk starts at the head and goes backward (up to + ``JUSTIFICATION_LOOKBACK_SLOTS`` steps) until both the safe-target + bound and the justifiability rules of the slot are satisfied. + """ + # Start from current head + target_block_root = store.head + + # Walk back toward safe target (up to `JUSTIFICATION_LOOKBACK_SLOTS` steps) + # + # This ensures the target doesn't advance too far ahead of safe target, + # providing a balance between liveness and safety. + for _ in range(JUSTIFICATION_LOOKBACK_SLOTS): + if store.blocks[target_block_root].slot > store.blocks[store.safe_target].slot: + target_block_root = store.blocks[target_block_root].parent_root + else: + break + + # Ensure target is in justifiable slot range + # + # Walk back until we find a slot that satisfies justifiability rules + # relative to the latest finalized checkpoint. + while not store.blocks[target_block_root].slot.is_justifiable_after( + store.latest_finalized.slot + ): + target_block_root = store.blocks[target_block_root].parent_root + + # Create checkpoint from selected target block + target_block = store.blocks[target_block_root] + + return Checkpoint(root=target_block_root, slot=target_block.slot) def produce_attestation_data(self, store: Store, slot: Slot) -> AttestationData: - """Build the attestation payload that a validator should sign at this slot.""" - return store.produce_attestation_data(slot) + """Produce attestation data for the given slot. + + This method constructs an AttestationData object according to the lean protocol + specification. The attestation data represents the chain state view including + head, target, and source checkpoints. 
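+
+        A sketch of the resulting shape (the concrete values depend on the
+        store's current view)::
+
+            data = spec.produce_attestation_data(store, Slot(12))
+            # data.head   -> checkpoint at store.head
+            # data.target -> spec.get_attestation_target(store)
+            # data.source -> store.latest_justified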
+ """ + # Get the head block the validator sees for this slot + head_checkpoint = Checkpoint( + root=store.head, + slot=store.blocks[store.head].slot, + ) + + # Calculate the target checkpoint for this attestation + target_checkpoint = self.get_attestation_target(store) + + # Construct attestation data + return self.attestation_data_class( + slot=slot, + head=head_checkpoint, + target=target_checkpoint, + source=store.latest_justified, + ) def produce_block_with_signatures( self, @@ -260,9 +1801,92 @@ def produce_block_with_signatures( slot: Slot, validator_index: ValidatorIndex, ) -> tuple[Store, Block, list[AggregatedSignatureProof]]: - """Produce a proposal block together with the aggregated signature proofs it needs.""" - return store.produce_block_with_signatures(slot, validator_index) + """Produce a block and its aggregated signature proofs for the target slot. - def get_proposal_head(self, store: Store, slot: Slot) -> tuple[Store, Bytes32]: - """Resolve the head root that a proposal at this slot should extend.""" - return store.get_proposal_head(slot) + Block production proceeds in four stages: + 1. Retrieve the current chain head as the parent block + 2. Verify proposer authorization for the target slot + 3. Build the block with maximal valid attestations + 4. Store the block and update checkpoints + + The block builder uses a fixed-point algorithm to collect attestations. + Each iteration may update the justified checkpoint. + + Raises: + AssertionError: If validator is not the proposer for this slot, + or if the produced block fails to close a justified divergence + between the store and the head chain. + """ + # Retrieve parent block. + # + # The proposal head reflects the latest chain view after processing + # all pending attestations. Building on stale state would orphan the block. + store, head_root = self.get_proposal_head(store, slot) + head_state = store.states[head_root] + + # Verify proposer authorization. + # + # Only one validator may propose per slot. + # Unauthorized proposals would be rejected by other nodes. + num_validators = Uint64(len(head_state.validators)) + assert validator_index.is_proposer_for(slot, num_validators), ( + f"Validator {validator_index} is not the proposer for slot {slot}" + ) + + # Build the block. + # + # The builder iteratively collects valid attestations from aggregated + # payloads matching the justified checkpoint. Each iteration may advance + # justification, unlocking more attestation data entries. + final_block, final_post_state, _, signatures = self.build_block( + head_state, + slot=slot, + proposer_index=validator_index, + parent_root=head_root, + known_block_roots=set(store.blocks.keys()), + aggregated_payloads=store.latest_known_aggregated_payloads, + ) + + # Invariant: the produced block must close any justified divergence. + # + # The store may have advanced its justified checkpoint from attestations + # on a minority fork that the head state never processed. The fixed-point + # loop above must incorporate those attestations from the pool, advancing + # the block's justified checkpoint to at least match the store. + # + # Without this, other nodes processing the block would never see the + # justification advance, degrading consensus liveness: only nodes that + # happened to receive the minority fork would know justification moved. 
+ block_justified = final_post_state.latest_justified.slot + store_justified = store.latest_justified.slot + assert block_justified >= store_justified, ( + f"Produced block justified={block_justified} < store justified=" + f"{store_justified}. Fixed-point attestation loop did not converge." + ) + + # Compute block hash for storage. + block_hash = hash_tree_root(final_block) + + # Update checkpoints from post-state. + # + # Locally produced blocks bypass normal block processing. + # We must manually propagate any checkpoint advances. + # Higher slots indicate more recent justified/finalized states. + latest_justified = max(final_post_state.latest_justified, store.latest_justified) + latest_finalized = max(final_post_state.latest_finalized, store.latest_finalized) + + # Persist block and state immutably. + new_store = store.model_copy( + update={ + "blocks": store.blocks | {block_hash: final_block}, + "states": store.states | {block_hash: final_post_state}, + "latest_justified": latest_justified, + "latest_finalized": latest_finalized, + } + ) + + # Prune stale attestation data when finalization advances + if new_store.latest_finalized.slot > store.latest_finalized.slot: + new_store = self.prune_stale_attestation_data(new_store) + + return new_store, final_block, signatures diff --git a/src/lean_spec/forks/lstar/store.py b/src/lean_spec/forks/lstar/store.py index 36fe53bf..3b397694 100644 --- a/src/lean_spec/forks/lstar/store.py +++ b/src/lean_spec/forks/lstar/store.py @@ -6,7 +6,6 @@ __all__ = ["AttestationSignatureEntry", "Store"] -from collections import defaultdict from typing import NamedTuple from lean_spec.forks.lstar.containers import ( @@ -19,34 +18,31 @@ from lean_spec.forks.lstar.containers.attestation.attestation import SignedAggregatedAttestation from lean_spec.forks.lstar.containers.block import BlockLookup from lean_spec.subspecs.chain.clock import Interval -from lean_spec.subspecs.chain.config import ( - GOSSIP_DISPARITY_INTERVALS, - INTERVALS_PER_SLOT, - JUSTIFICATION_LOOKBACK_SLOTS, - MAX_ATTESTATIONS_DATA, -) -from lean_spec.subspecs.observability import observe_on_attestation, observe_on_block -from lean_spec.subspecs.ssz.hash import hash_tree_root -from lean_spec.subspecs.xmss.aggregation import ( - AggregatedSignatureProof, - AggregationError, -) +from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof from lean_spec.subspecs.xmss.containers import Signature from lean_spec.subspecs.xmss.interface import TARGET_SIGNATURE_SCHEME, GeneralizedXmssScheme from lean_spec.types import ( - ZERO_HASH, Bytes32, Checkpoint, Slot, - Uint8, - Uint64, ValidatorIndex, - ValidatorIndices, ) from lean_spec.types.base import StrictBaseModel from .containers.state import State +_LAZY_SPEC: object = None + + +def _spec() -> object: + """Return the lstar fork spec; deferred import breaks the spec ↔ store cycle.""" + global _LAZY_SPEC + if _LAZY_SPEC is None: + from lean_spec.forks.lstar.spec import LstarSpec + + _LAZY_SPEC = LstarSpec() + return _LAZY_SPEC + class AttestationSignatureEntry(NamedTuple): """ @@ -170,165 +166,16 @@ def from_anchor( anchor_block: Block, validator_id: ValidatorIndex | None, ) -> "Store": - """ - Initialize a forkchoice store from an anchor state and block. - - The anchor block and state form the starting point for fork choice. - Both are treated as justified and finalized. - - Args: - state: The post-state of the anchor block. - anchor_block: A trusted block (e.g. genesis or checkpoint). - validator_id: Index of the validator running this store. 
- - Returns: - A new Store instance, ready to accept blocks and attestations. - - Raises: - AssertionError: - If the anchor block's state root does not match the hash - of the state. - """ - # Compute the SSZ root of this state. - # - # This is the canonical hash that should appear in the block's state root. - computed_state_root = hash_tree_root(state) - - # Check that the block actually points to this state. - # - # If this fails, the caller has supplied inconsistent inputs. - assert anchor_block.state_root == computed_state_root, ( - "Anchor block state root must match anchor state hash" - ) - - # Compute the SSZ root of the anchor block itself. - # - # This root will be used as: - # - the key in the blocks/states maps, - # - the initial head, - # - the root of the initial checkpoints. - anchor_root = hash_tree_root(anchor_block) - - # Read the slot at which the anchor block was proposed. - anchor_slot = anchor_block.slot - - # Seed both checkpoints from the anchor block itself. - # - # The store treats the anchor as the new genesis for fork choice: - # all history below it is pruned. The justified and finalized checkpoints - # therefore point at the anchor block with the anchor's own slot, - # regardless of what the anchor state's embedded checkpoints say. - anchor_checkpoint = Checkpoint(root=anchor_root, slot=anchor_slot) - - return cls( - time=Interval.from_slot(anchor_slot), - config=state.config, - head=anchor_root, - safe_target=anchor_root, - latest_justified=anchor_checkpoint, - latest_finalized=anchor_checkpoint, - blocks={anchor_root: anchor_block}, - states={anchor_root: state}, - validator_id=validator_id, - ) + """Initialize a forkchoice store from an anchor state and block.""" + return _spec().create_store(state, anchor_block, validator_id) # type: ignore[attr-defined] def prune_stale_attestation_data(self) -> "Store": - """ - Remove attestation data that can no longer influence fork choice. - - An attestation becomes stale when its target checkpoint falls at or before - the finalized slot. Such attestations cannot affect chain selection since - the target is already finalized. - - Pruning removes all attestation-related data: - - - Attestation signatures - - Pending aggregated payloads - - Processed aggregated payloads - - Returns: - New Store with stale attestation data removed. - """ - # Filter out stale entries from all attestation-related mappings. - # - # Each mapping is keyed by attestation data, so we check membership by slot - # against the finalized slot. - - return self.model_copy( - update={ - "attestation_signatures": { - attestation_data: sigs - for attestation_data, sigs in self.attestation_signatures.items() - if attestation_data.target.slot > self.latest_finalized.slot - }, - "latest_new_aggregated_payloads": { - attestation_data: proofs - for attestation_data, proofs in self.latest_new_aggregated_payloads.items() - if attestation_data.target.slot > self.latest_finalized.slot - }, - "latest_known_aggregated_payloads": { - attestation_data: proofs - for attestation_data, proofs in self.latest_known_aggregated_payloads.items() - if attestation_data.target.slot > self.latest_finalized.slot - }, - } - ) + """Remove attestation data that can no longer influence fork choice.""" + return _spec().prune_stale_attestation_data(self) # type: ignore[attr-defined] def validate_attestation(self, attestation_data: AttestationData) -> None: - """ - Validate incoming attestation before processing. - - Ensures the vote respects the basic laws of time and topology: - 1. 
The blocks voted for must exist in our store. - 2. A vote cannot span backwards in time (source > target). - 3. The head must be at least as recent as source and target. - 4. Checkpoint slots must match the actual block slots. - 5. The vote's slot must have started locally (a small disparity margin is allowed). - - Args: - attestation_data: AttestationData whose checkpoints and slot should be validated. - - Raises: - AssertionError: If attestation fails validation. - """ - data = attestation_data - - # Availability Check - # - # We cannot count a vote if we haven't seen the blocks involved. - assert data.source.root in self.blocks, f"Unknown source block: {data.source.root.hex()}" - assert data.target.root in self.blocks, f"Unknown target block: {data.target.root.hex()}" - assert data.head.root in self.blocks, f"Unknown head block: {data.head.root.hex()}" - - # Topology Check - # - # History is linear and monotonic: source <= target <= head. - # The second check implies head >= source by transitivity. - assert data.source.slot <= data.target.slot, "Source checkpoint slot must not exceed target" - assert data.head.slot >= data.target.slot, "Head checkpoint must not be older than target" - - # Consistency Check - # - # Validate checkpoint slots match block slots. - source_block = self.blocks[data.source.root] - target_block = self.blocks[data.target.root] - head_block = self.blocks[data.head.root] - assert source_block.slot == data.source.slot, "Source checkpoint slot mismatch" - assert target_block.slot == data.target.slot, "Target checkpoint slot mismatch" - assert head_block.slot == data.head.slot, "Head checkpoint slot mismatch" - - # Time Check - # - # Honest validators emit votes only after their slot has begun. - # Allow a small disparity margin for clock skew between peers. - # - # The bound is in intervals, not slots: a whole-slot margin would - # let an adversary pre-publish next-slot aggregates ahead of any - # honest validator. - attestation_start_interval = Interval.from_slot(data.slot) - assert attestation_start_interval <= self.time + GOSSIP_DISPARITY_INTERVALS, ( - "Attestation too far in future" - ) + """Validate incoming attestation before processing.""" + _spec().validate_attestation(self, attestation_data) # type: ignore[attr-defined] def on_gossip_attestation( self, @@ -336,141 +183,17 @@ def on_gossip_attestation( scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, is_aggregator: bool = False, ) -> "Store": - """ - Process a signed attestation received via gossip network. - - This method: - 1. Verifies the XMSS signature - 2. Stores the signature when the node is in aggregator mode - - Subnet filtering happens at the p2p subscription layer — only - attestations from subscribed subnets reach this method. No - additional subnet check is needed here. - - Args: - signed_attestation: The signed attestation from gossip. - scheme: XMSS signature scheme for verification. - is_aggregator: True if current validator holds aggregator role. - Only aggregator nodes store gossip attestation signatures. - - Returns: - New Store with attestation processed and signature stored if aggregating. - - Raises: - ValueError: If validator not found in state. - AssertionError: If signature verification fails. 
- """ - with observe_on_attestation(): - validator_id = signed_attestation.validator_id - attestation_data = signed_attestation.data - signature = signed_attestation.signature - - # Validate the attestation first so unknown blocks are rejected cleanly - # (instead of raising a raw KeyError when state is missing). - self.validate_attestation(attestation_data) - - key_state = self.states.get(attestation_data.target.root) - assert key_state is not None, ( - f"No state available to verify attestation signature for target block " - f"{attestation_data.target.root.hex()}" - ) - assert validator_id.is_valid(Uint64(len(key_state.validators))), ( - f"Validator {validator_id} not found in state {attestation_data.target.root.hex()}" - ) - public_key = key_state.validators[validator_id].get_attestation_pubkey() - - assert scheme.verify( - public_key, attestation_data.slot, hash_tree_root(attestation_data), signature - ), "Signature verification failed" - - # Store signature and attestation data for later aggregation. - # Copy the inner sets so we can add to them without mutating the previous store. - new_committee_sigs = {k: set(v) for k, v in self.attestation_signatures.items()} - - # Aggregators store all received gossip signatures. - # The p2p layer only delivers attestations from subscribed subnets, - # so subnet filtering happens at subscription time, not here. - # Non-aggregator nodes validate and drop — they never store gossip signatures. - if is_aggregator: - new_committee_sigs.setdefault(attestation_data, set()).add( - AttestationSignatureEntry(validator_id, signature) - ) - - # Return store with updated signature map and attestation data - return self.model_copy( - update={ - "attestation_signatures": new_committee_sigs, - } - ) + """Process a signed attestation received via gossip network.""" + return _spec().on_gossip_attestation( # type: ignore[attr-defined] + self, signed_attestation, scheme, is_aggregator + ) def on_gossip_aggregated_attestation( self, signed_attestation: SignedAggregatedAttestation ) -> "Store": - """ - Process a signed aggregated attestation received via aggregation topic - - This method: - 1. Verifies the aggregated attestation - 2. Stores the aggregation in aggregation_payloads map - - Args: - signed_attestation: The signed aggregated attestation from committee aggregation. - - Returns: - New Store with aggregation processed and stored. - - Raises: - ValueError: If validator not found in state. - AssertionError: If signature verification fails. - """ - data = signed_attestation.data - proof = signed_attestation.proof - - self.validate_attestation(data) - - # Get validator IDs who participated in this aggregation - validator_ids = proof.participants.to_validator_indices() - - # Retrieve the relevant state to look up public keys for verification. 
- key_state = self.states.get(data.target.root) - assert key_state is not None, ( - f"No state available to verify committee aggregation for target " - f"{data.target.root.hex()}" - ) - - # Ensure all participants exist in the active set - validators = key_state.validators - for validator_id in validator_ids: - assert validator_id.is_valid(Uint64(len(validators))), ( - f"Validator {validator_id} not found in state {data.target.root.hex()}" - ) - - # Prepare public keys for verification - public_keys = [validators[vid].get_attestation_pubkey() for vid in validator_ids] - - # Verify the leanVM aggregated proof - try: - proof.verify( - public_keys=public_keys, - message=hash_tree_root(data), - slot=data.slot, - ) - except AggregationError as exc: - raise AssertionError( - f"Committee aggregation signature verification failed: {exc}" - ) from exc - - # Shallow-copy the dict and its inner sets to preserve immutability. - new_aggregated_payloads = { - k: set(v) for k, v in self.latest_new_aggregated_payloads.items() - } - new_aggregated_payloads.setdefault(data, set()).add(proof) - - # Return store with updated aggregated payloads and attestation data - return self.model_copy( - update={ - "latest_new_aggregated_payloads": new_aggregated_payloads, - } + """Process a signed aggregated attestation received via gossip.""" + return _spec().on_gossip_aggregated_attestation( # type: ignore[attr-defined] + self, signed_attestation ) def on_block( @@ -478,872 +201,69 @@ def on_block( signed_block: SignedBlock, scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, ) -> "Store": - """ - Process a new block and update the forkchoice state. - - This method integrates a block into the forkchoice store by: - 1. Validating the block's parent exists - 2. Computing the post-state via the state transition function - 3. Processing attestations included in the block body (on-chain) - 4. Updating the forkchoice head - - Args: - signed_block: Complete signed block. - scheme: XMSS signature scheme to use for signature verification. - - Returns: - New Store with block integrated and head updated. - - Raises: - AssertionError: If parent block/state not found in store. - """ - with observe_on_block(): - block = signed_block.block - block_root = hash_tree_root(block) - - # Skip duplicate blocks (idempotent operation) - if block_root in self.blocks: - return self - - # Verify parent chain is available - # - # The parent state must exist before processing this block. - # If missing, the node must sync the parent chain first. - parent_state = self.states.get(block.parent_root) - assert parent_state is not None, ( - f"Parent state not found (root={block.parent_root.hex()}). " - f"Sync parent chain before processing block at slot {block.slot}." - ) - - # Validate cryptographic signatures - # Deferred import breaks the spec ↔ store cycle. - from lean_spec.forks.lstar.spec import LstarSpec - - valid_signatures = LstarSpec().verify_signatures( - signed_block, parent_state.validators, scheme - ) - - # Execute state transition function to compute post-block state - post_state = parent_state.state_transition(block, valid_signatures) - - # Propagate checkpoint advances from the post-state. - # - # Keep the checkpoint with the higher slot. - # On slot ties, prefer the store's own checkpoint. - # - # The store's checkpoint is pinned to the anchor at init and only - # moves forward via real justification/finalization events. - # On ties the store's view is authoritative. 
- latest_justified = max(self.latest_justified, post_state.latest_justified) - latest_finalized = max(self.latest_finalized, post_state.latest_finalized) - - store = self.model_copy( - update={ - "blocks": self.blocks | {block_root: block}, - "states": self.states | {block_root: post_state}, - "latest_justified": latest_justified, - "latest_finalized": latest_finalized, - } - ) - - # Process block body attestations and their signatures - # Block attestations go directly to "known" payloads - aggregated_attestations = block.body.attestations - attestation_signatures = signed_block.signature.attestation_signatures - - assert len(aggregated_attestations) == len(attestation_signatures), ( - "Attestation signature groups must match aggregated attestations" - ) - - # Each unique AttestationData must appear at most once per block. - att_data_set = {att.data for att in aggregated_attestations} - assert len(att_data_set) == len(aggregated_attestations), ( - "Block contains duplicate AttestationData entries; " - "each AttestationData must appear at most once" - ) - assert Uint8(len(att_data_set)) <= MAX_ATTESTATIONS_DATA, ( - f"Block contains {len(att_data_set)} distinct AttestationData entries; " - f"maximum is {MAX_ATTESTATIONS_DATA}" - ) - - # Copy the aggregated proof map for updates - # Shallow-copy the dict and its inner sets to preserve immutability - # Block attestations go directly to "known" payloads - # (like is_from_block=True in the spec) - block_proofs: dict[AttestationData, set[AggregatedSignatureProof]] = { - k: set(v) for k, v in store.latest_known_aggregated_payloads.items() - } - - for att, proof in zip(aggregated_attestations, attestation_signatures, strict=True): - block_proofs.setdefault(att.data, set()).add(proof) - - # Update store with new aggregated proofs and attestation data - store = store.model_copy(update={"latest_known_aggregated_payloads": block_proofs}) - - # Update forkchoice head based on new block and attestations - store = store.update_head() - - # Prune stale attestation data when finalization advances - if store.latest_finalized.slot > self.latest_finalized.slot: - store = store.prune_stale_attestation_data() - - return store + """Process a new block and update the forkchoice state.""" + return _spec().on_block(self, signed_block, scheme) # type: ignore[attr-defined] def extract_attestations_from_aggregated_payloads( self, aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] ) -> dict[ValidatorIndex, AttestationData]: - """ - Extract attestations from aggregated payloads. - - Given a mapping of aggregated signature proofs, extract the attestation data - for each validator that participated in the aggregation. - - Args: - aggregated_payloads: Mapping from AttestationData to set of aggregated proofs. - - Returns: - Mapping from ValidatorIndex to AttestationData for each validator. - """ - attestations: dict[ValidatorIndex, AttestationData] = {} - - for attestation_data, proofs in aggregated_payloads.items(): - for proof in proofs: - for validator_id in proof.participants.to_validator_indices(): - existing = attestations.get(validator_id) - if existing is None or existing.slot < attestation_data.slot: - attestations[validator_id] = attestation_data - return attestations - - def compute_block_weights(self) -> dict[Bytes32, int]: - """ - Compute attestation-based weight for each block above the finalized slot. - - Walks backward from each validator's latest head vote, incrementing weight - for every ancestor above the finalized slot. 
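The walk is worth seeing in miniature: each vote adds one unit of weight to the voted block and to every ancestor above the finalized slot, so parents inherit the weight of their descendants. A self-contained sketch with a three-block toy chain (plain tuples standing in for the real containers):

    # root -> (parent, slot); the chain is root <- A <- B
    blocks = {"B": ("A", 2), "A": ("root", 1), "root": (None, 0)}
    votes = {0: "B", 1: "B", 2: "A"}       # validator -> latest head vote
    finalized_slot = 0

    weights: dict[str, int] = {}
    for head in votes.values():
        cur = head
        while cur in blocks and blocks[cur][1] > finalized_slot:
            weights[cur] = weights.get(cur, 0) + 1
            cur = blocks[cur][0]

    assert weights == {"B": 2, "A": 3}     # A inherits both votes for B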
- - Returns: - Mapping from block root to accumulated attestation weight. - """ - attestations = self.extract_attestations_from_aggregated_payloads( - self.latest_known_aggregated_payloads + """Extract attestations from aggregated payloads.""" + return _spec().extract_attestations_from_aggregated_payloads( # type: ignore[attr-defined] + self, aggregated_payloads ) - start_slot = self.latest_finalized.slot - - weights: dict[Bytes32, int] = defaultdict(int) - - for attestation_data in attestations.values(): - current_root = attestation_data.head.root - - while current_root in self.blocks and self.blocks[current_root].slot > start_slot: - weights[current_root] += 1 - current_root = self.blocks[current_root].parent_root - - return dict(weights) - - def _compute_lmd_ghost_head( - self, - start_root: Bytes32, - attestations: dict[ValidatorIndex, AttestationData], - min_score: int = 0, - ) -> Bytes32: - """ - Walk the block tree according to the LMD GHOST rule. - - The walk starts from a chosen root. - At each fork, the child subtree with the highest weight is taken. - The process stops when a leaf is reached. - That leaf is the chosen head. - - Weights are derived from votes as follows: - - Each validator contributes its full weight to its most recent head vote. - - The weight of that vote also flows to every ancestor of the voted block. - - The weight of a subtree is the sum of all such contributions inside it. - - An optional threshold can be applied: - - If a threshold is set, children below this threshold are ignored. - - When two branches have equal weight, the one with the lexicographically - larger hash is chosen to break ties. - - Args: - start_root: Starting point root (usually latest justified). - attestations: Attestation data to consider for fork choice weights. - min_score: Minimum attestation count for block inclusion. - - Returns: - Hash of the chosen head block. - """ - # If the starting point is not defined, choose the earliest known block. - # - # This ensures that the walk always has an anchor. - if start_root == ZERO_HASH: - start_root = min( - self.blocks.keys(), key=lambda block_hash: self.blocks[block_hash].slot - ) - - # Remember the slot of the anchor once and reuse it during the walk. - # - # This avoids repeated lookups inside the inner loop. - start_slot = self.blocks[start_root].slot - - # Prepare a table that will collect voting weight for each block. - # - # Each entry starts conceptually at zero and then accumulates contributions. - weights: dict[Bytes32, int] = defaultdict(int) - - # For every vote, follow the chosen head upward through its ancestors. - # - # Each visited block accumulates one unit of weight from that validator. - for attestation_data in attestations.values(): - current_root = attestation_data.head.root - - # Climb towards the anchor while staying inside the known tree. - # - # This naturally handles partial views and ongoing sync. - while current_root in self.blocks and self.blocks[current_root].slot > start_slot: - weights[current_root] += 1 - current_root = self.blocks[current_root].parent_root - - # Build the adjacency tree (parent -> children). - # - # We use a defaultdict to avoid checking if keys exist. - children_map: dict[Bytes32, list[Bytes32]] = defaultdict(list) - - for root, block in self.blocks.items(): - # 1. Structural check: skip blocks without parents (e.g., purely genesis/orphans) - if not block.parent_root: - continue - - # 2. 
Heuristic check: prune branches early if they lack sufficient weight - if min_score > 0 and weights[root] < min_score: - continue - - children_map[block.parent_root].append(root) - - # Now perform the greedy walk. - # - # At each step, pick the child with the highest weight among the candidates. - head = start_root - - # Descend the tree, choosing the heaviest branch at every fork. - while children := children_map.get(head): - # Choose best child: most attestations, then lexicographically highest hash - head = max(children, key=lambda x: (weights[x], x)) - - return head + def compute_block_weights(self) -> dict[Bytes32, int]: + """Compute attestation-based weight for each block above the finalized slot.""" + return _spec().compute_block_weights(self) # type: ignore[attr-defined] def update_head(self) -> "Store": - """ - Compute updated store with new canonical head. - - Selects the canonical chain head using: - - 1. Latest justified checkpoint as the starting root - 2. LMD-GHOST fork choice rule (heaviest subtree by attestation weight) - - Returns: - New Store with updated head. - """ - # Extract attestations from known aggregated payloads - attestations = self.extract_attestations_from_aggregated_payloads( - self.latest_known_aggregated_payloads - ) - - # Run LMD-GHOST fork choice algorithm. - # - # Starts from the justified root and greedily descends to the heaviest - # leaf. The result is always a descendant of the justified root by - # construction: the walk only follows child edges within the subtree. - new_head = self._compute_lmd_ghost_head( - start_root=self.latest_justified.root, - attestations=attestations, - ) - - return self.model_copy( - update={ - "head": new_head, - } - ) + """Compute updated store with new canonical head.""" + return _spec().update_head(self) # type: ignore[attr-defined] def accept_new_attestations(self) -> "Store": - """ - Process pending aggregated payloads and update forkchoice head. - - Moves aggregated payloads from latest_new_aggregated_payloads to - latest_known_aggregated_payloads, making them eligible to contribute to - fork choice weights. This migration happens at specific interval ticks. - - The Interval Tick System - ------------------------- - Aggregated payloads progress through intervals: - - Interval 0: Block proposal - - Interval 1: Validators cast attestations (enter "new") - - Interval 2: Aggregators create proofs & broadcast - - Interval 3: Safe target update - - Interval 4: Process accumulated attestations - - This staged progression ensures proper timing and prevents premature - influence on fork choice decisions. - - Returns: - New Store with migrated aggregated payloads and updated head. 
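One subtlety in the greedy descent above: the key (weights[x], x) makes max() compare roots lexicographically only when weights tie, which is exactly the documented tie-break. A quick standalone check:

    weights = {b"\xaa" * 32: 7, b"\xbb" * 32: 7, b"\xcc" * 32: 3}
    children = sorted(weights)             # candidate order is irrelevant
    head = max(children, key=lambda r: (weights[r], r))
    assert head == b"\xbb" * 32            # equal weight: larger hash wins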
- """ - # Merge new aggregated payloads into known aggregated payloads - merged_aggregated_payloads = { - attestation_data: set(proofs) - for attestation_data, proofs in self.latest_known_aggregated_payloads.items() - } - for attestation_data, proofs in self.latest_new_aggregated_payloads.items(): - merged_aggregated_payloads.setdefault(attestation_data, set()).update(proofs) - - # Create store with migrated aggregated payloads - store = self.model_copy( - update={ - "latest_known_aggregated_payloads": merged_aggregated_payloads, - "latest_new_aggregated_payloads": {}, - } - ) - - # Update head with newly accepted aggregated payloads - return store.update_head() + """Process pending aggregated payloads and update forkchoice head.""" + return _spec().accept_new_attestations(self) # type: ignore[attr-defined] def update_safe_target(self) -> "Store": - """ - Compute the deepest block that has 2/3+ supermajority attestation weight. - - The safe target is the furthest-from-genesis block where enough validators - agree. Validators use it to decide which block is safe to attest to. - Only blocks meeting the supermajority threshold qualify. - - This runs at interval 3 of the slot cycle: - - - Interval 0: Block proposal - - Interval 1: Validators cast attestation votes - - Interval 2: Aggregators create proofs, broadcast via gossip - - Interval 3: Safe target update (HERE) - - Interval 4: New attestations migrate to "known" pool - - Only the "new" pool counts. Migration into "known" runs at interval 4, - after this step, so safe target sees only votes received this slot. - - Safe target is an *availability* signal, not durable knowledge: - - - A block is safe when 2/3 of currently online validators vote for a descendant. - - "Known" carries block-included, previously migrated, and self-attestations. - - Those reflect historical knowledge, not current liveness. - - Counting them would advance safe target on stale evidence after a participation collapse. - - Returns: - New Store with updated safe_target. - """ - # Look up the post-state of the current head block. - # - # The validator registry in this state tells us how many active - # validators exist. We need that count to compute the threshold. - head_state = self.states[self.head] - num_validators = Uint64(len(head_state.validators)) - - # Compute the 2/3 supermajority threshold. - # - # A block needs at least this many attestation votes to be "safe". - # The ceiling division (negation trick) ensures we round UP. - # For example, 100 validators => threshold is 67, not 66. - min_target_score = -(-num_validators * 2 // 3) - - # Unpack "new" payloads into a flat validator -> vote mapping. - # "Known" is excluded by design. - attestations = self.extract_attestations_from_aggregated_payloads( - self.latest_new_aggregated_payloads, - ) - - # Run LMD GHOST with the supermajority threshold. - # - # The walk starts from the latest justified checkpoint and descends - # through the block tree. At each fork, only children with at least - # `min_target_score` attestation weight are considered. The result - # is the deepest block that clears the 2/3 bar. - # - # If no child meets the threshold at some fork, the walk stops - # early. The safe target is then shallower than the actual head. - safe_target = self._compute_lmd_ghost_head( - start_root=self.latest_justified.root, - attestations=attestations, - min_score=min_target_score, - ) - - # Return a new Store with only the safe target updated. - # - # The head and attestation pools remain unchanged. 
- return self.model_copy(update={"safe_target": safe_target}) + """Compute the deepest block that has 2/3+ supermajority attestation weight.""" + return _spec().update_safe_target(self) # type: ignore[attr-defined] def aggregate(self) -> tuple["Store", list[SignedAggregatedAttestation]]: - """ - Turn raw validator votes into compact aggregated attestations. - - Validators cast individual signatures over gossip. Before those - votes can influence fork choice or be included in a block, they - must be combined into compact cryptographic proofs. - - The store holds three pools of attestation evidence: - - - **Gossip signatures**: individual validator votes arriving in real-time. - - **New payloads**: aggregated proofs from the current round, not yet - committed to the chain. - - **Known payloads**: previously accepted proofs, reusable as building - blocks for deeper aggregation. - - For each unique piece of attestation data the algorithm proceeds in three phases: - - 1. **Select** — greedily pick existing proofs that maximize - validator coverage (new before known). - 2. **Fill** — collect raw gossip signatures for any validators - not yet covered. - 3. **Aggregate** — delegate to the XMSS subspec to produce a - single cryptographic proof. - - After aggregation the store is updated: - - - Consumed gossip signatures are removed. - - Newly produced proofs are recorded for future reuse. - - Returns: - Updated store and the list of freshly produced signed attestations. - """ - validators = self.states[self.head].validators - gossip_sigs = self.attestation_signatures - new = self.latest_new_aggregated_payloads - known = self.latest_known_aggregated_payloads - - new_aggregates: list[SignedAggregatedAttestation] = [] - - # Only attestation data with a new payload or a raw gossip signature - # can trigger aggregation. Known payloads alone cannot — they exist - # only to help extend coverage when combined with fresh evidence. - for data in new.keys() | gossip_sigs.keys(): - # Phase 1: Select - # - # Start with the cheapest option: reuse proofs that already - # cover many validators. - # - # Child proofs are aggregated signatures from prior rounds. - # Selecting them first keeps the final proof tree shallow - # and avoids redundant cryptographic work. - # - # New payloads go first because they represent uncommitted - # work — known payloads fill remaining gaps. - child_proofs, covered = AggregatedSignatureProof.select_greedily( - new.get(data), known.get(data) - ) - - # Phase 2: Fill - # - # For every validator not yet covered by a child proof, - # include its individual gossip signature. - # - # Sorting by validator index guarantees deterministic proof - # construction regardless of network arrival order. - raw_entries = [ - ( - e.validator_id, - validators[e.validator_id].get_attestation_pubkey(), - e.signature, - ) - for e in sorted(gossip_sigs.get(data, set()), key=lambda e: e.validator_id) - if e.validator_id not in covered - ] - - # The XMSS layer enforces a minimum: either at least one raw - # signature, or at least two child proofs to merge. - # - # A lone child proof is already a valid proof — nothing to do. - if not raw_entries and len(child_proofs) < 2: - continue - - # Encode the set of raw signers as a compact bitfield. - xmss_participants = ValidatorIndices( - data=[vid for vid, _, _ in raw_entries] - ).to_aggregation_bits() - raw_xmss = [(pk, sig) for _, pk, sig in raw_entries] - - # Phase 3: Aggregate - # - # Build the recursive proof tree. 
- # - # Each child proof needs its participants' public keys so - # the XMSS prover can verify inner proofs while constructing - # the outer one. - children = [ - ( - child, - [ - validators[vid].get_attestation_pubkey() - for vid in child.participants.to_validator_indices() - ], - ) - for child in child_proofs - ] - - # Hand everything to the XMSS subspec. - # Out comes a single proof covering all selected validators. - proof = AggregatedSignatureProof.aggregate( - xmss_participants=xmss_participants, - children=children, - raw_xmss=raw_xmss, - message=hash_tree_root(data), - slot=data.slot, - ) - new_aggregates.append(SignedAggregatedAttestation(data=data, proof=proof)) - - # ── Store bookkeeping ──────────────────────────────────────── - # - # Record freshly produced proofs so future rounds can reuse them. - # Remove gossip signatures that were consumed by this aggregation. - new_aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] = {} - for signed_att in new_aggregates: - new_aggregated_payloads.setdefault(signed_att.data, set()).add(signed_att.proof) - - remaining_attestation_signatures = { - data: sigs - for data, sigs in self.attestation_signatures.items() - if data not in new_aggregated_payloads - } - - return self.model_copy( - update={ - "latest_new_aggregated_payloads": new_aggregated_payloads, - "attestation_signatures": remaining_attestation_signatures, - } - ), new_aggregates + """Turn raw validator votes into compact aggregated attestations.""" + return _spec().aggregate(self) # type: ignore[attr-defined] def tick_interval( self, has_proposal: bool, is_aggregator: bool = False ) -> tuple["Store", list[SignedAggregatedAttestation]]: - """ - Advance store time by one interval and perform interval-specific actions. - - Different actions are performed based on interval within slot: - - Interval 0: Process attestations if proposal exists - - Interval 1: Validator attesting period (no action) - - Interval 2: Aggregators create proofs & broadcast - - Interval 3: Update safe target (fast confirm) - - Interval 4: Process accumulated attestations - - The Five-Interval System - ------------------------- - Each slot is divided into 5 intervals: - - **Interval 0 (Block Proposal)**: - - Block proposer publishes their block - - If proposal exists, immediately accept new attestations - - This ensures validators see the block before attesting - - **Interval 1 (Vote Propagation)**: - - Validators vote & propagate to their attestation subnet topics - - No store action required - - **Interval 2 (Aggregation)**: - - Aggregators collect votes and create aggregated proofs - - Broadcast proofs to the aggregation topic - - **Interval 3 (Safe Target Update)**: - - Validators use received proofs to update safe target - - Provides validators with a stable attestation target (fast confirm) - - **Interval 4 (Attestation Acceptance)**: - - Accept accumulated attestations (new → known) - - Update head based on new attestation weights - - Prepare for next slot - - Args: - has_proposal: Whether a proposal exists for this interval. - is_aggregator: Whether the node is an aggregator. - - Returns: - Tuple of (new store with advanced time, list of new signed aggregated attestation). 
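The five-interval schedule in the docstring above maps onto a small dispatch, shown in full in the deleted body that follows. Its skeleton, with plain ints standing in for Interval:

    INTERVALS_PER_SLOT = 5

    def action(interval: int, has_proposal: bool, is_aggregator: bool) -> str:
        if interval == 0 and has_proposal:
            return "accept_new_attestations"
        if interval == 2 and is_aggregator:
            return "aggregate"
        if interval == 3:
            return "update_safe_target"
        if interval == 4:
            return "accept_new_attestations"
        return "no-op"

    time = 23                                    # intervals since genesis
    assert time % INTERVALS_PER_SLOT == 3        # interval 3 of slot 4
    assert action(3, False, False) == "update_safe_target"
    assert action(1, True, True) == "no-op"      # vote propagation: store idle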
- """ - # Advance time by one interval - store = self.model_copy(update={"time": self.time + Interval(1)}) - current_interval = store.time % INTERVALS_PER_SLOT - new_aggregates: list[SignedAggregatedAttestation] = [] - - if current_interval == Interval(0) and has_proposal: - store = store.accept_new_attestations() - elif current_interval == Interval(2) and is_aggregator: - store, new_aggregates = store.aggregate() - elif current_interval == Interval(3): - store = store.update_safe_target() - elif current_interval == Interval(4): - store = store.accept_new_attestations() - - return store, new_aggregates + """Advance store time by one interval and perform interval-specific actions.""" + return _spec().tick_interval(self, has_proposal, is_aggregator) # type: ignore[attr-defined] def on_tick( self, target_interval: Interval, has_proposal: bool, is_aggregator: bool = False ) -> tuple["Store", list[SignedAggregatedAttestation]]: - """ - Advance forkchoice store time to given interval count. - - Ticks store forward interval by interval, performing appropriate - actions for each interval type. This method handles time progression - incrementally to ensure all interval-specific actions are performed. - - Args: - target_interval: Target time as intervals since genesis. - has_proposal: Whether node has proposal for current slot. - is_aggregator: Whether the node is an aggregator. - - Returns: - Tuple of (new store with time advanced, - list of all produced signed aggregated attestation). - """ - store = self - all_new_aggregates: list[SignedAggregatedAttestation] = [] - - # Tick forward one interval at a time - while store.time < target_interval: - # Check if proposal should be signaled for next interval - next_interval = Interval(int(store.time) + 1) - should_signal_proposal = has_proposal and next_interval == target_interval - - # Advance by one interval with appropriate signaling - store, new_aggregates = store.tick_interval(should_signal_proposal, is_aggregator) - all_new_aggregates.extend(new_aggregates) - - return store, all_new_aggregates + """Advance forkchoice store time to given interval count.""" + return _spec().on_tick( # type: ignore[attr-defined] + self, target_interval, has_proposal, is_aggregator + ) def get_proposal_head(self, slot: Slot) -> tuple["Store", Bytes32]: - """ - Get the head for block proposal at given slot. - - Ensures store is up-to-date and processes any pending attestations - before returning the canonical head. This guarantees the proposer - builds on the most recent view of the chain. - - Algorithm - --------- - 1. Calculate slot time from slot number - 2. Advance store time to current slot (ticking intervals) - 3. Accept any pending attestations - 4. Return updated store and head root - - Args: - slot: Slot for which to get proposal head. - - Returns: - Tuple of (new Store with updated time, head root for building). - """ - # Advance time to this slot's first interval - target_interval = Interval.from_slot(slot) - store, _ = self.on_tick(target_interval, True) - - # Process any pending attestations before proposal - store = store.accept_new_attestations() - - return store, store.head + """Get the head for block proposal at given slot.""" + return _spec().get_proposal_head(self, slot) # type: ignore[attr-defined] def get_attestation_target(self) -> Checkpoint: - """ - Calculate target checkpoint for validator attestations. - - Determines appropriate attestation target based on head, safe target, - and finalization constraints. 
The target selection algorithm balances - between advancing the chain head and maintaining safety guarantees. - - Attestation Target Algorithm - ----------------------------- - The algorithm walks back from the current head toward the safe target, - ensuring the target is in a justifiable slot range: - - 1. **Start at Head**: Begin with the current head block - 2. **Walk Toward Safe**: Move backward (up to `JUSTIFICATION_LOOKBACK_SLOTS` steps) - if safe target is newer - 3. **Ensure Justifiable**: Continue walking back until slot is justifiable - 4. **Return Checkpoint**: Create checkpoint from selected block - - Justifiability Rules (see Slot.is_justifiable_after) - ------------------------------------------------------ - A slot is justifiable at distance delta from finalization if: - 1. delta ≤ 5 (first 5 slots always justifiable) - 2. delta is a perfect square (1, 4, 9, 16, 25, ...) - 3. delta is a pronic number (2, 6, 12, 20, 30, ...) - - These rules prevent long-range attacks by restricting which checkpoints - validators can attest to relative to finalization. - - Returns: - Target checkpoint for attestation. - """ - # Start from current head - target_block_root = self.head - - # Walk back toward safe target (up to `JUSTIFICATION_LOOKBACK_SLOTS` steps) - # - # This ensures the target doesn't advance too far ahead of safe target, - # providing a balance between liveness and safety. - for _ in range(JUSTIFICATION_LOOKBACK_SLOTS): - if self.blocks[target_block_root].slot > self.blocks[self.safe_target].slot: - target_block_root = self.blocks[target_block_root].parent_root - else: - break - - # Ensure target is in justifiable slot range - # - # Walk back until we find a slot that satisfies justifiability rules - # relative to the latest finalized checkpoint. - while not self.blocks[target_block_root].slot.is_justifiable_after( - self.latest_finalized.slot - ): - target_block_root = self.blocks[target_block_root].parent_root - - # Create checkpoint from selected target block - target_block = self.blocks[target_block_root] - - return Checkpoint(root=target_block_root, slot=target_block.slot) + """Calculate target checkpoint for validator attestations.""" + return _spec().get_attestation_target(self) # type: ignore[attr-defined] def produce_attestation_data(self, slot: Slot) -> AttestationData: - """ - Produce attestation data for the given slot. - - This method constructs an AttestationData object according to the lean protocol - specification. The attestation data represents the chain state view including - head, target, and source checkpoints. - - The algorithm: - 1. Get the current head block - 2. Calculate the appropriate attestation target using current forkchoice state - 3. Use the store's latest justified checkpoint as the attestation source - 4. Construct and return the complete AttestationData object - - Args: - slot: The slot for which to produce the attestation data. - - Returns: - A fully constructed AttestationData object. 
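The justifiability rules enumerated above are compact enough to restate directly. An illustrative re-implementation (the authoritative version is Slot.is_justifiable_after):

    import math

    def is_justifiable(delta: int) -> bool:
        if delta <= 5:                         # rule 1: first five slots
            return True
        r = math.isqrt(delta)
        if r * r == delta:                     # rule 2: perfect squares
            return True
        return any(n * (n + 1) == delta for n in range(r + 1))  # rule 3: pronic

    assert all(is_justifiable(d) for d in (0, 5, 9, 12, 16, 20, 25, 30))
    assert not any(is_justifiable(d) for d in (7, 8, 10, 11, 13, 14))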
- """ - # Get the head block the validator sees for this slot - head_checkpoint = Checkpoint( - root=self.head, - slot=self.blocks[self.head].slot, - ) - - # Calculate the target checkpoint for this attestation - target_checkpoint = self.get_attestation_target() - - # Construct attestation data - return AttestationData( - slot=slot, - head=head_checkpoint, - target=target_checkpoint, - source=self.latest_justified, - ) + """Produce attestation data for the given slot.""" + return _spec().produce_attestation_data(self, slot) # type: ignore[attr-defined] def produce_block_with_signatures( self, slot: Slot, validator_index: ValidatorIndex, ) -> tuple["Store", Block, list[AggregatedSignatureProof]]: - """ - Produce a block and its aggregated signature proofs for the target slot. - - Block production proceeds in four stages: - 1. Retrieve the current chain head as the parent block - 2. Verify proposer authorization for the target slot - 3. Build the block with maximal valid attestations - 4. Store the block and update checkpoints - - The block builder uses a fixed-point algorithm to collect attestations. - Each iteration may update the justified checkpoint. - Some attestations only become valid after this update. - The process repeats until no new attestations can be added. - - This maximizes consensus contribution from each block. - - Args: - slot: Target slot for block production. - validator_index: Proposer's validator index. - - Returns: - Tuple containing: - - - Updated store with the new block - - The produced block - - Signature proofs aligned with block attestations - - Raises: - AssertionError: If validator is not the proposer for this slot, - or if the produced block fails to close a justified divergence - between the store and the head chain. - """ - # Retrieve parent block. - # - # The proposal head reflects the latest chain view after processing - # all pending attestations. Building on stale state would orphan the block. - store, head_root = self.get_proposal_head(slot) - head_state = store.states[head_root] - - # Verify proposer authorization. - # - # Only one validator may propose per slot. - # Unauthorized proposals would be rejected by other nodes. - num_validators = Uint64(len(head_state.validators)) - assert validator_index.is_proposer_for(slot, num_validators), ( - f"Validator {validator_index} is not the proposer for slot {slot}" - ) - - # Build the block. - # - # The builder iteratively collects valid attestations from aggregated - # payloads matching the justified checkpoint. Each iteration may advance - # justification, unlocking more attestation data entries. - final_block, final_post_state, collected_attestations, signatures = head_state.build_block( - slot=slot, - proposer_index=validator_index, - parent_root=head_root, - known_block_roots=set(store.blocks.keys()), - aggregated_payloads=store.latest_known_aggregated_payloads, - ) - - # Invariant: the produced block must close any justified divergence. - # - # The store may have advanced its justified checkpoint from attestations - # on a minority fork that the head state never processed. The fixed-point - # loop above must incorporate those attestations from the pool, advancing - # the block's justified checkpoint to at least match the store. - # - # Without this, other nodes processing the block would never see the - # justification advance, degrading consensus liveness: only nodes that - # happened to receive the minority fork would know justification moved. 
- block_justified = final_post_state.latest_justified.slot - store_justified = store.latest_justified.slot - assert block_justified >= store_justified, ( - f"Produced block justified={block_justified} < store justified=" - f"{store_justified}. Fixed-point attestation loop did not converge." + """Produce a block and its aggregated signature proofs for the target slot.""" + return _spec().produce_block_with_signatures( # type: ignore[attr-defined] + self, slot, validator_index ) - - # Compute block hash for storage. - block_hash = hash_tree_root(final_block) - - # Update checkpoints from post-state. - # - # Locally produced blocks bypass normal block processing. - # We must manually propagate any checkpoint advances. - # Higher slots indicate more recent justified/finalized states. - latest_justified = max(final_post_state.latest_justified, store.latest_justified) - latest_finalized = max(final_post_state.latest_finalized, store.latest_finalized) - - # Persist block and state immutably. - new_store = store.model_copy( - update={ - "blocks": store.blocks | {block_hash: final_block}, - "states": store.states | {block_hash: final_post_state}, - "latest_justified": latest_justified, - "latest_finalized": latest_finalized, - } - ) - - # Prune stale attestation data when finalization advances - if new_store.latest_finalized.slot > store.latest_finalized.slot: - new_store = new_store.prune_stale_attestation_data() - - return new_store, final_block, signatures diff --git a/tests/lean_spec/forks/test_lstar_spec_delegators.py b/tests/lean_spec/forks/test_lstar_spec_delegators.py deleted file mode 100644 index 4197a511..00000000 --- a/tests/lean_spec/forks/test_lstar_spec_delegators.py +++ /dev/null @@ -1,235 +0,0 @@ -"""Verify that every fork-class method forwards faithfully to the underlying container. - -Each test patches the container method, calls the matching fork-class method, -and asserts: - -- The container method receives the same arguments. -- The fork-class method returns the container's result unchanged. 
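Every deleted test below follows the same patch-and-forward shape. A self-contained miniature of it, using only unittest.mock and stand-in classes:

    from unittest.mock import patch

    class Container:
        def work(self, x: int) -> object:
            raise NotImplementedError

    class Spec:
        def work(self, container: "Container", x: int) -> object:
            return container.work(x)            # forwarder under test

    SENTINEL = object()

    with patch.object(Container, "work", return_value=SENTINEL) as mock:
        result = Spec().work(Container(), 42)

    mock.assert_called_once_with(42)            # same arguments forwarded
    assert result is SENTINEL                   # result returned unchanged

Because patch.object replaces the class attribute with a plain MagicMock rather than a bound method, the mock records the call without a self argument, which is why the real tests assert on the forwarded arguments alone.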
-""" - -from unittest.mock import patch - -from lean_spec.forks.lstar import State, Store -from lean_spec.forks.lstar.containers import Block, SignedAttestation, SignedBlock -from lean_spec.forks.lstar.containers.attestation import ( - AggregatedAttestation, - SignedAggregatedAttestation, -) -from lean_spec.forks.lstar.spec import LstarSpec -from lean_spec.subspecs.chain.clock import Interval -from lean_spec.subspecs.xmss.interface import TARGET_SIGNATURE_SCHEME -from lean_spec.types import Bytes32, Slot, ValidatorIndex -from tests.lean_spec.helpers.builders import ( - make_genesis_data, - make_keyed_genesis_state, - make_signed_block, - make_validators, -) - -_NUM_VALIDATORS = 3 -_VALIDATOR_ID = ValidatorIndex(0) -_SENTINEL = object() -"""Unique object returned by patched containers to confirm the result is forwarded unchanged.""" - - -def _spec() -> LstarSpec: - """Build a fresh fork-class instance for one test.""" - return LstarSpec() - - -class TestStateDelegators: - """Fork-class methods that route through the state container.""" - - def test_state_transition_forwards(self) -> None: - """The post-state computation forwards to the state container.""" - state = make_keyed_genesis_state(_NUM_VALIDATORS) - block = Block.model_construct(slot=Slot(1)) - - with patch.object(State, "state_transition", return_value=_SENTINEL) as mock: - result = _spec().state_transition(state, block, valid_signatures=False) - - mock.assert_called_once_with(block, False) - assert result is _SENTINEL - - def test_process_slots_forwards(self) -> None: - """Advancing through empty slots forwards to the state container.""" - state = make_keyed_genesis_state(_NUM_VALIDATORS) - target = Slot(7) - - with patch.object(State, "process_slots", return_value=_SENTINEL) as mock: - result = _spec().process_slots(state, target) - - mock.assert_called_once_with(target) - assert result is _SENTINEL - - def test_process_block_forwards(self) -> None: - """Full block processing forwards to the state container.""" - state = make_keyed_genesis_state(_NUM_VALIDATORS) - block = Block.model_construct(slot=Slot(1)) - - with patch.object(State, "process_block", return_value=_SENTINEL) as mock: - result = _spec().process_block(state, block) - - mock.assert_called_once_with(block) - assert result is _SENTINEL - - def test_process_block_header_forwards(self) -> None: - """Header-only processing forwards to the state container.""" - state = make_keyed_genesis_state(_NUM_VALIDATORS) - block = Block.model_construct(slot=Slot(1)) - - with patch.object(State, "process_block_header", return_value=_SENTINEL) as mock: - result = _spec().process_block_header(state, block) - - mock.assert_called_once_with(block) - assert result is _SENTINEL - - def test_process_attestations_forwards(self) -> None: - """Folding attestations into the state forwards to the state container.""" - state = make_keyed_genesis_state(_NUM_VALIDATORS) - attestations: list[AggregatedAttestation] = [] - - with patch.object(State, "process_attestations", return_value=_SENTINEL) as mock: - result = _spec().process_attestations(state, attestations) - - mock.assert_called_once_with(attestations) - assert result is _SENTINEL - - def test_build_block_forwards(self) -> None: - """Block construction forwards to the state container.""" - state = make_keyed_genesis_state(_NUM_VALIDATORS) - slot = Slot(1) - proposer_index = ValidatorIndex(1) - parent_root = Bytes32.zero() - known_block_roots = {parent_root} - - with patch.object(State, "build_block", return_value=_SENTINEL) as mock: - 
result = _spec().build_block( - state, - slot=slot, - proposer_index=proposer_index, - parent_root=parent_root, - known_block_roots=known_block_roots, - ) - - mock.assert_called_once_with( - slot=slot, - proposer_index=proposer_index, - parent_root=parent_root, - known_block_roots=known_block_roots, - aggregated_payloads=None, - ) - assert result is _SENTINEL - - -class TestSignedBlockDelegator: - """Fork-class method that routes through the signed-block container.""" - - def test_verify_signatures_forwards(self) -> None: - """Signature verification forwards to the signed-block container.""" - validators = make_validators(_NUM_VALIDATORS) - signed_block = make_signed_block( - slot=Slot(0), - proposer_index=ValidatorIndex(0), - parent_root=Bytes32.zero(), - state_root=Bytes32.zero(), - ) - - with patch.object(SignedBlock, "verify_signatures", return_value=True) as mock: - result = _spec().verify_signatures(signed_block, validators) - - mock.assert_called_once_with(validators, TARGET_SIGNATURE_SCHEME) - assert result is True - - -class TestStoreDelegators: - """Fork-class methods that route through the forkchoice store.""" - - def _store(self) -> Store: - """Build a genesis forkchoice store for one test.""" - return make_genesis_data(num_validators=_NUM_VALIDATORS, validator_id=_VALIDATOR_ID).store - - def test_on_block_forwards(self) -> None: - """Incorporating a new block forwards to the forkchoice store.""" - store = self._store() - signed_block = make_signed_block( - slot=Slot(1), - proposer_index=ValidatorIndex(1), - parent_root=Bytes32.zero(), - state_root=Bytes32.zero(), - ) - - with patch.object(Store, "on_block", return_value=_SENTINEL) as mock: - result = _spec().on_block(store, signed_block) - - mock.assert_called_once_with(signed_block, TARGET_SIGNATURE_SCHEME) - assert result is _SENTINEL - - def test_on_tick_forwards(self) -> None: - """Advancing forkchoice time forwards to the forkchoice store.""" - store = self._store() - target = Interval.from_slot(Slot(1)) - - with patch.object(Store, "on_tick", return_value=_SENTINEL) as mock: - result = _spec().on_tick(store, target, has_proposal=True, is_aggregator=True) - - mock.assert_called_once_with(target, True, True) - assert result is _SENTINEL - - def test_on_gossip_attestation_forwards(self) -> None: - """A single-validator attestation from gossip forwards to the forkchoice store.""" - store = self._store() - attestation = SignedAttestation.model_construct() - - with patch.object(Store, "on_gossip_attestation", return_value=_SENTINEL) as mock: - result = _spec().on_gossip_attestation(store, attestation, is_aggregator=True) - - mock.assert_called_once_with(attestation, TARGET_SIGNATURE_SCHEME, True) - assert result is _SENTINEL - - def test_on_gossip_aggregated_attestation_forwards(self) -> None: - """An aggregated attestation from gossip forwards to the forkchoice store.""" - store = self._store() - attestation = SignedAggregatedAttestation.model_construct() - - with patch.object( - Store, "on_gossip_aggregated_attestation", return_value=_SENTINEL - ) as mock: - result = _spec().on_gossip_aggregated_attestation(store, attestation) - - mock.assert_called_once_with(attestation) - assert result is _SENTINEL - - def test_produce_attestation_data_forwards(self) -> None: - """Building attestation payload forwards to the forkchoice store.""" - store = self._store() - slot = Slot(2) - - with patch.object(Store, "produce_attestation_data", return_value=_SENTINEL) as mock: - result = _spec().produce_attestation_data(store, slot) - - 
mock.assert_called_once_with(slot) - assert result is _SENTINEL - - def test_produce_block_with_signatures_forwards(self) -> None: - """Producing a proposal block with proofs forwards to the forkchoice store.""" - store = self._store() - slot = Slot(2) - validator_index = ValidatorIndex(1) - - with patch.object(Store, "produce_block_with_signatures", return_value=_SENTINEL) as mock: - result = _spec().produce_block_with_signatures(store, slot, validator_index) - - mock.assert_called_once_with(slot, validator_index) - assert result is _SENTINEL - - def test_get_proposal_head_forwards(self) -> None: - """Resolving the proposal head forwards to the forkchoice store.""" - store = self._store() - slot = Slot(2) - - with patch.object(Store, "get_proposal_head", return_value=_SENTINEL) as mock: - result = _spec().get_proposal_head(store, slot) - - mock.assert_called_once_with(slot) - assert result is _SENTINEL From d505cf55c2cdd39922bc72c1ed33b573832aec27 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 17:21:41 +0200 Subject: [PATCH 3/9] refactor(forks): delete the trivial container forwarders (Stage 4D of #686) State, Store, and SignedBlock become pure Pydantic data containers. All forwarder methods that delegated through the lazy spec singleton are removed; the lazy singleton helpers are removed alongside them. Every remaining call site that used to go through a container method now goes through the active fork spec: - Tests use the session-scoped `spec` fixture from `tests/lean_spec/conftest.py`. - Subspec services (`sync`, `validator`, `chain`, plus the fork-choice API endpoint) carry a module-level `_SPEC = LstarSpec()` constant. - Library helpers (`tests/lean_spec/helpers/builders.py`, `packages/testing/...`) follow the same `_SPEC` pattern. `ForkProtocol.generate_genesis` and `ForkProtocol.create_store` become abstract; the previous default implementations referenced container methods that no longer exist. 
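The `_SPEC` pattern the message refers to is visible in the hunks below; in miniature, each call site swaps a container method for a call through one shared, stateless spec instance (function name here is illustrative):

    from lean_spec.forks.lstar.spec import LstarSpec
    from lean_spec.forks.lstar.store import Store
    from lean_spec.types import Bytes32

    _SPEC = LstarSpec()
    """Active fork spec -- stateless, safe to share across call sites."""

    def block_weights(store: Store) -> dict[Bytes32, int]:
        # Before this series: store.compute_block_weights()
        return _SPEC.compute_block_weights(store)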
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../test_fixtures/api_endpoint.py | 8 +- .../test_fixtures/fork_choice.py | 15 +- .../test_types/block_spec.py | 4 +- .../test_types/store_checks.py | 11 +- .../forks/lstar/containers/state/state.py | 69 +-------- src/lean_spec/forks/lstar/store.py | 133 +----------------- src/lean_spec/forks/protocol.py | 11 +- .../subspecs/api/endpoints/fork_choice.py | 7 +- src/lean_spec/subspecs/chain/service.py | 8 +- src/lean_spec/subspecs/sync/service.py | 16 ++- src/lean_spec/subspecs/validator/service.py | 14 +- .../state_transition/test_block_processing.py | 5 +- .../state_transition/test_finalization.py | 5 +- .../devnet/state_transition/test_genesis.py | 5 +- .../test_slot_monotonicity.py | 7 +- tests/lean_spec/conftest.py | 23 ++- tests/lean_spec/helpers/builders.py | 4 +- .../containers/test_state_aggregation.py | 12 +- .../forkchoice/test_attestation_target.py | 49 ++++--- .../forkchoice/test_compute_block_weights.py | 13 +- .../forkchoice/test_store_attestations.py | 40 +++--- .../subspecs/forkchoice/test_store_pruning.py | 31 ++-- .../forkchoice/test_time_management.py | 50 ++++--- .../forkchoice/test_validate_attestation.py | 21 +-- .../subspecs/forkchoice/test_validator.py | 8 +- .../lean_spec/subspecs/genesis/test_state.py | 13 +- 26 files changed, 227 insertions(+), 355 deletions(-) diff --git a/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py b/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py index 0a1eadec..ad69bd51 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py +++ b/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py @@ -1,7 +1,7 @@ """API endpoint response conformance fixtures.""" from collections.abc import Callable -from typing import Any, ClassVar +from typing import Any, ClassVar, cast from lean_spec.forks.lstar import Store from lean_spec.forks.lstar.containers import BlockBody @@ -53,7 +53,7 @@ def _build_store(num_validators: int, genesis_time: int, anchor_slot: int = 0) - ) block = _make_genesis_block(state) # No validator identity — fixture only reads store data, never signs. - return Store.from_anchor(state, block, validator_id=None) + return cast(Store, fork.create_store(state, block, validator_id=None)) # Walk the chain from genesis through anchor_slot using empty blocks. # The returned pair (state, block) is internally consistent with the @@ -64,7 +64,7 @@ def _build_store(num_validators: int, genesis_time: int, anchor_slot: int = 0) - anchor_slot=Slot(anchor_slot), genesis_time=Uint64(genesis_time), ) - return Store.from_anchor(state, block, validator_id=None) + return cast(Store, fork.create_store(state, block, validator_id=None)) def _health_response(_store: Store, _fixture: "ApiEndpointTest") -> dict[str, Any]: @@ -100,7 +100,7 @@ def _finalized_state_response(store: Store, _fixture: "ApiEndpointTest") -> dict def _fork_choice_response(store: Store, _fixture: "ApiEndpointTest") -> dict[str, Any]: """Fork choice tree: blocks with weights, head, checkpoints, validator count.""" - weights = store.compute_block_weights() + weights = LstarSpec().compute_block_weights(store) # Only post-finalization blocks are relevant to head selection. 
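A note on the cast() calls introduced in these hunks: ForkProtocol.create_store returns the protocol-level store type, while the fixtures need the concrete lstar Store, and typing.cast narrows the static type with no runtime effect. A generic standalone illustration (stand-in types, not the spec's):

    from typing import Protocol, cast

    class HasHead(Protocol):               # stand-in for the store protocol
        head: bytes

    class ConcreteStore:
        head = b"\x00" * 32

    def create_store() -> HasHead:
        return ConcreteStore()

    store = cast(ConcreteStore, create_store())   # static narrowing only
    assert isinstance(store, ConcreteStore)       # cast did nothing at runtime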
nodes = [ diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index 0c0add9f..03c0483e 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -7,7 +7,7 @@ from __future__ import annotations -from typing import ClassVar, Self +from typing import ClassVar, Self, cast from pydantic import Field, model_validator @@ -202,7 +202,7 @@ def make_fixture(self) -> Self: "Store.from_anchor is expected to fail before any step can run" ) try: - Store.from_anchor( + _SPEC.create_store( self.anchor_state, self.anchor_block, validator_id=ValidatorIndex(0), @@ -257,10 +257,13 @@ def make_fixture(self) -> Self: # # The Store is the node's local view of the chain. # It starts from a trusted anchor (usually genesis). - store = Store.from_anchor( - self.anchor_state, - self.anchor_block, - validator_id=ValidatorIndex(0), + store = cast( + Store, + _SPEC.create_store( + self.anchor_state, + self.anchor_block, + validator_id=ValidatorIndex(0), + ), ) # Block registry for fork creation diff --git a/packages/testing/src/consensus_testing/test_types/block_spec.py b/packages/testing/src/consensus_testing/test_types/block_spec.py index 944d102e..3a09964e 100644 --- a/packages/testing/src/consensus_testing/test_types/block_spec.py +++ b/packages/testing/src/consensus_testing/test_types/block_spec.py @@ -454,8 +454,8 @@ def build_signed_block_with_store( ) # Trigger Store aggregation to merge gossip signatures into known payloads. - aggregation_store, _ = store.aggregate() - merged_store = aggregation_store.accept_new_attestations() + aggregation_store, _ = _SPEC.aggregate(store) + merged_store = _SPEC.accept_new_attestations(aggregation_store) # Build the block through the spec's State.build_block(). 
final_block, _, _, block_proofs = _SPEC.build_block( diff --git a/packages/testing/src/consensus_testing/test_types/store_checks.py b/packages/testing/src/consensus_testing/test_types/store_checks.py index 7f791753..a5a53be0 100644 --- a/packages/testing/src/consensus_testing/test_types/store_checks.py +++ b/packages/testing/src/consensus_testing/test_types/store_checks.py @@ -4,12 +4,15 @@ from lean_spec.forks.lstar.containers import AttestationData from lean_spec.forks.lstar.containers.block.block import Block, BlockLookup +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.forks.lstar.store import Store from lean_spec.subspecs.ssz import hash_tree_root from lean_spec.types import ZERO_HASH, Bytes32, CamelModel, Slot, Uint64, ValidatorIndex from .utils import resolve_block_root +_SPEC = LstarSpec() + def _ancestor_set(blocks: BlockLookup, head: Bytes32) -> set[Bytes32]: """Walk parent links from head and collect every reachable block root.""" @@ -364,7 +367,7 @@ def _resolve(label: str) -> Bytes32: # Attestation target checkpoint (slot + root consistency) if "attestation_target_slot" in fields: - target = store.get_attestation_target() + target = _SPEC.get_attestation_target(store) _check("attestation_target.slot", target.slot, self.attestation_target_slot) block_found = any( @@ -395,7 +398,7 @@ def _resolve(label: str) -> Bytes32: payloads = store.latest_known_aggregated_payloads label = "in latest_known" - extracted = store.extract_attestations_from_aggregated_payloads(payloads) + extracted = _SPEC.extract_attestations_from_aggregated_payloads(store, payloads) if check.validator not in extracted: raise AssertionError( f"Step {step_index}: validator {check.validator} not found " @@ -559,8 +562,8 @@ def _validate_lexicographic_head( root = hash_tree_root(block) slot = block.slot - known_attestations = store.extract_attestations_from_aggregated_payloads( - store.latest_known_aggregated_payloads + known_attestations = _SPEC.extract_attestations_from_aggregated_payloads( + store, store.latest_known_aggregated_payloads ) weight = 0 for attestation in known_attestations.values(): diff --git a/src/lean_spec/forks/lstar/containers/state/state.py b/src/lean_spec/forks/lstar/containers/state/state.py index 624a3443..4b42023d 100644 --- a/src/lean_spec/forks/lstar/containers/state/state.py +++ b/src/lean_spec/forks/lstar/containers/state/state.py @@ -2,11 +2,7 @@ from __future__ import annotations -from collections.abc import Iterable -from collections.abc import Set as AbstractSet - -from lean_spec.forks.lstar.containers.attestation import AggregatedAttestation, AttestationData -from lean_spec.forks.lstar.containers.block import Block, BlockHeader +from lean_spec.forks.lstar.containers.block import BlockHeader from lean_spec.forks.lstar.containers.config import Config from lean_spec.forks.lstar.containers.state.types import ( HistoricalBlockHashes, @@ -15,20 +11,7 @@ JustifiedSlots, ) from lean_spec.forks.lstar.containers.validator import Validators -from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof -from lean_spec.types import Bytes32, Checkpoint, Container, Slot, Uint64, ValidatorIndex - -_LAZY_SPEC: object = None - - -def _spec() -> object: - """Return the lstar fork spec; deferred import breaks the spec ↔ state cycle.""" - global _LAZY_SPEC - if _LAZY_SPEC is None: - from lean_spec.forks.lstar.spec import LstarSpec - - _LAZY_SPEC = LstarSpec() - return _LAZY_SPEC +from lean_spec.types import Checkpoint, Container, Slot class State(Container): @@ -68,51 
+51,3 @@ class State(Container): justifications_validators: JustificationValidators """A bitlist of validators who participated in justifications.""" - - @classmethod - def generate_genesis(cls, genesis_time: Uint64, validators: Validators) -> State: - """Generate a genesis state with empty history and proper initial values.""" - return _spec().generate_genesis(genesis_time, validators) # type: ignore[attr-defined] - - def process_slots(self, target_slot: Slot) -> State: - """Advance the state through empty slots up to, but not including, target_slot.""" - return _spec().process_slots(self, target_slot) # type: ignore[attr-defined] - - def process_block_header(self, block: Block) -> State: - """Validate the block header and update header-linked state.""" - return _spec().process_block_header(self, block) # type: ignore[attr-defined] - - def process_block(self, block: Block) -> State: - """Apply full block processing including header and body.""" - return _spec().process_block(self, block) # type: ignore[attr-defined] - - def process_attestations( - self, - attestations: Iterable[AggregatedAttestation], - ) -> State: - """Apply attestations and update justification/finalization.""" - return _spec().process_attestations(self, attestations) # type: ignore[attr-defined] - - def state_transition(self, block: Block, valid_signatures: bool = True) -> State: - """Apply the complete state transition function for a block.""" - return _spec().state_transition( # type: ignore[attr-defined] - self, block, valid_signatures - ) - - def build_block( - self, - slot: Slot, - proposer_index: ValidatorIndex, - parent_root: Bytes32, - known_block_roots: AbstractSet[Bytes32], - aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] | None = None, - ) -> tuple[Block, State, list[AggregatedAttestation], list[AggregatedSignatureProof]]: - """Build a valid block on top of this state.""" - return _spec().build_block( # type: ignore[attr-defined] - self, - slot=slot, - proposer_index=proposer_index, - parent_root=parent_root, - known_block_roots=known_block_roots, - aggregated_payloads=aggregated_payloads, - ) diff --git a/src/lean_spec/forks/lstar/store.py b/src/lean_spec/forks/lstar/store.py index 3b397694..db9ca1de 100644 --- a/src/lean_spec/forks/lstar/store.py +++ b/src/lean_spec/forks/lstar/store.py @@ -10,39 +10,17 @@ from lean_spec.forks.lstar.containers import ( AttestationData, - Block, Config, - SignedAttestation, - SignedBlock, ) -from lean_spec.forks.lstar.containers.attestation.attestation import SignedAggregatedAttestation from lean_spec.forks.lstar.containers.block import BlockLookup from lean_spec.subspecs.chain.clock import Interval from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof from lean_spec.subspecs.xmss.containers import Signature -from lean_spec.subspecs.xmss.interface import TARGET_SIGNATURE_SCHEME, GeneralizedXmssScheme -from lean_spec.types import ( - Bytes32, - Checkpoint, - Slot, - ValidatorIndex, -) +from lean_spec.types import Bytes32, Checkpoint, ValidatorIndex from lean_spec.types.base import StrictBaseModel from .containers.state import State -_LAZY_SPEC: object = None - - -def _spec() -> object: - """Return the lstar fork spec; deferred import breaks the spec ↔ store cycle.""" - global _LAZY_SPEC - if _LAZY_SPEC is None: - from lean_spec.forks.lstar.spec import LstarSpec - - _LAZY_SPEC = LstarSpec() - return _LAZY_SPEC - class AttestationSignatureEntry(NamedTuple): """ @@ -158,112 +136,3 @@ class Store(StrictBaseModel): These payloads are 
"known" and contribute to fork choice weights. Used for recursive signature aggregation when building blocks. """ - - @classmethod - def from_anchor( - cls, - state: State, - anchor_block: Block, - validator_id: ValidatorIndex | None, - ) -> "Store": - """Initialize a forkchoice store from an anchor state and block.""" - return _spec().create_store(state, anchor_block, validator_id) # type: ignore[attr-defined] - - def prune_stale_attestation_data(self) -> "Store": - """Remove attestation data that can no longer influence fork choice.""" - return _spec().prune_stale_attestation_data(self) # type: ignore[attr-defined] - - def validate_attestation(self, attestation_data: AttestationData) -> None: - """Validate incoming attestation before processing.""" - _spec().validate_attestation(self, attestation_data) # type: ignore[attr-defined] - - def on_gossip_attestation( - self, - signed_attestation: SignedAttestation, - scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, - is_aggregator: bool = False, - ) -> "Store": - """Process a signed attestation received via gossip network.""" - return _spec().on_gossip_attestation( # type: ignore[attr-defined] - self, signed_attestation, scheme, is_aggregator - ) - - def on_gossip_aggregated_attestation( - self, signed_attestation: SignedAggregatedAttestation - ) -> "Store": - """Process a signed aggregated attestation received via gossip.""" - return _spec().on_gossip_aggregated_attestation( # type: ignore[attr-defined] - self, signed_attestation - ) - - def on_block( - self, - signed_block: SignedBlock, - scheme: GeneralizedXmssScheme = TARGET_SIGNATURE_SCHEME, - ) -> "Store": - """Process a new block and update the forkchoice state.""" - return _spec().on_block(self, signed_block, scheme) # type: ignore[attr-defined] - - def extract_attestations_from_aggregated_payloads( - self, aggregated_payloads: dict[AttestationData, set[AggregatedSignatureProof]] - ) -> dict[ValidatorIndex, AttestationData]: - """Extract attestations from aggregated payloads.""" - return _spec().extract_attestations_from_aggregated_payloads( # type: ignore[attr-defined] - self, aggregated_payloads - ) - - def compute_block_weights(self) -> dict[Bytes32, int]: - """Compute attestation-based weight for each block above the finalized slot.""" - return _spec().compute_block_weights(self) # type: ignore[attr-defined] - - def update_head(self) -> "Store": - """Compute updated store with new canonical head.""" - return _spec().update_head(self) # type: ignore[attr-defined] - - def accept_new_attestations(self) -> "Store": - """Process pending aggregated payloads and update forkchoice head.""" - return _spec().accept_new_attestations(self) # type: ignore[attr-defined] - - def update_safe_target(self) -> "Store": - """Compute the deepest block that has 2/3+ supermajority attestation weight.""" - return _spec().update_safe_target(self) # type: ignore[attr-defined] - - def aggregate(self) -> tuple["Store", list[SignedAggregatedAttestation]]: - """Turn raw validator votes into compact aggregated attestations.""" - return _spec().aggregate(self) # type: ignore[attr-defined] - - def tick_interval( - self, has_proposal: bool, is_aggregator: bool = False - ) -> tuple["Store", list[SignedAggregatedAttestation]]: - """Advance store time by one interval and perform interval-specific actions.""" - return _spec().tick_interval(self, has_proposal, is_aggregator) # type: ignore[attr-defined] - - def on_tick( - self, target_interval: Interval, has_proposal: bool, is_aggregator: bool = False - ) -> 
tuple["Store", list[SignedAggregatedAttestation]]: - """Advance forkchoice store time to given interval count.""" - return _spec().on_tick( # type: ignore[attr-defined] - self, target_interval, has_proposal, is_aggregator - ) - - def get_proposal_head(self, slot: Slot) -> tuple["Store", Bytes32]: - """Get the head for block proposal at given slot.""" - return _spec().get_proposal_head(self, slot) # type: ignore[attr-defined] - - def get_attestation_target(self) -> Checkpoint: - """Calculate target checkpoint for validator attestations.""" - return _spec().get_attestation_target(self) # type: ignore[attr-defined] - - def produce_attestation_data(self, slot: Slot) -> AttestationData: - """Produce attestation data for the given slot.""" - return _spec().produce_attestation_data(self, slot) # type: ignore[attr-defined] - - def produce_block_with_signatures( - self, - slot: Slot, - validator_index: ValidatorIndex, - ) -> tuple["Store", Block, list[AggregatedSignatureProof]]: - """Produce a block and its aggregated signature proofs for the target slot.""" - return _spec().produce_block_with_signatures( # type: ignore[attr-defined] - self, slot, validator_index - ) diff --git a/src/lean_spec/forks/protocol.py b/src/lean_spec/forks/protocol.py index 0db25f32..81722ec1 100644 --- a/src/lean_spec/forks/protocol.py +++ b/src/lean_spec/forks/protocol.py @@ -41,11 +41,6 @@ def config(self) -> "SpecConfigType": """Genesis configuration carried by the state.""" ... - @classmethod - def generate_genesis(cls, genesis_time: Uint64, validators: SSZList[Any]) -> Self: - """Construct the fork's genesis state.""" - ... - class SpecBlockType(SpecSSZType, Protocol): """Structural contract: any fork's Block container class.""" @@ -359,10 +354,11 @@ class ForkProtocol(ABC): config_class: type[SpecConfigType] """Concrete genesis Config container class.""" + @abstractmethod def generate_genesis(self, genesis_time: Uint64, validators: SSZList[Any]) -> SpecStateType: - """Construct a genesis state using this fork's State class.""" - return self.state_class.generate_genesis(genesis_time, validators) + """Construct a genesis state for this fork.""" + @abstractmethod def create_store( self, state: SpecStateType, @@ -370,7 +366,6 @@ def create_store( validator_id: ValidatorIndex | None, ) -> SpecStoreType: """Construct a forkchoice store anchored at the given state and block.""" - return self.store_class.from_anchor(state, anchor_block, validator_id) @abstractmethod def upgrade_state(self, state: SpecStateType) -> SpecStateType: diff --git a/src/lean_spec/subspecs/api/endpoints/fork_choice.py b/src/lean_spec/subspecs/api/endpoints/fork_choice.py index 7c9a5c74..42ad1778 100644 --- a/src/lean_spec/subspecs/api/endpoints/fork_choice.py +++ b/src/lean_spec/subspecs/api/endpoints/fork_choice.py @@ -6,6 +6,11 @@ from aiohttp import web +from lean_spec.forks import LstarSpec + +_SPEC = LstarSpec() +"""Active fork spec — stateless, safe to share across all endpoint invocations.""" + async def handle(request: web.Request) -> web.Response: """ @@ -34,7 +39,7 @@ async def handle(request: web.Request) -> web.Response: raise web.HTTPServiceUnavailable(reason="Store not initialized") finalized_slot = store.latest_finalized.slot - weights = store.compute_block_weights() + weights = _SPEC.compute_block_weights(store) nodes = [] for root, block in store.blocks.items(): diff --git a/src/lean_spec/subspecs/chain/service.py b/src/lean_spec/subspecs/chain/service.py index a03d21a9..f4c8370f 100644 --- a/src/lean_spec/subspecs/chain/service.py 
+++ b/src/lean_spec/subspecs/chain/service.py @@ -26,7 +26,7 @@ import logging from dataclasses import dataclass, field -from lean_spec.forks import SignedAggregatedAttestation +from lean_spec.forks import LstarSpec, SignedAggregatedAttestation from lean_spec.subspecs.chain.config import INTERVALS_PER_SLOT from lean_spec.subspecs.sync import SyncService from lean_spec.types import Uint64 @@ -35,6 +35,9 @@ logger = logging.getLogger(__name__) +_SPEC = LstarSpec() +"""Active fork spec — stateless, safe to share across all chain invocations.""" + @dataclass(slots=True) class ChainService: @@ -172,7 +175,8 @@ async def _tick_to(self, target_interval: Interval) -> list[SignedAggregatedAtte # Tick remaining intervals one at a time. while store.time < target_interval: - store, new_aggregates = store.tick_interval( + store, new_aggregates = _SPEC.tick_interval( + store, has_proposal=False, is_aggregator=self.sync_service.is_aggregator, ) diff --git a/src/lean_spec/subspecs/sync/service.py b/src/lean_spec/subspecs/sync/service.py index 07f2894f..fb0579ff 100644 --- a/src/lean_spec/subspecs/sync/service.py +++ b/src/lean_spec/subspecs/sync/service.py @@ -43,6 +43,7 @@ from lean_spec.forks import ( Block, BlockLookup, + LstarSpec, SignedAggregatedAttestation, SignedAttestation, SignedBlock, @@ -65,6 +66,9 @@ logger = logging.getLogger(__name__) +_SPEC = LstarSpec() +"""Active fork spec — stateless, safe to share across all sync invocations.""" + @dataclass(slots=True) class _SyncStoreView: @@ -115,7 +119,7 @@ def default_block_processor( itself through the observer, wired at node startup. Everything else here is derived by diffing pre- and post-stores. """ - new_store = store.on_block(block) + new_store = _SPEC.on_block(store, block) metrics.lean_head_slot.set(new_store.blocks[new_store.head].slot) metrics.lean_safe_target_slot.set(new_store.blocks[new_store.safe_target].slot) @@ -574,7 +578,8 @@ async def on_gossip_attestation( # Invalid attestations (bad signature, unknown target) are rejected. # Validation failures are logged but don't crash the event loop. 
try: - self.store = self.store.on_gossip_attestation( + self.store = _SPEC.on_gossip_attestation( + self.store, signed_attestation=attestation, is_aggregator=is_aggregator_role, ) @@ -631,7 +636,7 @@ async def on_gossip_aggregated_attestation( ) try: - self.store = self.store.on_gossip_aggregated_attestation(signed_attestation) + self.store = _SPEC.on_gossip_aggregated_attestation(self.store, signed_attestation) logger.info( "Aggregated attestation from peer %s slot=%s: validation and signature ok", peer_str, @@ -665,7 +670,8 @@ def _replay_pending_attestations(self) -> None: self._pending_attestations = [] for attestation in pending: try: - self.store = self.store.on_gossip_attestation( + self.store = _SPEC.on_gossip_attestation( + self.store, signed_attestation=attestation, is_aggregator=is_aggregator_role, ) @@ -676,7 +682,7 @@ def _replay_pending_attestations(self) -> None: self._pending_aggregated_attestations = [] for signed_attestation in pending_agg: try: - self.store = self.store.on_gossip_aggregated_attestation(signed_attestation) + self.store = _SPEC.on_gossip_aggregated_attestation(self.store, signed_attestation) except (AssertionError, KeyError): self._pending_aggregated_attestations.append(signed_attestation) diff --git a/src/lean_spec/subspecs/validator/service.py b/src/lean_spec/subspecs/validator/service.py index 2857dc35..cb4ab68e 100644 --- a/src/lean_spec/subspecs/validator/service.py +++ b/src/lean_spec/subspecs/validator/service.py @@ -42,6 +42,7 @@ AttestationSignatures, Block, BlockSignatures, + LstarSpec, SignedAttestation, SignedBlock, ) @@ -57,6 +58,9 @@ logger = logging.getLogger(__name__) +_SPEC = LstarSpec() +"""Active fork spec — stateless, safe to share across all validator invocations.""" + type BlockPublisher = Callable[[SignedBlock], Awaitable[None]] """Callback for publishing signed blocks.""" type AttestationPublisher = Callable[[SignedAttestation], Awaitable[None]] @@ -259,7 +263,8 @@ async def _maybe_produce_block(self, slot: Slot) -> None: # We are the proposer for this slot. 
try: - new_store, block, signatures = store.produce_block_with_signatures( + new_store, block, signatures = _SPEC.produce_block_with_signatures( + store, slot=slot, validator_index=validator_index, ) @@ -330,7 +335,7 @@ async def _produce_attestations(self, slot: Slot) -> None: break # Ensure we are attesting to the latest known head - self.sync_service.store = self.sync_service.store.update_head() + self.sync_service.store = _SPEC.update_head(self.sync_service.store) store = self.sync_service.store head_state = store.states.get(store.head) @@ -338,7 +343,7 @@ async def _produce_attestations(self, slot: Slot) -> None: return for validator_index in self.registry.indices(): - attestation_data = store.produce_attestation_data(slot) + attestation_data = _SPEC.produce_attestation_data(store, slot) signed_attestation = self._sign_attestation(attestation_data, validator_index) self._attestations_produced += 1 @@ -353,7 +358,8 @@ async def _produce_attestations(self, slot: Slot) -> None: self.sync_service.store.validator_id is not None and self.sync_service.is_aggregator ) try: - self.sync_service.store = self.sync_service.store.on_gossip_attestation( + self.sync_service.store = _SPEC.on_gossip_attestation( + self.sync_service.store, signed_attestation=signed_attestation, is_aggregator=is_aggregator_role, ) diff --git a/tests/consensus/devnet/state_transition/test_block_processing.py b/tests/consensus/devnet/state_transition/test_block_processing.py index 9e600544..4b7d3828 100644 --- a/tests/consensus/devnet/state_transition/test_block_processing.py +++ b/tests/consensus/devnet/state_transition/test_block_processing.py @@ -9,8 +9,11 @@ ) from lean_spec.forks.lstar.containers.state.types import JustifiedSlots +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.types import Boolean, Bytes32, Slot, ValidatorIndex +_SPEC = LstarSpec() + pytestmark = pytest.mark.valid_until("Lstar") @@ -333,7 +336,7 @@ def test_block_with_wrong_slot(state_transition_test: StateTransitionTestFiller) - Essential for slot-based consensus """ pre_state = generate_pre_state() - pre_state = pre_state.process_slots(Slot(1)) + pre_state = _SPEC.process_slots(pre_state, Slot(1)) state_transition_test( pre=pre_state, diff --git a/tests/consensus/devnet/state_transition/test_finalization.py b/tests/consensus/devnet/state_transition/test_finalization.py index a3376765..1278f070 100644 --- a/tests/consensus/devnet/state_transition/test_finalization.py +++ b/tests/consensus/devnet/state_transition/test_finalization.py @@ -14,9 +14,12 @@ JustificationValidators, JustifiedSlots, ) +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Boolean, Slot, ValidatorIndex +_SPEC = LstarSpec() + pytestmark = pytest.mark.valid_until("Lstar") @@ -218,7 +221,7 @@ def test_no_finalization_when_intermediate_justifiable_slot_exists( 7. 
There are no pending justifications """ pre = generate_pre_state() - anchor_root = hash_tree_root(pre.process_slots(Slot(1)).latest_block_header) + anchor_root = hash_tree_root(_SPEC.process_slots(pre, Slot(1)).latest_block_header) state_transition_test( pre=pre, diff --git a/tests/consensus/devnet/state_transition/test_genesis.py b/tests/consensus/devnet/state_transition/test_genesis.py index d29a6d5b..409fa66d 100644 --- a/tests/consensus/devnet/state_transition/test_genesis.py +++ b/tests/consensus/devnet/state_transition/test_genesis.py @@ -24,9 +24,12 @@ JustificationValidators, JustifiedSlots, ) +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Slot, Uint64, ValidatorIndex +_SPEC = LstarSpec() + pytestmark = pytest.mark.valid_until("Lstar") @@ -245,7 +248,7 @@ def test_first_post_genesis_block_sets_checkpoint_anchor_roots( 5. justified_slots is empty """ pre = generate_pre_state() - anchor_root = hash_tree_root(pre.process_slots(Slot(1)).latest_block_header) + anchor_root = hash_tree_root(_SPEC.process_slots(pre, Slot(1)).latest_block_header) state_transition_test( pre=pre, diff --git a/tests/consensus/devnet/state_transition/test_slot_monotonicity.py b/tests/consensus/devnet/state_transition/test_slot_monotonicity.py index 18132e31..9d910e30 100644 --- a/tests/consensus/devnet/state_transition/test_slot_monotonicity.py +++ b/tests/consensus/devnet/state_transition/test_slot_monotonicity.py @@ -7,8 +7,11 @@ generate_pre_state, ) +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.types import Slot +_SPEC = LstarSpec() + pytestmark = pytest.mark.valid_until("Devnet") @@ -37,7 +40,7 @@ def test_process_slots_target_equal_to_state_slot_rejected( - Protects against replay of already-processed slots. """ pre_state = generate_pre_state() - pre_state = pre_state.process_slots(Slot(1)) + pre_state = _SPEC.process_slots(pre_state, Slot(1)) state_transition_test( pre=pre_state, @@ -78,7 +81,7 @@ def test_block_at_parent_slot_rejected_when_slot_processing_skipped( when the chain tip is at or above the claimed slot. 
""" pre_state = generate_pre_state() - pre_state = pre_state.process_slots(Slot(1)) + pre_state = _SPEC.process_slots(pre_state, Slot(1)) state_transition_test( pre=pre_state, diff --git a/tests/lean_spec/conftest.py b/tests/lean_spec/conftest.py index 3e61c2eb..24ef77a9 100644 --- a/tests/lean_spec/conftest.py +++ b/tests/lean_spec/conftest.py @@ -8,6 +8,7 @@ from __future__ import annotations from collections.abc import Callable +from typing import cast import pytest from consensus_testing.keys import XmssKeyManager @@ -75,12 +76,15 @@ def genesis_block(genesis_state: State) -> Block: @pytest.fixture -def base_store(genesis_state: State, genesis_block: Block) -> Store: +def base_store(spec: LstarSpec, genesis_state: State, genesis_block: Block) -> Store: """Fork choice store initialized with genesis.""" - return Store.from_anchor( - genesis_state, - genesis_block, - validator_id=ValidatorIndex(0), + return cast( + Store, + spec.create_store( + genesis_state, + genesis_block, + validator_id=ValidatorIndex(0), + ), ) @@ -112,6 +116,11 @@ def keyed_store(keyed_genesis: GenesisData) -> Store: @pytest.fixture -def observer_store(keyed_genesis_state: State, keyed_genesis_block: Block) -> Store: +def observer_store( + spec: LstarSpec, keyed_genesis_state: State, keyed_genesis_block: Block +) -> Store: """Fork choice store with validator_id=None (non-validator observer).""" - return Store.from_anchor(keyed_genesis_state, keyed_genesis_block, validator_id=None) + return cast( + Store, + spec.create_store(keyed_genesis_state, keyed_genesis_block, validator_id=None), + ) diff --git a/tests/lean_spec/helpers/builders.py b/tests/lean_spec/helpers/builders.py index fcda1164..517a25e2 100644 --- a/tests/lean_spec/helpers/builders.py +++ b/tests/lean_spec/helpers/builders.py @@ -142,7 +142,7 @@ def make_genesis_state( """ if validators is None: validators = make_validators(num_validators) - return State.generate_genesis(genesis_time=Uint64(genesis_time), validators=validators) + return _SPEC.generate_genesis(genesis_time=Uint64(genesis_time), validators=validators) def make_empty_block_body() -> BlockBody: @@ -339,7 +339,7 @@ def make_genesis_data( validators = make_validators(num_validators) genesis_state = make_genesis_state(validators=validators, genesis_time=genesis_time) genesis_block = make_genesis_block(genesis_state) - store = Store.from_anchor(genesis_state, genesis_block, validator_id=validator_id) + store = cast(Store, _SPEC.create_store(genesis_state, genesis_block, validator_id=validator_id)) return GenesisData(store, genesis_state, genesis_block) diff --git a/tests/lean_spec/subspecs/containers/test_state_aggregation.py b/tests/lean_spec/subspecs/containers/test_state_aggregation.py index 27472e09..5b727e73 100644 --- a/tests/lean_spec/subspecs/containers/test_state_aggregation.py +++ b/tests/lean_spec/subspecs/containers/test_state_aggregation.py @@ -22,6 +22,7 @@ def test_aggregated_signatures_prefers_full_gossip_payload( container_key_manager: XmssKeyManager, + spec: LstarSpec, ) -> None: store = make_store(num_validators=2, key_manager=container_key_manager) head_state = store.states[store.head] @@ -40,7 +41,7 @@ def test_aggregated_signatures_prefers_full_gossip_payload( } store = store.model_copy(update={"attestation_signatures": attestation_signatures}) - _, results = store.aggregate() + _, results = spec.aggregate(store) assert len(results) == 1 assert set(results[0].proof.participants.to_validator_indices()) == { @@ -133,16 +134,18 @@ def 
test_build_block_skips_attestations_without_signatures( def test_aggregate_with_empty_attestation_signatures( container_key_manager: XmssKeyManager, + spec: LstarSpec, ) -> None: """Empty attestations list should return empty results.""" store = make_store(num_validators=2, key_manager=container_key_manager) - _, results = store.aggregate() + _, results = spec.aggregate(store) assert results == [] def test_aggregated_signatures_with_multiple_data_groups( container_key_manager: XmssKeyManager, + spec: LstarSpec, ) -> None: """Multiple attestation data groups should be processed independently.""" store = make_store(num_validators=4, key_manager=container_key_manager) @@ -179,7 +182,7 @@ def test_aggregated_signatures_with_multiple_data_groups( } store = store.model_copy(update={"attestation_signatures": attestation_signatures}) - _, results = store.aggregate() + _, results = spec.aggregate(store) assert len(results) == 2 @@ -360,6 +363,7 @@ def test_build_block_skips_unknown_head_root( def test_aggregate_with_no_signatures( container_key_manager: XmssKeyManager, + spec: LstarSpec, ) -> None: """ Test edge case where the store has no attestation signatures or payloads. @@ -367,7 +371,7 @@ def test_aggregate_with_no_signatures( Returns empty results (no attestations can be aggregated without signatures). """ store = make_store(num_validators=2, key_manager=container_key_manager) - _, results = store.aggregate() + _, results = spec.aggregate(store) assert results == [] diff --git a/tests/lean_spec/subspecs/forkchoice/test_attestation_target.py b/tests/lean_spec/subspecs/forkchoice/test_attestation_target.py index 1927c335..8cdee3fb 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_attestation_target.py +++ b/tests/lean_spec/subspecs/forkchoice/test_attestation_target.py @@ -25,9 +25,9 @@ class TestGetAttestationTarget: """Tests for Store.get_attestation_target() method.""" - def test_attestation_target_at_genesis(self, observer_store: Store) -> None: + def test_attestation_target_at_genesis(self, spec: LstarSpec, observer_store: Store) -> None: """Target at genesis should be the genesis block.""" - target = observer_store.get_attestation_target() + target = spec.get_attestation_target(observer_store) genesis_hash = observer_store.head genesis_block = observer_store.blocks[genesis_hash] @@ -35,9 +35,11 @@ def test_attestation_target_at_genesis(self, observer_store: Store) -> None: assert target.root == genesis_hash assert target.slot == genesis_block.slot - def test_attestation_target_returns_checkpoint(self, observer_store: Store) -> None: + def test_attestation_target_returns_checkpoint( + self, spec: LstarSpec, observer_store: Store + ) -> None: """get_attestation_target should return a Checkpoint.""" - target = observer_store.get_attestation_target() + target = spec.get_attestation_target(observer_store) assert isinstance(target, Checkpoint) assert target.root in observer_store.blocks @@ -67,7 +69,7 @@ def test_attestation_target_walks_back_toward_safe_target( assert store.blocks[store.safe_target].slot == Slot(0) # Get attestation target - target = store.get_attestation_target() + target = spec.get_attestation_target(store) # Target should be walked back from head toward safe_target # It cannot exceed JUSTIFICATION_LOOKBACK_SLOTS steps back from head @@ -90,7 +92,7 @@ def test_attestation_target_respects_justifiable_slots( proposer = ValidatorIndex(slot_num % len(store.states[store.head].validators)) store, _, _ = spec.produce_block_with_signatures(store, slot, proposer) - target = 
store.get_attestation_target() + target = spec.get_attestation_target(store) finalized_slot = store.latest_finalized.slot # The target slot must be justifiable after the finalized slot @@ -108,7 +110,7 @@ def test_attestation_target_consistency_with_head( proposer = ValidatorIndex(slot_num % len(store.states[store.head].validators)) store, _, _ = spec.produce_block_with_signatures(store, slot, proposer) - target = store.get_attestation_target() + target = spec.get_attestation_target(store) # Walk from head back to target and verify the path exists current_root = store.head @@ -163,10 +165,10 @@ def test_safe_target_requires_supermajority( store = spec.on_gossip_attestation(store, signed_attestation, is_aggregator=True) # Aggregate the signatures - store, _ = store.aggregate() + store, _ = spec.aggregate(store) # Update safe target (uses latest_new_aggregated_payloads) - store = store.update_safe_target() + store = spec.update_safe_target(store) # Safe target should still be at genesis (insufficient votes) current_safe_slot = store.blocks[store.safe_target].slot @@ -208,10 +210,10 @@ def test_safe_target_advances_with_supermajority( store = spec.on_gossip_attestation(store, signed_attestation, is_aggregator=True) # Aggregate the signatures - store, _ = store.aggregate() + store, _ = spec.aggregate(store) # Update safe target - store = store.update_safe_target() + store = spec.update_safe_target(store) # Verify the aggregation produced payloads and safe target was updated. # Safe target advancement depends on the full 3SF-mini justification rules, @@ -247,10 +249,10 @@ def test_update_safe_target_uses_new_attestations( store = spec.on_gossip_attestation(store, signed_attestation, is_aggregator=True) # Aggregate into new payloads - store, _ = store.aggregate() + store, _ = spec.aggregate(store) # Update safe target should use new aggregated payloads - store = store.update_safe_target() + store = spec.update_safe_target(store) # Verify update_safe_target processes new aggregated payloads without error assert store.safe_target in store.blocks @@ -301,7 +303,7 @@ def test_justification_with_supermajority_attestations( store = spec.on_gossip_attestation(store, signed_attestation, is_aggregator=True) # Aggregate signatures before producing the next block - store, _ = store.aggregate() + store, _ = spec.aggregate(store) # Produce block 2 which includes these attestations store, block_2, signatures = spec.produce_block_with_signatures(store, slot_2, proposer_2) @@ -349,7 +351,7 @@ def test_justification_requires_valid_source( # This attestation should fail validation because source is unknown with pytest.raises(AssertionError, match="Unknown source block"): - store.validate_attestation(attestation.data) + spec.validate_attestation(store, attestation.data) def test_justification_tracking_with_multiple_targets( self, @@ -383,8 +385,8 @@ def test_justification_tracking_with_multiple_targets( ) store = spec.on_gossip_attestation(store, signed_attestation, is_aggregator=True) - store, _ = store.aggregate() - store = store.update_safe_target() + store, _ = spec.aggregate(store) + store = spec.update_safe_target(store) # With only half the validators, safe target should not advance past genesis assert store.blocks[store.safe_target].slot == Slot(0) @@ -460,7 +462,7 @@ def test_attestation_target_with_skipped_slots( # Skip slot 2, 3 store, _, _ = spec.produce_block_with_signatures(store, Slot(4), ValidatorIndex(4)) - target = store.get_attestation_target() + target = spec.get_attestation_target(store) # 
Target should still be valid despite skipped slots assert target.root in store.blocks @@ -468,13 +470,14 @@ def test_attestation_target_with_skipped_slots( def test_attestation_target_single_validator( self, + spec: LstarSpec, key_manager: XmssKeyManager, ) -> None: """Attestation target computation should work with single validator.""" store = make_store(num_validators=1, key_manager=key_manager, validator_id=None) # Should be able to get attestation target - target = store.get_attestation_target() + target = spec.get_attestation_target(store) assert target.root == store.head def test_attestation_target_at_justification_lookback_boundary( @@ -492,7 +495,7 @@ def test_attestation_target_at_justification_lookback_boundary( proposer = ValidatorIndex(slot_num % len(store.states[store.head].validators)) store, _, _ = spec.produce_block_with_signatures(store, slot, proposer) - target = store.get_attestation_target() + target = spec.get_attestation_target(store) head_slot = store.blocks[store.head].slot # Target should not be more than JUSTIFICATION_LOOKBACK_SLOTS behind head @@ -533,10 +536,10 @@ def test_full_attestation_cycle( store = spec.on_gossip_attestation(store, signed_attestation, is_aggregator=True) # Phase 3: Aggregate signatures into payloads - store, _ = store.aggregate() + store, _ = spec.aggregate(store) # Phase 4: Update safe target - store = store.update_safe_target() + store = spec.update_safe_target(store) # Verify the full cycle completed: safe target is a valid block in the store assert store.safe_target in store.blocks @@ -584,7 +587,7 @@ def test_attestation_target_after_on_block( consumer_store = spec.on_block(consumer_store, signed_block) # Get attestation target after on_block - target = consumer_store.get_attestation_target() + target = spec.get_attestation_target(consumer_store) # Target should be valid assert target.root in consumer_store.blocks diff --git a/tests/lean_spec/subspecs/forkchoice/test_compute_block_weights.py b/tests/lean_spec/subspecs/forkchoice/test_compute_block_weights.py index ec659ea0..94dcfa4b 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_compute_block_weights.py +++ b/tests/lean_spec/subspecs/forkchoice/test_compute_block_weights.py @@ -4,6 +4,7 @@ from lean_spec.forks.lstar import Store from lean_spec.forks.lstar.containers.attestation import AttestationData +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof from lean_spec.types import Checkpoint, Slot, ValidatorIndex, ValidatorIndices @@ -19,12 +20,12 @@ def _make_empty_proof(participants: list[ValidatorIndex]) -> AggregatedSignature ) -def test_genesis_only_store_returns_empty_weights(base_store: Store) -> None: +def test_genesis_only_store_returns_empty_weights(spec: LstarSpec, base_store: Store) -> None: """A genesis-only store with no attestations has no block weights.""" - assert base_store.compute_block_weights() == {} + assert spec.compute_block_weights(base_store) == {} -def test_linear_chain_weight_accumulates_upward(base_store: Store) -> None: +def test_linear_chain_weight_accumulates_upward(spec: LstarSpec, base_store: Store) -> None: """Weights walk up from the attested head through all ancestors above finalized slot.""" genesis_root = base_store.head @@ -73,7 +74,7 @@ def test_linear_chain_weight_accumulates_upward(base_store: Store) -> None: } ) - weights = store.compute_block_weights() + weights = spec.compute_block_weights(store) # Validator 0 
attests to block2 as head. # Walking up: block2 (slot 2 > 0) gets +1, block1 (slot 1 > 0) gets +1. @@ -81,7 +82,7 @@ def test_linear_chain_weight_accumulates_upward(base_store: Store) -> None: assert weights == {block2_root: 1, block1_root: 1} -def test_multiple_attestations_accumulate(base_store: Store) -> None: +def test_multiple_attestations_accumulate(spec: LstarSpec, base_store: Store) -> None: """Multiple validators attesting to the same head accumulate weight.""" genesis_root = base_store.head @@ -120,7 +121,7 @@ def test_multiple_attestations_accumulate(base_store: Store) -> None: } ) - weights = store.compute_block_weights() + weights = spec.compute_block_weights(store) # Both validators contribute weight to block1 assert weights == {block1_root: 2} diff --git a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py index 57883c49..f2c50561 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py +++ b/tests/lean_spec/subspecs/forkchoice/test_store_attestations.py @@ -65,8 +65,8 @@ def test_on_block_processes_multi_validator_aggregations( updated_store = spec.on_block(consumer_store, signed_block) # Verify attestations can be extracted from aggregated payloads - extracted_attestations = updated_store.extract_attestations_from_aggregated_payloads( - updated_store.latest_known_aggregated_payloads + extracted_attestations = spec.extract_attestations_from_aggregated_payloads( + updated_store, updated_store.latest_known_aggregated_payloads ) assert ValidatorIndex(1) in extracted_attestations assert ValidatorIndex(2) in extracted_attestations @@ -492,7 +492,7 @@ class TestAggregateCommitteeSignatures: """ def test_aggregates_attestation_signatures_into_proof( - self, key_manager: XmssKeyManager + self, key_manager: XmssKeyManager, spec: LstarSpec ) -> None: """ Aggregation creates proofs from collected gossip signatures. @@ -512,7 +512,7 @@ def test_aggregates_attestation_signatures_into_proof( ) # Perform aggregation - updated_store, _ = store.aggregate() + updated_store, _ = spec.aggregate(store) # Verify proofs were created and stored keyed by attestation data assert attestation_data in updated_store.latest_new_aggregated_payloads, ( @@ -521,7 +521,7 @@ def test_aggregates_attestation_signatures_into_proof( proofs = updated_store.latest_new_aggregated_payloads[attestation_data] assert len(proofs) >= 1, "At least one proof should exist" - def test_aggregated_proof_is_valid(self, key_manager: XmssKeyManager) -> None: + def test_aggregated_proof_is_valid(self, key_manager: XmssKeyManager, spec: LstarSpec) -> None: """ Created aggregated proof passes verification. @@ -537,7 +537,7 @@ def test_aggregated_proof_is_valid(self, key_manager: XmssKeyManager) -> None: attesting_validators=attesting_validators, ) - updated_store, _ = store.aggregate() + updated_store, _ = spec.aggregate(store) proofs = updated_store.latest_new_aggregated_payloads[attestation_data] proof = next(iter(proofs)) @@ -554,7 +554,7 @@ def test_aggregated_proof_is_valid(self, key_manager: XmssKeyManager) -> None: ) def test_empty_attestation_signatures_produces_no_proofs( - self, key_manager: XmssKeyManager + self, key_manager: XmssKeyManager, spec: LstarSpec ) -> None: """ No proofs created when attestation_signatures is empty. 
@@ -568,7 +568,7 @@ def test_empty_attestation_signatures_produces_no_proofs( attesting_validators=[], # No attesters ) - updated_store, _ = store.aggregate() + updated_store, _ = spec.aggregate(store) # Verify no proofs were created assert len(updated_store.latest_new_aggregated_payloads) == 0 @@ -609,7 +609,7 @@ def test_multiple_attestation_data_grouped_separately( } ) - updated_store, _ = store.aggregate() + updated_store, _ = spec.aggregate(store) # Verify both attestation data have separate proofs assert att_data_1 in updated_store.latest_new_aggregated_payloads @@ -625,7 +625,7 @@ class TestTickIntervalAggregation: """ def test_interval_2_triggers_aggregation_for_aggregator( - self, key_manager: XmssKeyManager + self, key_manager: XmssKeyManager, spec: LstarSpec ) -> None: """ Aggregation is triggered at interval 2 when is_aggregator=True. @@ -648,7 +648,7 @@ def test_interval_2_triggers_aggregation_for_aggregator( store = store.model_copy(update={"time": Uint64(1)}) # Tick to interval 2 as aggregator - updated_store, _ = store.tick_interval(has_proposal=False, is_aggregator=True) + updated_store, _ = spec.tick_interval(store, has_proposal=False, is_aggregator=True) # Verify aggregation was performed assert attestation_data in updated_store.latest_new_aggregated_payloads, ( @@ -656,7 +656,7 @@ def test_interval_2_triggers_aggregation_for_aggregator( ) def test_interval_2_skips_aggregation_for_non_aggregator( - self, key_manager: XmssKeyManager + self, key_manager: XmssKeyManager, spec: LstarSpec ) -> None: """ Aggregation is NOT triggered at interval 2 when is_aggregator=False. @@ -676,14 +676,16 @@ def test_interval_2_skips_aggregation_for_non_aggregator( store = store.model_copy(update={"time": Uint64(1)}) # Tick to interval 2 as NON-aggregator - updated_store, _ = store.tick_interval(has_proposal=False, is_aggregator=False) + updated_store, _ = spec.tick_interval(store, has_proposal=False, is_aggregator=False) # Verify aggregation was NOT performed assert attestation_data not in updated_store.latest_new_aggregated_payloads, ( "Aggregation should NOT occur for non-aggregators" ) - def test_other_intervals_do_not_trigger_aggregation(self, key_manager: XmssKeyManager) -> None: + def test_other_intervals_do_not_trigger_aggregation( + self, key_manager: XmssKeyManager, spec: LstarSpec + ) -> None: """ Aggregation is NOT triggered at intervals other than 2. @@ -709,14 +711,16 @@ def test_other_intervals_do_not_trigger_aggregation(self, key_manager: XmssKeyMa pre_tick_time = (target_interval - 1) % int(INTERVALS_PER_SLOT) test_store = store.model_copy(update={"time": Uint64(pre_tick_time)}) - updated_store, _ = test_store.tick_interval(has_proposal=False, is_aggregator=True) + updated_store, _ = spec.tick_interval( + test_store, has_proposal=False, is_aggregator=True + ) assert attestation_data not in updated_store.latest_new_aggregated_payloads, ( f"Aggregation should NOT occur at interval {target_interval}" ) def test_interval_0_accepts_attestations_with_proposal( - self, key_manager: XmssKeyManager + self, key_manager: XmssKeyManager, spec: LstarSpec ) -> None: """ Interval 0 accepts new attestations when has_proposal=True. 
@@ -732,7 +736,7 @@ def test_interval_0_accepts_attestations_with_proposal( store = store.model_copy(update={"time": Uint64(4)}) # Tick to interval 0 with proposal - updated_store, _ = store.tick_interval(has_proposal=True, is_aggregator=True) + updated_store, _ = spec.tick_interval(store, has_proposal=True, is_aggregator=True) # Verify time advanced assert updated_store.time == Uint64(5) @@ -796,7 +800,7 @@ def test_gossip_to_aggregation_to_storage( # Step 2: Advance to interval 2 (aggregation interval) store = store.model_copy(update={"time": Uint64(1)}) - store, _ = store.tick_interval(has_proposal=False, is_aggregator=True) + store, _ = spec.tick_interval(store, has_proposal=False, is_aggregator=True) # Step 3: Verify aggregated proofs were created assert attestation_data in store.latest_new_aggregated_payloads, ( diff --git a/tests/lean_spec/subspecs/forkchoice/test_store_pruning.py b/tests/lean_spec/subspecs/forkchoice/test_store_pruning.py index abf31486..c5ed7cb9 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_store_pruning.py +++ b/tests/lean_spec/subspecs/forkchoice/test_store_pruning.py @@ -1,6 +1,7 @@ """Tests for Store attestation data pruning.""" from lean_spec.forks.lstar import AttestationSignatureEntry, Store +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.xmss.aggregation import AggregatedSignatureProof from lean_spec.types import ByteListMiB, Bytes32, Slot, ValidatorIndex, ValidatorIndices from tests.lean_spec.helpers import ( @@ -11,7 +12,7 @@ ) -def test_prunes_entries_with_target_at_finalized(pruning_store: Store) -> None: +def test_prunes_entries_with_target_at_finalized(spec: LstarSpec, pruning_store: Store) -> None: """Verify entries with target.slot == finalized slot are pruned.""" store = pruning_store @@ -40,12 +41,12 @@ def test_prunes_entries_with_target_at_finalized(pruning_store: Store) -> None: assert attestation_data in store.attestation_signatures # Prune should remove entries where target.slot <= finalized.slot - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) assert attestation_data not in pruned_store.attestation_signatures -def test_prunes_entries_with_target_before_finalized(pruning_store: Store) -> None: +def test_prunes_entries_with_target_before_finalized(spec: LstarSpec, pruning_store: Store) -> None: """Verify entries with target.slot < finalized slot are pruned.""" store = pruning_store @@ -74,12 +75,12 @@ def test_prunes_entries_with_target_before_finalized(pruning_store: Store) -> No assert attestation_data in store.attestation_signatures # Prune should remove entries where target.slot <= finalized.slot - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) assert attestation_data not in pruned_store.attestation_signatures -def test_keeps_entries_with_target_after_finalized(pruning_store: Store) -> None: +def test_keeps_entries_with_target_after_finalized(spec: LstarSpec, pruning_store: Store) -> None: """Verify entries with target.slot > finalized slot are kept.""" store = pruning_store @@ -108,12 +109,12 @@ def test_keeps_entries_with_target_after_finalized(pruning_store: Store) -> None assert attestation_data in store.attestation_signatures # Prune should keep entries where target.slot > finalized.slot - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) assert attestation_data in pruned_store.attestation_signatures -def 
test_prunes_related_structures_together(pruning_store: Store) -> None: +def test_prunes_related_structures_together(spec: LstarSpec, pruning_store: Store) -> None: """Verify all three data structures are pruned atomically.""" store = pruning_store @@ -172,7 +173,7 @@ def test_prunes_related_structures_together(pruning_store: Store) -> None: assert fresh_attestation in store.latest_new_aggregated_payloads assert fresh_attestation in store.latest_known_aggregated_payloads - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) # Stale entries should be removed from all structures assert stale_attestation not in pruned_store.attestation_signatures @@ -185,7 +186,7 @@ def test_prunes_related_structures_together(pruning_store: Store) -> None: assert fresh_attestation in pruned_store.latest_known_aggregated_payloads -def test_handles_empty_attestation_signatures(pruning_store: Store) -> None: +def test_handles_empty_attestation_signatures(spec: LstarSpec, pruning_store: Store) -> None: """Verify pruning works correctly when attestation_signatures is empty.""" store = pruning_store @@ -193,12 +194,14 @@ def test_handles_empty_attestation_signatures(pruning_store: Store) -> None: assert len(store.attestation_signatures) == 0 # Pruning should not fail - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) assert len(pruned_store.attestation_signatures) == 0 -def test_prunes_multiple_validators_same_attestation_data(pruning_store: Store) -> None: +def test_prunes_multiple_validators_same_attestation_data( + spec: LstarSpec, pruning_store: Store +) -> None: """Verify pruning removes entries for multiple validators with same attestation data.""" store = pruning_store @@ -228,13 +231,13 @@ def test_prunes_multiple_validators_same_attestation_data(pruning_store: Store) assert stale_attestation in store.attestation_signatures assert len(store.attestation_signatures[stale_attestation]) == 2 - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) # All validators' signatures should be removed (whole entry pruned) assert stale_attestation not in pruned_store.attestation_signatures -def test_mixed_stale_and_fresh_entries(pruning_store: Store) -> None: +def test_mixed_stale_and_fresh_entries(spec: LstarSpec, pruning_store: Store) -> None: """Verify correct pruning behavior with a mix of stale and fresh entries.""" store = pruning_store @@ -267,7 +270,7 @@ def test_mixed_stale_and_fresh_entries(pruning_store: Store) -> None: for att in attestations: assert att in store.attestation_signatures - pruned_store = store.prune_stale_attestation_data() + pruned_store = spec.prune_stale_attestation_data(store) # Entries with target.slot <= 5 should be pruned (slots 1-5) for att in attestations[:5]: diff --git a/tests/lean_spec/subspecs/forkchoice/test_time_management.py b/tests/lean_spec/subspecs/forkchoice/test_time_management.py index 565247f1..5607f497 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_time_management.py +++ b/tests/lean_spec/subspecs/forkchoice/test_time_management.py @@ -1,9 +1,11 @@ """Tests for time advancement, intervals, and slot management.""" +from typing import cast + from hypothesis import given, settings from hypothesis import strategies as st -from lean_spec.forks.lstar import State, Store +from lean_spec.forks.lstar import Store from lean_spec.forks.lstar.containers import Block from 
lean_spec.forks.lstar.containers.state import Validators from lean_spec.forks.lstar.spec import LstarSpec @@ -25,10 +27,11 @@ class TestGetForkchoiceStore: @settings(max_examples=100) @given(anchor_slot=st.integers(min_value=0, max_value=10000)) def test_store_time_from_anchor_slot(self, anchor_slot: int) -> None: - """Store.from_anchor sets time = anchor_slot * INTERVALS_PER_SLOT.""" + """spec.create_store sets time = anchor_slot * INTERVALS_PER_SLOT.""" # Must create its own state and block instead of using sample_store() - # because sample_store() bypasses from_anchor() with hardcoded time. - state = State.generate_genesis( + # because sample_store() bypasses create_store() with hardcoded time. + spec = LstarSpec() + state = spec.generate_genesis( genesis_time=Uint64(1000), validators=Validators(data=[]), ) @@ -42,10 +45,13 @@ def test_store_time_from_anchor_slot(self, anchor_slot: int) -> None: body=make_empty_block_body(), ) - store = Store.from_anchor( - state, - anchor_block, - validator_id=TEST_VALIDATOR_ID, + store = cast( + Store, + spec.create_store( + state, + anchor_block, + validator_id=TEST_VALIDATOR_ID, + ), ) assert store.time == Interval(int(INTERVALS_PER_SLOT) * anchor_slot) @@ -99,37 +105,37 @@ def test_on_tick_small_increment(self, sample_store: Store, spec: LstarSpec) -> class TestIntervalTicking: """Test interval-based time ticking.""" - def test_tick_interval_basic(self, sample_store: Store) -> None: + def test_tick_interval_basic(self, sample_store: Store, spec: LstarSpec) -> None: """Test basic interval ticking.""" initial_time = sample_store.time # Tick one interval forward - sample_store, _ = sample_store.tick_interval(has_proposal=False) + sample_store, _ = spec.tick_interval(sample_store, has_proposal=False) # Time should advance by one interval assert sample_store.time == initial_time + Uint64(1) - def test_tick_interval_with_proposal(self, sample_store: Store) -> None: + def test_tick_interval_with_proposal(self, sample_store: Store, spec: LstarSpec) -> None: """Test interval ticking with proposal.""" initial_time = sample_store.time - sample_store, _ = sample_store.tick_interval(has_proposal=True) + sample_store, _ = spec.tick_interval(sample_store, has_proposal=True) # Time should advance assert sample_store.time == initial_time + Uint64(1) - def test_tick_interval_sequence(self, sample_store: Store) -> None: + def test_tick_interval_sequence(self, sample_store: Store, spec: LstarSpec) -> None: """Test sequence of interval ticks.""" initial_time = sample_store.time # Tick multiple intervals for i in range(5): - sample_store, _ = sample_store.tick_interval(has_proposal=(i % 2 == 0)) + sample_store, _ = spec.tick_interval(sample_store, has_proposal=(i % 2 == 0)) # Should have advanced by 5 intervals assert sample_store.time == initial_time + Uint64(5) - def test_tick_interval_actions_by_phase(self, sample_store: Store) -> None: + def test_tick_interval_actions_by_phase(self, sample_store: Store, spec: LstarSpec) -> None: """Test different actions performed based on interval phase.""" # Reset store to known state initial_time = Uint64(0) @@ -138,7 +144,7 @@ def test_tick_interval_actions_by_phase(self, sample_store: Store) -> None: # Tick through a complete slot cycle for interval in range(INTERVALS_PER_SLOT): has_proposal = interval == 0 # Proposal only in first interval - sample_store, _ = sample_store.tick_interval(has_proposal=has_proposal) + sample_store, _ = spec.tick_interval(sample_store, has_proposal=has_proposal) current_interval = sample_store.time 
% INTERVALS_PER_SLOT expected_interval = Uint64((interval + 1)) % INTERVALS_PER_SLOT @@ -148,34 +154,34 @@ def test_tick_interval_actions_by_phase(self, sample_store: Store) -> None: class TestAttestationProcessingTiming: """Test timing of attestation processing.""" - def test_accept_new_attestations_basic(self, sample_store: Store) -> None: + def test_accept_new_attestations_basic(self, sample_store: Store, spec: LstarSpec) -> None: """Test basic new attestation processing moves aggregated payloads.""" # The method now processes aggregated payloads, not attestations directly # Just verify the method runs without error initial_known_payloads = len(sample_store.latest_known_aggregated_payloads) # Accept new attestations (which processes aggregated payloads) - sample_store = sample_store.accept_new_attestations() + sample_store = spec.accept_new_attestations(sample_store) # New payloads should move to known payloads assert len(sample_store.latest_new_aggregated_payloads) == 0 assert len(sample_store.latest_known_aggregated_payloads) >= initial_known_payloads - def test_accept_new_attestations_multiple(self, sample_store: Store) -> None: + def test_accept_new_attestations_multiple(self, sample_store: Store, spec: LstarSpec) -> None: """Test accepting multiple new aggregated payloads.""" # Aggregated payloads are now the source of attestations # The test is simplified to just test the migration logic - sample_store = sample_store.accept_new_attestations() + sample_store = spec.accept_new_attestations(sample_store) # All new payloads should move to known payloads assert len(sample_store.latest_new_aggregated_payloads) == 0 - def test_accept_new_attestations_empty(self, sample_store: Store) -> None: + def test_accept_new_attestations_empty(self, sample_store: Store, spec: LstarSpec) -> None: """Test accepting new attestations when there are none.""" initial_known_payloads = len(sample_store.latest_known_aggregated_payloads) # Accept attestations when there are no new payloads - sample_store = sample_store.accept_new_attestations() + sample_store = spec.accept_new_attestations(sample_store) # Should be no-op assert len(sample_store.latest_new_aggregated_payloads) == 0 diff --git a/tests/lean_spec/subspecs/forkchoice/test_validate_attestation.py b/tests/lean_spec/subspecs/forkchoice/test_validate_attestation.py index 24aed541..c78d09ec 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_validate_attestation.py +++ b/tests/lean_spec/subspecs/forkchoice/test_validate_attestation.py @@ -66,7 +66,7 @@ def test_head_checkpoint_slot_mismatch_rejected( ) with pytest.raises(AssertionError, match="Head checkpoint slot mismatch"): - store.validate_attestation(attestation.data) + spec.validate_attestation(store, attestation.data) def test_head_slot_less_than_source_rejected( self, @@ -103,7 +103,7 @@ def test_head_slot_less_than_source_rejected( ) with pytest.raises(AssertionError, match="Head checkpoint must not be older than target"): - store.validate_attestation(attestation.data) + spec.validate_attestation(store, attestation.data) def test_head_slot_less_than_target_rejected( self, @@ -140,7 +140,7 @@ def test_head_slot_less_than_target_rejected( ) with pytest.raises(AssertionError, match="Head checkpoint must not be older than target"): - store.validate_attestation(attestation.data) + spec.validate_attestation(store, attestation.data) def test_valid_attestation_with_correct_head_passes( self, @@ -176,10 +176,11 @@ def test_valid_attestation_with_correct_head_passes( ), ) - 
store.validate_attestation(attestation.data) + spec.validate_attestation(store, attestation.data) def test_head_equal_to_source_and_target_passes( self, + spec: LstarSpec, observer_store: Store, ) -> None: """All three checkpoints pointing to genesis (slot 0) is valid.""" @@ -202,7 +203,7 @@ def test_head_equal_to_source_and_target_passes( ), ) - store.validate_attestation(attestation.data) + spec.validate_attestation(store, attestation.data) class TestValidateAttestationTimeCheck: @@ -240,7 +241,7 @@ def test_attestation_at_current_slot_passes( # Sweep every interval in the attestation's slot. for offset in range(int(INTERVALS_PER_SLOT)): local = store.model_copy(update={"time": ATTESTATION_START_INTERVAL + Interval(offset)}) - local.validate_attestation(data) + spec.validate_attestation(local, data) def test_attestation_in_past_passes(self, spec: LstarSpec, observer_store: Store) -> None: """A vote from a past slot is always accepted.""" @@ -249,7 +250,7 @@ def test_attestation_in_past_passes(self, spec: LstarSpec, observer_store: Store # Place the local clock several slots ahead. far_future = ATTESTATION_START_INTERVAL + INTERVALS_PER_SLOT * Interval(10) store = store.model_copy(update={"time": far_future}) - store.validate_attestation(data) + spec.validate_attestation(store, data) def test_attestation_at_disparity_boundary_passes( self, spec: LstarSpec, observer_store: Store @@ -258,7 +259,7 @@ def test_attestation_at_disparity_boundary_passes( store, data = self._build_two_block_chain(spec, observer_store) store = store.model_copy(update={"time": DISPARITY_BOUNDARY_INTERVAL}) - store.validate_attestation(data) + spec.validate_attestation(store, data) def test_attestation_just_beyond_disparity_boundary_rejected( self, spec: LstarSpec, observer_store: Store @@ -269,7 +270,7 @@ def test_attestation_just_beyond_disparity_boundary_rejected( store = store.model_copy(update={"time": JUST_BEYOND_DISPARITY_BOUNDARY_INTERVAL}) with pytest.raises(AssertionError, match="Attestation too far in future"): - store.validate_attestation(data) + spec.validate_attestation(store, data) def test_attestation_one_full_slot_in_future_rejected( self, spec: LstarSpec, observer_store: Store @@ -286,4 +287,4 @@ def test_attestation_one_full_slot_in_future_rejected( store = store.model_copy(update={"time": ONE_FULL_SLOT_BEHIND_INTERVAL}) with pytest.raises(AssertionError, match="Attestation too far in future"): - store.validate_attestation(data) + spec.validate_attestation(store, data) diff --git a/tests/lean_spec/subspecs/forkchoice/test_validator.py b/tests/lean_spec/subspecs/forkchoice/test_validator.py index 9bf09983..af62dfbf 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_validator.py +++ b/tests/lean_spec/subspecs/forkchoice/test_validator.py @@ -64,7 +64,7 @@ def test_produce_block_with_attestations( data_5 = AttestationData( slot=head_block.slot, head=head_checkpoint, - target=sample_store.get_attestation_target(), + target=spec.get_attestation_target(sample_store), source=sample_store.latest_justified, ) signed_5 = SignedAttestation( @@ -75,7 +75,7 @@ def test_produce_block_with_attestations( data_6 = AttestationData( slot=head_block.slot, head=head_checkpoint, - target=sample_store.get_attestation_target(), + target=spec.get_attestation_target(sample_store), source=sample_store.latest_justified, ) signed_6 = SignedAttestation( @@ -214,7 +214,7 @@ def test_produce_block_state_consistency( data_7 = AttestationData( slot=head_block.slot, head=head_checkpoint, - 
target=sample_store.get_attestation_target(), + target=spec.get_attestation_target(sample_store), source=sample_store.latest_justified, ) signed_7 = SignedAttestation( @@ -269,7 +269,7 @@ def test_block_production_then_attestation(self, sample_store: Store, spec: Lsta spec.produce_block_with_signatures(sample_store, proposer_slot, proposer_idx) # Update store state after block production - sample_store = sample_store.update_head() + sample_store = spec.update_head(sample_store) # Other validator creates attestation for slot 2 attestor_slot = Slot(2) diff --git a/tests/lean_spec/subspecs/genesis/test_state.py b/tests/lean_spec/subspecs/genesis/test_state.py index 8b47695c..2deeb107 100644 --- a/tests/lean_spec/subspecs/genesis/test_state.py +++ b/tests/lean_spec/subspecs/genesis/test_state.py @@ -2,11 +2,14 @@ from lean_spec.forks.lstar.containers.block import Block, BlockBody from lean_spec.forks.lstar.containers.block.types import AggregatedAttestations -from lean_spec.forks.lstar.containers.state import State, Validators +from lean_spec.forks.lstar.containers.state import Validators from lean_spec.forks.lstar.containers.validator import Validator +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Bytes52, Slot, Uint64, ValidatorIndex +_SPEC = LstarSpec() + def test_genesis_block_hash_comparison() -> None: """Test that genesis block hashes are deterministic and differ with different inputs.""" @@ -20,7 +23,7 @@ def test_genesis_block_hash_comparison() -> None: ] ) - genesis_state1 = State.generate_genesis( + genesis_state1 = _SPEC.generate_genesis( genesis_time=Uint64(1000), validators=validators1, ) @@ -38,7 +41,7 @@ def test_genesis_block_hash_comparison() -> None: genesis_block_hash1 = hash_tree_root(genesis_block1) # Create a second genesis state with same config but regenerated (should produce same hash) - genesis_state1_copy = State.generate_genesis( + genesis_state1_copy = _SPEC.generate_genesis( genesis_time=Uint64(1000), validators=validators1, ) @@ -66,7 +69,7 @@ def test_genesis_block_hash_comparison() -> None: ] ) - genesis_state2 = State.generate_genesis( + genesis_state2 = _SPEC.generate_genesis( genesis_time=Uint64(1000), # Same genesis_time but different validators validators=validators2, ) @@ -94,7 +97,7 @@ def test_genesis_block_hash_comparison() -> None: ] ) - genesis_state3 = State.generate_genesis( + genesis_state3 = _SPEC.generate_genesis( genesis_time=Uint64(2000), # Different genesis_time but same validators validators=validators3, ) From f9a6b9c6c8980804c28e378dafc0bc8969c95099 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 17:31:59 +0200 Subject: [PATCH 4/9] fix(forks): resolve Block forward reference in BlockLookup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Store.blocks field is annotated as `BlockLookup`, which was defined as `dict[Bytes32, "Block"]` — a string forward reference. After the container methods moved off Store, Pydantic's model rebuild could no longer resolve `Block` because it was never imported into store.py alongside the alias. Drop the forward-reference quoting: `BlockLookup` lives in the same module as `Block`, so the type can refer to the class directly. Pydantic then resolves `Store.blocks` correctly through the alias re-export. 
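A minimal self-contained sketch of the alias change (illustrative only:
Pydantic v2 assumed, `bytes` stands in for `Bytes32`, and the real failure
additionally needs the two-module split described above):

    from pydantic import BaseModel

    class Block(BaseModel):
        slot: int

    # Before: dict[bytes, "Block"]. The quoted name is a ForwardRef that
    # must resolve in the namespace of whichever module builds the model;
    # nothing imported Block into store.py, so building Store failed.
    # Binding the class object into the alias removes the lookup entirely.
    BlockLookup = dict[bytes, Block]

    class Store(BaseModel):
        blocks: BlockLookup = {}

    Store(blocks={b"\x00" * 32: Block(slot=1)})  # builds without a rebuild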
Without this, every consensus filler that constructed a Store via `spec.create_store(...)` raised `PydanticUserError: 'Store' is not fully defined`. Co-Authored-By: Claude Opus 4.7 (1M context) --- src/lean_spec/forks/lstar/containers/block/block.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lean_spec/forks/lstar/containers/block/block.py b/src/lean_spec/forks/lstar/containers/block/block.py index 8880f9c0..ada831f2 100644 --- a/src/lean_spec/forks/lstar/containers/block/block.py +++ b/src/lean_spec/forks/lstar/containers/block/block.py @@ -66,7 +66,7 @@ class Block(Container): """The block's payload.""" -BlockLookup = dict[Bytes32, "Block"] +BlockLookup = dict[Bytes32, Block] """Mapping from block root to Block objects.""" From cbd4038bebce8bb07c2bc933b53eb58c477265ce Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 17:44:31 +0200 Subject: [PATCH 5/9] fix(forks): route mock-store tests through autouse spec patches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The forwarders that used to live on `Store` (and were patched in `tests/lean_spec/subspecs/{sync,chain,networking,validator}/`) are gone after Stage 4D. The mocks (`MockStore`, `MockForkchoiceStore`) still implement the same method surface, but the service code now calls the real spec, which expects a Pydantic Store. Add an autouse fixture per affected subspec that patches the active spec's methods to delegate back to `store.method(...)`. The mocks intercept calls in-place, preserving every test's recording semantics without touching service code. The validator service tests that previously patched `Store.produce_block_with_signatures` and `Store.on_gossip_attestation` now patch the matching methods on the validator service's `_SPEC` — same intent, current attribute path. Co-Authored-By: Claude Opus 4.7 (1M context) --- tests/lean_spec/subspecs/chain/conftest.py | 24 +++++++++++ .../lean_spec/subspecs/networking/conftest.py | 42 +++++++++++++++++++ tests/lean_spec/subspecs/sync/conftest.py | 36 ++++++++++++++++ .../subspecs/validator/test_service.py | 10 +++-- 4 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 tests/lean_spec/subspecs/chain/conftest.py create mode 100644 tests/lean_spec/subspecs/networking/conftest.py diff --git a/tests/lean_spec/subspecs/chain/conftest.py b/tests/lean_spec/subspecs/chain/conftest.py new file mode 100644 index 00000000..190bfd29 --- /dev/null +++ b/tests/lean_spec/subspecs/chain/conftest.py @@ -0,0 +1,24 @@ +"""Shared fixtures for chain service tests.""" + +from __future__ import annotations + +from typing import Any + +import pytest + +from lean_spec.subspecs.chain import service as chain_service_module + + +@pytest.fixture(autouse=True) +def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: + """Route chain-service spec calls back to the mock store's matching method. + + Chain tests run against `MockStore`, which records calls on its own + `tick_interval`. The real spec implementation expects a fully-formed + Pydantic Store. Routing the spec call back lets the mock intercept in-place. 
+ """ + + def tick_interval(store: Any, has_proposal: bool, is_aggregator: bool = False) -> Any: + return store.tick_interval(has_proposal, is_aggregator) + + monkeypatch.setattr(chain_service_module._SPEC, "tick_interval", tick_interval) diff --git a/tests/lean_spec/subspecs/networking/conftest.py b/tests/lean_spec/subspecs/networking/conftest.py new file mode 100644 index 00000000..3650df86 --- /dev/null +++ b/tests/lean_spec/subspecs/networking/conftest.py @@ -0,0 +1,42 @@ +"""Shared fixtures for networking subspec tests.""" + +from __future__ import annotations + +from typing import Any + +import pytest + +from lean_spec.subspecs.sync import service as sync_service_module + + +@pytest.fixture(autouse=True) +def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: + """Route sync-service spec calls back to the mock store's matching method. + + Networking tests drive `SyncService` against `MockForkchoiceStore` for + isolation. The real spec implementation expects a fully-formed Pydantic + Store; routing each spec call back to `store.method(...)` lets the mock + intercept in-place without changing service code. + """ + spec = sync_service_module._SPEC + + def on_block(store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: + return store.on_block(signed_block, *args, **kwargs) + + def on_gossip_attestation( + store: Any, + signed_attestation: Any, + *args: Any, + **kwargs: Any, + ) -> Any: + kwargs.pop("scheme", None) + return store.on_gossip_attestation(signed_attestation, *args, **kwargs) + + def on_gossip_aggregated_attestation( + store: Any, signed_attestation: Any, *args: Any, **kwargs: Any + ) -> Any: + return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) + + monkeypatch.setattr(spec, "on_block", on_block) + monkeypatch.setattr(spec, "on_gossip_attestation", on_gossip_attestation) + monkeypatch.setattr(spec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation) diff --git a/tests/lean_spec/subspecs/sync/conftest.py b/tests/lean_spec/subspecs/sync/conftest.py index 439694ee..32e62ff7 100644 --- a/tests/lean_spec/subspecs/sync/conftest.py +++ b/tests/lean_spec/subspecs/sync/conftest.py @@ -6,12 +6,48 @@ from __future__ import annotations +from typing import Any + import pytest from lean_spec.subspecs.networking.reqresp.message import Status +from lean_spec.subspecs.sync import service as sync_service_module from lean_spec.types import Bytes32, Checkpoint, Slot +@pytest.fixture(autouse=True) +def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: + """Route sync-service spec calls back to the mock store's matching method. + + Sync tests run against `MockForkchoiceStore`, which records calls on its own + methods. The real spec implementation expects a fully-formed Pydantic Store. + Routing each spec call back to `store.method(...)` lets the mock intercept + in-place without a sync-service code change. 
+ """ + spec = sync_service_module._SPEC + + def on_block(store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: + return store.on_block(signed_block, *args, **kwargs) + + def on_gossip_attestation( + store: Any, + signed_attestation: Any, + *args: Any, + **kwargs: Any, + ) -> Any: + kwargs.pop("scheme", None) + return store.on_gossip_attestation(signed_attestation, *args, **kwargs) + + def on_gossip_aggregated_attestation( + store: Any, signed_attestation: Any, *args: Any, **kwargs: Any + ) -> Any: + return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) + + monkeypatch.setattr(spec, "on_block", on_block) + monkeypatch.setattr(spec, "on_gossip_attestation", on_gossip_attestation) + monkeypatch.setattr(spec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation) + + @pytest.fixture def sample_checkpoint() -> Checkpoint: """Sample checkpoint for sync tests.""" diff --git a/tests/lean_spec/subspecs/validator/test_service.py b/tests/lean_spec/subspecs/validator/test_service.py index 2ad071ef..e5403576 100644 --- a/tests/lean_spec/subspecs/validator/test_service.py +++ b/tests/lean_spec/subspecs/validator/test_service.py @@ -496,8 +496,9 @@ async def test_assertion_error_is_logged_and_skipped( on_block=lambda b: blocks.append(b), # type: ignore[arg-type, return-value] ) - with patch.object( - Store, "produce_block_with_signatures", side_effect=AssertionError("mismatch") + with patch( + "lean_spec.subspecs.validator.service._SPEC.produce_block_with_signatures", + side_effect=AssertionError("mismatch"), ): # Slot 0: proposer is validator 0 (0 % 8 = 0), which is in the registry. await service._maybe_produce_block(Slot(0)) @@ -677,7 +678,10 @@ async def capture_att(att: SignedAttestation) -> None: with ( caplog.at_level("DEBUG"), - patch.object(Store, "on_gossip_attestation", side_effect=RuntimeError("store error")), + patch( + "lean_spec.subspecs.validator.service._SPEC.on_gossip_attestation", + side_effect=RuntimeError("store error"), + ), ): await service._produce_attestations(target_slot) From 4f0c2eedccf37506b06872932f436967027737c4 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 17:59:27 +0200 Subject: [PATCH 6/9] refactor(forks): drop cast(Store, ...) at call sites Casts were a workaround for Liskov violations on LstarSpec.create_store when it returned the SpecStoreType protocol while concretely producing Store. cast had no runtime effect and pushed type-checker noise into fixtures and tests. Move the imprecision into the fork itself: create_store now declares its concrete Store return and suppresses the override warning at the single definition site. Callers receive Store directly and need no cast. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../test_fixtures/api_endpoint.py | 6 ++-- .../test_fixtures/fork_choice.py | 14 +++----- src/lean_spec/forks/lstar/spec.py | 33 +++++++++---------- tests/lean_spec/conftest.py | 17 +++------- tests/lean_spec/helpers/builders.py | 2 +- .../forkchoice/test_time_management.py | 13 +++----- 6 files changed, 34 insertions(+), 51 deletions(-) diff --git a/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py b/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py index ad69bd51..34a7e36c 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py +++ b/packages/testing/src/consensus_testing/test_fixtures/api_endpoint.py @@ -1,7 +1,7 @@ """API endpoint response conformance fixtures.""" from collections.abc import Callable -from typing import Any, ClassVar, cast +from typing import Any, ClassVar from lean_spec.forks.lstar import Store from lean_spec.forks.lstar.containers import BlockBody @@ -53,7 +53,7 @@ def _build_store(num_validators: int, genesis_time: int, anchor_slot: int = 0) - ) block = _make_genesis_block(state) # No validator identity — fixture only reads store data, never signs. - return cast(Store, fork.create_store(state, block, validator_id=None)) + return fork.create_store(state, block, validator_id=None) # Walk the chain from genesis through anchor_slot using empty blocks. # The returned pair (state, block) is internally consistent with the @@ -64,7 +64,7 @@ def _build_store(num_validators: int, genesis_time: int, anchor_slot: int = 0) - anchor_slot=Slot(anchor_slot), genesis_time=Uint64(genesis_time), ) - return cast(Store, fork.create_store(state, block, validator_id=None)) + return fork.create_store(state, block, validator_id=None) def _health_response(_store: Store, _fixture: "ApiEndpointTest") -> dict[str, Any]: diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index 03c0483e..13763d8c 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -7,11 +7,10 @@ from __future__ import annotations -from typing import ClassVar, Self, cast +from typing import ClassVar, Self from pydantic import Field, model_validator -from lean_spec.forks.lstar import Store from lean_spec.forks.lstar.containers.block import ( Block, BlockBody, @@ -257,13 +256,10 @@ def make_fixture(self) -> Self: # # The Store is the node's local view of the chain. # It starts from a trusted anchor (usually genesis). 
- store = cast( - Store, - _SPEC.create_store( - self.anchor_state, - self.anchor_block, - validator_id=ValidatorIndex(0), - ), + store = _SPEC.create_store( + self.anchor_state, + self.anchor_block, + validator_id=ValidatorIndex(0), ) # Block registry for fork creation diff --git a/src/lean_spec/forks/lstar/spec.py b/src/lean_spec/forks/lstar/spec.py index 11509994..2e6ef55b 100644 --- a/src/lean_spec/forks/lstar/spec.py +++ b/src/lean_spec/forks/lstar/spec.py @@ -3,7 +3,7 @@ from collections import defaultdict from collections.abc import Iterable from collections.abc import Set as AbstractSet -from typing import Any, ClassVar, cast +from typing import Any, ClassVar from lean_spec.forks.lstar.containers import ( AggregatedAttestation, @@ -59,7 +59,7 @@ ValidatorIndices, ) -from ..protocol import ForkProtocol, SpecBlockType, SpecStateType, SpecStoreType +from ..protocol import ForkProtocol, SpecBlockType, SpecStateType from .store import AttestationSignatureEntry, Store @@ -871,12 +871,14 @@ def verify_signatures( return True - def create_store( + # Pydantic fields don't structurally match Protocol @property in ty; + # the concrete return is Liskov-safe (Store satisfies SpecStoreType structurally). + def create_store( # type: ignore[override] # ty: ignore[invalid-method-override] self, state: SpecStateType, anchor_block: SpecBlockType, validator_id: ValidatorIndex | None, - ) -> SpecStoreType: + ) -> Store: """Initialize a forkchoice store from an anchor state and block. The anchor block and state form the starting point for fork choice. @@ -921,19 +923,16 @@ def create_store( # regardless of what the anchor state's embedded checkpoints say. anchor_checkpoint = Checkpoint(root=anchor_root, slot=anchor_slot) - return cast( - SpecStoreType, - self.store_class( - time=Interval.from_slot(anchor_slot), - config=state.config, - head=anchor_root, - safe_target=anchor_root, - latest_justified=anchor_checkpoint, - latest_finalized=anchor_checkpoint, - blocks={anchor_root: anchor_block}, - states={anchor_root: state}, - validator_id=validator_id, - ), + return self.store_class( + time=Interval.from_slot(anchor_slot), + config=state.config, + head=anchor_root, + safe_target=anchor_root, + latest_justified=anchor_checkpoint, + latest_finalized=anchor_checkpoint, + blocks={anchor_root: anchor_block}, + states={anchor_root: state}, + validator_id=validator_id, ) def prune_stale_attestation_data(self, store: Store) -> Store: diff --git a/tests/lean_spec/conftest.py b/tests/lean_spec/conftest.py index 24ef77a9..45e1d10c 100644 --- a/tests/lean_spec/conftest.py +++ b/tests/lean_spec/conftest.py @@ -8,7 +8,6 @@ from __future__ import annotations from collections.abc import Callable -from typing import cast import pytest from consensus_testing.keys import XmssKeyManager @@ -78,13 +77,10 @@ def genesis_block(genesis_state: State) -> Block: @pytest.fixture def base_store(spec: LstarSpec, genesis_state: State, genesis_block: Block) -> Store: """Fork choice store initialized with genesis.""" - return cast( - Store, - spec.create_store( - genesis_state, - genesis_block, - validator_id=ValidatorIndex(0), - ), + return spec.create_store( + genesis_state, + genesis_block, + validator_id=ValidatorIndex(0), ) @@ -120,7 +116,4 @@ def observer_store( spec: LstarSpec, keyed_genesis_state: State, keyed_genesis_block: Block ) -> Store: """Fork choice store with validator_id=None (non-validator observer).""" - return cast( - Store, - spec.create_store(keyed_genesis_state, keyed_genesis_block, validator_id=None), - ) + 
return spec.create_store(keyed_genesis_state, keyed_genesis_block, validator_id=None) diff --git a/tests/lean_spec/helpers/builders.py b/tests/lean_spec/helpers/builders.py index 517a25e2..bd9c7e43 100644 --- a/tests/lean_spec/helpers/builders.py +++ b/tests/lean_spec/helpers/builders.py @@ -339,7 +339,7 @@ def make_genesis_data( validators = make_validators(num_validators) genesis_state = make_genesis_state(validators=validators, genesis_time=genesis_time) genesis_block = make_genesis_block(genesis_state) - store = cast(Store, _SPEC.create_store(genesis_state, genesis_block, validator_id=validator_id)) + store = _SPEC.create_store(genesis_state, genesis_block, validator_id=validator_id) return GenesisData(store, genesis_state, genesis_block) diff --git a/tests/lean_spec/subspecs/forkchoice/test_time_management.py b/tests/lean_spec/subspecs/forkchoice/test_time_management.py index 5607f497..572fe03e 100644 --- a/tests/lean_spec/subspecs/forkchoice/test_time_management.py +++ b/tests/lean_spec/subspecs/forkchoice/test_time_management.py @@ -1,7 +1,5 @@ """Tests for time advancement, intervals, and slot management.""" -from typing import cast - from hypothesis import given, settings from hypothesis import strategies as st @@ -45,13 +43,10 @@ def test_store_time_from_anchor_slot(self, anchor_slot: int) -> None: body=make_empty_block_body(), ) - store = cast( - Store, - spec.create_store( - state, - anchor_block, - validator_id=TEST_VALIDATOR_ID, - ), + store = spec.create_store( + state, + anchor_block, + validator_id=TEST_VALIDATOR_ID, ) assert store.time == Interval(int(INTERVALS_PER_SLOT) * anchor_slot) From 9faf0ec3331bf71671297b6c49dc489a86d19d94 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 23:04:31 +0200 Subject: [PATCH 7/9] refactor(forks): replace 17-site _SPEC duplication with DI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each module declared its own _SPEC = LstarSpec(), creating value-equal but identity-distinct instances. The pattern also baked LstarSpec into 17 import sites; a future fork would have to grep-and-replace them all. Production services (chain, sync, validator, api) now take spec as a dataclass field with default_factory=LstarSpec — explicit at the composition root in node.py, optional in tests. node.py narrows config.fork (ForkProtocol) to LstarSpec once with isinstance, which also lets the cast(State, ...) and cast(Store, ...) at the genesis construction sites drop. Test conftests that intercept spec calls now monkey-patch LstarSpec at the class level (not the deleted module-level _SPEC instance). Test types and fixtures instantiate LstarSpec at call time — no module-level cache, no shared mutable state to alias. ForkProtocol still declares only the three abstract construction methods (generate_genesis, create_store, upgrade_state). Services and tests that drive consensus methods (process_slots, build_block, tick_interval, ...) keep the concrete LstarSpec type until the protocol surface is widened in a follow-up. 
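The wiring pattern in one hypothetical service (ExampleService is a
stand-in; the field shape and call path mirror the real services):

    from dataclasses import dataclass, field
    from typing import Any

    from lean_spec.forks.lstar.spec import LstarSpec

    @dataclass(slots=True)
    class ExampleService:
        spec: LstarSpec = field(default_factory=LstarSpec)

        def step(self, store: Any, has_proposal: bool) -> Any:
            # Consensus calls route through the injected instance, so a
            # test can hand in a stub without patching module globals.
            return self.spec.tick_interval(store, has_proposal=has_proposal)

    service = ExampleService(spec=LstarSpec())  # explicit at the composition root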
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../testing/src/consensus_testing/genesis.py | 21 +++--- .../test_fixtures/fork_choice.py | 19 +++-- .../test_fixtures/state_transition.py | 16 ++--- .../test_fixtures/verify_signatures.py | 5 +- .../test_types/block_spec.py | 31 ++++----- .../test_types/gossip_attestation_spec.py | 5 +- .../test_types/store_checks.py | 11 +-- .../subspecs/api/endpoints/fork_choice.py | 7 +- src/lean_spec/subspecs/api/server.py | 8 ++- src/lean_spec/subspecs/chain/service.py | 8 +-- src/lean_spec/subspecs/node/node.py | 20 ++++-- src/lean_spec/subspecs/sync/service.py | 69 +++++++++++-------- src/lean_spec/subspecs/validator/service.py | 14 ++-- .../state_transition/test_block_processing.py | 4 +- .../state_transition/test_finalization.py | 5 +- .../devnet/state_transition/test_genesis.py | 5 +- .../test_slot_monotonicity.py | 6 +- tests/lean_spec/helpers/builders.py | 14 ++-- tests/lean_spec/subspecs/chain/conftest.py | 8 ++- .../lean_spec/subspecs/genesis/test_state.py | 12 ++-- .../lean_spec/subspecs/networking/conftest.py | 16 +++-- tests/lean_spec/subspecs/sync/conftest.py | 16 +++-- 22 files changed, 163 insertions(+), 157 deletions(-) diff --git a/packages/testing/src/consensus_testing/genesis.py b/packages/testing/src/consensus_testing/genesis.py index 122ea1ec..d4a41fdf 100644 --- a/packages/testing/src/consensus_testing/genesis.py +++ b/packages/testing/src/consensus_testing/genesis.py @@ -4,18 +4,12 @@ from lean_spec.forks.lstar.containers.state import State, Validators from lean_spec.forks.lstar.containers.validator import Validator from lean_spec.forks.lstar.spec import LstarSpec -from lean_spec.forks.protocol import ForkProtocol from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes52, Slot, Uint64, ValidatorIndex from .keys import XmssKeyManager _DEFAULT_GENESIS_TIME = Uint64(0) -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all helper invocations.""" - -_DEFAULT_FORK: ForkProtocol = _SPEC -"""Stateless fork instance used when callers do not pass one explicitly.""" def _build_validators(num_validators: int) -> Validators: @@ -44,30 +38,30 @@ def _build_validators(num_validators: int) -> Validators: def generate_pre_state( - fork: ForkProtocol = _DEFAULT_FORK, + fork: LstarSpec | None = None, genesis_time: Uint64 = _DEFAULT_GENESIS_TIME, num_validators: int = 4, ) -> State: """Generate a default pre-state for consensus tests. Args: - fork: Fork dispatching genesis construction. + fork: Fork dispatching genesis construction. Defaults to a fresh + LstarSpec instance. genesis_time: The genesis timestamp. num_validators: Number of validators to include. Returns: A properly initialized consensus state. """ + fork = fork or LstarSpec() validators = _build_validators(num_validators) - state = fork.generate_genesis(genesis_time=genesis_time, validators=validators) - assert isinstance(state, State) - return state + return fork.generate_genesis(genesis_time=genesis_time, validators=validators) def build_anchor( num_validators: int, anchor_slot: Slot, - fork: ForkProtocol = _DEFAULT_FORK, + fork: LstarSpec | None = None, genesis_time: Uint64 = _DEFAULT_GENESIS_TIME, ) -> tuple[State, Block]: """Build a consistent non-genesis anchor by advancing the genesis state. @@ -101,6 +95,7 @@ def build_anchor( "For a genesis anchor use generate_pre_state instead." 
) + fork = fork or LstarSpec() state = generate_pre_state(fork=fork, genesis_time=genesis_time, num_validators=num_validators) # Reconstruct the genesis block from the state's latest header. @@ -124,7 +119,7 @@ def build_anchor( for next_slot in range(1, int(anchor_slot) + 1): slot = Slot(next_slot) proposer_index = ValidatorIndex(int(slot) % int(num_validators_u64)) - current_block, state, _, _ = _SPEC.build_block( + current_block, state, _, _ = fork.build_block( state, slot=slot, proposer_index=proposer_index, diff --git a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py index 13763d8c..22e2180d 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py +++ b/packages/testing/src/consensus_testing/test_fixtures/fork_choice.py @@ -37,9 +37,6 @@ ) from .base import BaseConsensusFixture -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all fixture invocations.""" - class ForkChoiceTest(BaseConsensusFixture): """ @@ -189,6 +186,8 @@ def make_fixture(self) -> Self: assert self.anchor_block is not None, "anchor block must be set before making fixture" assert self.max_slot is not None, "max slot must be set before making fixture" + spec = LstarSpec() + # Expected anchor-init failure path. # # When anchor_valid is False, the test asserts that Store.from_anchor @@ -201,7 +200,7 @@ def make_fixture(self) -> Self: "Store.from_anchor is expected to fail before any step can run" ) try: - _SPEC.create_store( + spec.create_store( self.anchor_state, self.anchor_block, validator_id=ValidatorIndex(0), @@ -256,7 +255,7 @@ def make_fixture(self) -> Self: # # The Store is the node's local view of the chain. # It starts from a trusted anchor (usually genesis). - store = _SPEC.create_store( + store = spec.create_store( self.anchor_state, self.anchor_block, validator_id=ValidatorIndex(0), @@ -292,7 +291,7 @@ def make_fixture(self) -> Self: target_interval = Interval.from_unix_time( Uint64(step.time), store.config.genesis_time ) - store, _ = _SPEC.on_tick( + store, _ = spec.on_tick( store, target_interval, has_proposal=step.has_proposal, @@ -325,13 +324,13 @@ def make_fixture(self) -> Self: # This tick includes a block (has proposal). # Always act as aggregator to ensure gossip signatures are aggregated target_interval = Interval.from_slot(block.slot) - store, _ = _SPEC.on_tick( + store, _ = spec.on_tick( store, target_interval, has_proposal=True, is_aggregator=True ) # Process the block through Store. # This validates, applies state transition, and updates the store's head. 
- store = _SPEC.on_block( + store = spec.on_block( store, signed_block, scheme=LEAN_ENV_TO_SCHEMES[self.lean_env], @@ -349,7 +348,7 @@ def make_fixture(self) -> Self: step.valid, ) step._filled_attestation = signed_attestation - store = _SPEC.on_gossip_attestation( + store = spec.on_gossip_attestation( store, signed_attestation, scheme=LEAN_ENV_TO_SCHEMES[self.lean_env], @@ -363,7 +362,7 @@ def make_fixture(self) -> Self: key_manager, ) step._filled_attestation = signed_aggregated - store = _SPEC.on_gossip_aggregated_attestation(store, signed_aggregated) + store = spec.on_gossip_aggregated_attestation(store, signed_aggregated) case _: raise ValueError(f"Step {i}: unknown step type {type(step).__name__}") diff --git a/packages/testing/src/consensus_testing/test_fixtures/state_transition.py b/packages/testing/src/consensus_testing/test_fixtures/state_transition.py index a8c4bcff..e320bb10 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/state_transition.py +++ b/packages/testing/src/consensus_testing/test_fixtures/state_transition.py @@ -17,9 +17,6 @@ from ..test_types import AggregatedAttestationSpec, BlockSpec, StateExpectation from .base import BaseConsensusFixture -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all fixture invocations.""" - class StateTransitionTest(BaseConsensusFixture): """ @@ -113,6 +110,7 @@ def make_fixture(self) -> "StateTransitionTest": """ actual_post_state: State | None = None exception_raised: Exception | None = None + spec = LstarSpec() # Initialize filled_blocks list that will be populated as we process blocks filled_blocks: list[Block] = [] @@ -140,9 +138,9 @@ def make_fixture(self) -> "StateTransitionTest": if cached_state is not None: state = cached_state elif getattr(block_spec, "skip_slot_processing", False): - state = _SPEC.process_block(state, block) + state = spec.process_block(state, block) else: - state = _SPEC.state_transition( + state = spec.state_transition( state, block=block, valid_signatures=True, @@ -217,7 +215,7 @@ def _build_block_from_spec( # Advance slots unless the spec intentionally skips slot processing. slot_advanced_state: State | None = None if not spec.skip_slot_processing: - slot_advanced_state = _SPEC.process_slots(state, spec.slot) + slot_advanced_state = LstarSpec().process_slots(state, spec.slot) # Resolve the parent root. # Default: latest block header from the slot-advanced state. @@ -260,7 +258,7 @@ def _build_block_from_spec( known_block_roots = frozenset(hash_tree_root(b) for b in block_registry.values()) - block, post_state, _, _ = _SPEC.build_block( + block, post_state, _, _ = LstarSpec().build_block( state, slot=spec.slot, proposer_index=proposer_index, @@ -295,8 +293,8 @@ def _build_block_from_spec( # The body changed, so re-run the transition to get the correct # post-state and state root. 
if post_state is not None: - post_state = _SPEC.process_slots(state, spec.slot) - post_state = _SPEC.process_block(post_state, block) + post_state = LstarSpec().process_slots(state, spec.slot) + post_state = LstarSpec().process_block(post_state, block) block = block.model_copy(update={"state_root": hash_tree_root(post_state)}) return block, post_state diff --git a/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py b/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py index 689b9142..1f1702b9 100644 --- a/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py +++ b/packages/testing/src/consensus_testing/test_fixtures/verify_signatures.py @@ -22,9 +22,6 @@ from ..test_types import BlockSpec from .base import BaseConsensusFixture -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all fixture invocations.""" - class VerifySignaturesTest(BaseConsensusFixture): """ @@ -115,7 +112,7 @@ def make_fixture(self) -> VerifySignaturesTest: # Verify signatures try: - _SPEC.verify_signatures(signed_block, self.anchor_state.validators) + LstarSpec().verify_signatures(signed_block, self.anchor_state.validators) except AssertionError as e: exception_raised = e # If we expect an exception, this is fine diff --git a/packages/testing/src/consensus_testing/test_types/block_spec.py b/packages/testing/src/consensus_testing/test_types/block_spec.py index 3a09964e..21aa8f05 100644 --- a/packages/testing/src/consensus_testing/test_types/block_spec.py +++ b/packages/testing/src/consensus_testing/test_types/block_spec.py @@ -32,9 +32,6 @@ from ..keys import LEAN_ENV_TO_SCHEMES, XmssKeyManager, create_dummy_signature from .aggregated_attestation_spec import AggregatedAttestationSpec -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all spec invocations.""" - class BlockSpec(CamelModel): """ @@ -294,6 +291,7 @@ def build_signed_block( Returns: Complete signed block with all attestation and proposer signatures. """ + spec = LstarSpec() proposer_index = self.resolve_proposer_index(len(state.validators)) # Build a genesis block registry so attestation specs can resolve labels. @@ -308,7 +306,7 @@ def build_signed_block( # Resolve the parent root. # The default is the latest block header from the slot-advanced state. - parent_state = _SPEC.process_slots(state, self.slot) + parent_state = spec.process_slots(state, self.slot) parent_root = self.resolve_parent_root( block_registry, default_root=hash_tree_root(parent_state.latest_block_header), @@ -364,7 +362,7 @@ def build_signed_block( for agg_att, proof in zip(aggregated_attestations, attestation_sigs.data, strict=True) } - final_block, _, _, aggregated_signatures = _SPEC.build_block( + final_block, _, _, aggregated_signatures = spec.build_block( state, slot=self.slot, proposer_index=proposer_index, @@ -405,6 +403,7 @@ def build_signed_block_with_store( Returns: Complete signed block ready for Store processing. """ + spec = LstarSpec() proposer_index = self.resolve_proposer_index(len(store.states[store.head].validators)) # Resolve parent block. @@ -429,7 +428,7 @@ def build_signed_block_with_store( # check rejects votes whose slot has not yet started locally. 
block_slot_interval = Interval.from_slot(self.slot) if store.time < block_slot_interval: - store, _ = _SPEC.on_tick( + store, _ = spec.on_tick( store, block_slot_interval, has_proposal=True, is_aggregator=True ) @@ -442,7 +441,7 @@ def build_signed_block_with_store( or (signature := sigs_for_data.get(attestation.validator_id)) is None ): continue - store = _SPEC.on_gossip_attestation( + store = spec.on_gossip_attestation( store, SignedAttestation( validator_id=attestation.validator_id, @@ -454,11 +453,11 @@ def build_signed_block_with_store( ) # Trigger Store aggregation to merge gossip signatures into known payloads. - aggregation_store, _ = _SPEC.aggregate(store) - merged_store = _SPEC.accept_new_attestations(aggregation_store) + aggregation_store, _ = spec.aggregate(store) + merged_store = spec.accept_new_attestations(aggregation_store) # Build the block through the spec's State.build_block(). - final_block, _, _, block_proofs = _SPEC.build_block( + final_block, _, _, block_proofs = spec.build_block( parent_state, slot=self.slot, proposer_index=proposer_index, @@ -470,9 +469,9 @@ def build_signed_block_with_store( # Append forced attestations that bypass the builder's MAX cap. # Each entry is signed and aggregated so the block carries valid proofs. if self.forced_attestations: - for spec in self.forced_attestations: - att_data = spec.build_attestation_data(block_registry, parent_state) - proof = key_manager.sign_and_aggregate(spec.validator_ids, att_data) + for att_spec in self.forced_attestations: + att_data = att_spec.build_attestation_data(block_registry, parent_state) + proof = key_manager.sign_and_aggregate(att_spec.validator_ids, att_data) block_proofs.append(proof) final_block = final_block.model_copy( update={ @@ -483,7 +482,7 @@ def build_signed_block_with_store( *final_block.body.attestations.data, AggregatedAttestation( aggregation_bits=ValidatorIndices( - data=spec.validator_ids, + data=att_spec.validator_ids, ).to_aggregation_bits(), data=att_data, ), @@ -495,8 +494,8 @@ def build_signed_block_with_store( ) # Recompute state root with the modified body. - post_state = _SPEC.process_slots(parent_state, self.slot) - post_state = _SPEC.process_block(post_state, final_block) + post_state = spec.process_slots(parent_state, self.slot) + post_state = spec.process_block(post_state, final_block) final_block = final_block.model_copy(update={"state_root": hash_tree_root(post_state)}) return self._sign_block(final_block, block_proofs, proposer_index, key_manager) diff --git a/packages/testing/src/consensus_testing/test_types/gossip_attestation_spec.py b/packages/testing/src/consensus_testing/test_types/gossip_attestation_spec.py index a5cd0f3e..5a363011 100644 --- a/packages/testing/src/consensus_testing/test_types/gossip_attestation_spec.py +++ b/packages/testing/src/consensus_testing/test_types/gossip_attestation_spec.py @@ -12,9 +12,6 @@ from ..keys import XmssKeyManager, create_dummy_signature from .utils import resolve_checkpoint -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all spec invocations.""" - class GossipAttestationSpec(CamelModel): """ @@ -204,7 +201,7 @@ def build_signed( attestation_data = self.build_attestation_data(block_registry, anchor_block) else: # Honest path: use the Store's own attestation data production. 
- attestation_data = _SPEC.produce_attestation_data(store, self.slot) + attestation_data = LstarSpec().produce_attestation_data(store, self.slot) signature = ( key_manager.sign_attestation_data(self.validator_id, attestation_data) diff --git a/packages/testing/src/consensus_testing/test_types/store_checks.py b/packages/testing/src/consensus_testing/test_types/store_checks.py index a5a53be0..53c9bbf8 100644 --- a/packages/testing/src/consensus_testing/test_types/store_checks.py +++ b/packages/testing/src/consensus_testing/test_types/store_checks.py @@ -11,8 +11,6 @@ from .utils import resolve_block_root -_SPEC = LstarSpec() - def _ancestor_set(blocks: BlockLookup, head: Bytes32) -> set[Bytes32]: """Walk parent links from head and collect every reachable block root.""" @@ -367,7 +365,7 @@ def _resolve(label: str) -> Bytes32: # Attestation target checkpoint (slot + root consistency) if "attestation_target_slot" in fields: - target = _SPEC.get_attestation_target(store) + target = LstarSpec().get_attestation_target(store) _check("attestation_target.slot", target.slot, self.attestation_target_slot) block_found = any( @@ -398,7 +396,9 @@ def _resolve(label: str) -> Bytes32: payloads = store.latest_known_aggregated_payloads label = "in latest_known" - extracted = _SPEC.extract_attestations_from_aggregated_payloads(store, payloads) + extracted = LstarSpec().extract_attestations_from_aggregated_payloads( + store, payloads + ) if check.validator not in extracted: raise AssertionError( f"Step {step_index}: validator {check.validator} not found " @@ -562,7 +562,8 @@ def _validate_lexicographic_head( root = hash_tree_root(block) slot = block.slot - known_attestations = _SPEC.extract_attestations_from_aggregated_payloads( + spec = LstarSpec() + known_attestations = spec.extract_attestations_from_aggregated_payloads( store, store.latest_known_aggregated_payloads ) weight = 0 diff --git a/src/lean_spec/subspecs/api/endpoints/fork_choice.py b/src/lean_spec/subspecs/api/endpoints/fork_choice.py index 42ad1778..1867c2d1 100644 --- a/src/lean_spec/subspecs/api/endpoints/fork_choice.py +++ b/src/lean_spec/subspecs/api/endpoints/fork_choice.py @@ -6,11 +6,6 @@ from aiohttp import web -from lean_spec.forks import LstarSpec - -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all endpoint invocations.""" - async def handle(request: web.Request) -> web.Response: """ @@ -39,7 +34,7 @@ async def handle(request: web.Request) -> web.Response: raise web.HTTPServiceUnavailable(reason="Store not initialized") finalized_slot = store.latest_finalized.slot - weights = _SPEC.compute_block_weights(store) + weights = request.app["spec"].compute_block_weights(store) nodes = [] for root, block in store.blocks.items(): diff --git a/src/lean_spec/subspecs/api/server.py b/src/lean_spec/subspecs/api/server.py index f38ae699..9842fd8b 100644 --- a/src/lean_spec/subspecs/api/server.py +++ b/src/lean_spec/subspecs/api/server.py @@ -14,7 +14,7 @@ from aiohttp import web -from lean_spec.forks import Store +from lean_spec.forks import LstarSpec, Store from .aggregator_controller import AggregatorController from .routes import ADMIN_ROUTES, ROUTES @@ -65,6 +65,9 @@ class ApiServer: config: ApiServerConfig """Server configuration.""" + spec: LstarSpec = field(default_factory=LstarSpec) + """Fork spec used by handlers needing consensus computations (e.g. 
fork-choice weights).""" + store_getter: Callable[[], Store | None] | None = None """Callable that returns the current Store instance.""" @@ -98,6 +101,9 @@ async def start(self) -> None: # Store the store_getter in app for handlers that need store access app["store_getter"] = self.store_getter + # Expose the fork spec for handlers that drive consensus computations. + app["spec"] = self.spec + # Expose the aggregator controller to admin endpoints. # Absence is fine; endpoints return 503 when unset. app["aggregator_controller"] = self.aggregator_controller diff --git a/src/lean_spec/subspecs/chain/service.py b/src/lean_spec/subspecs/chain/service.py index f4c8370f..79597920 100644 --- a/src/lean_spec/subspecs/chain/service.py +++ b/src/lean_spec/subspecs/chain/service.py @@ -35,9 +35,6 @@ logger = logging.getLogger(__name__) -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all chain invocations.""" - @dataclass(slots=True) class ChainService: @@ -60,6 +57,9 @@ class ChainService: clock: SlotClock """Clock for time calculation.""" + spec: LstarSpec = field(default_factory=LstarSpec) + """Fork spec driving consensus methods. Default lets tests skip wiring.""" + _running: bool = field(default=False, repr=False) """Whether the service is running.""" @@ -175,7 +175,7 @@ async def _tick_to(self, target_interval: Interval) -> list[SignedAggregatedAtte # Tick remaining intervals one at a time. while store.time < target_interval: - store, new_aggregates = _SPEC.tick_interval( + store, new_aggregates = self.spec.tick_interval( store, has_proposal=False, is_aggregator=self.sync_service.is_aggregator, diff --git a/src/lean_spec/subspecs/node/node.py b/src/lean_spec/subspecs/node/node.py index 5be242dc..51c69a31 100644 --- a/src/lean_spec/subspecs/node/node.py +++ b/src/lean_spec/subspecs/node/node.py @@ -17,16 +17,16 @@ from collections.abc import Callable from dataclasses import dataclass, field from pathlib import Path -from typing import Final, cast +from typing import Final from lean_spec.forks import ( AggregatedAttestations, Block, BlockBody, ForkProtocol, + LstarSpec, SignedAttestation, SignedBlock, - State, Store, Validators, ) @@ -212,6 +212,13 @@ def from_genesis(cls, config: NodeConfig) -> Node: validator_id = ( config.validator_registry.primary_index() if config.validator_registry else None ) + # The composition root narrows the protocol to its concrete fork. + # Services need the concrete consensus surface (process_slots, build_block, + # tick_interval, ...) which the abstract protocol does not declare. + # When fork #2 lands, replace this with a per-slot dispatcher. + assert isinstance(config.fork, LstarSpec), ( + f"Only LstarSpec is supported at the composition root, got {type(config.fork).__name__}" + ) fork = config.fork store = cls._try_load_store_from_database( database, validator_id, config.genesis_time, config.time_fn, fork @@ -221,7 +228,7 @@ def from_genesis(cls, config: NodeConfig) -> Node: # Generate genesis state from validators. # # Includes initial checkpoints, validator registry, and config. - state = cast(State, fork.generate_genesis(config.genesis_time, config.validators)) + state = fork.generate_genesis(config.genesis_time, config.validators) # Create genesis block. # @@ -238,7 +245,7 @@ def from_genesis(cls, config: NodeConfig) -> Node: # Initialize forkchoice store. # # Genesis block is both justified and finalized. 
- store = cast(Store, fork.create_store(state, block, validator_id)) + store = fork.create_store(state, block, validator_id) # Persist genesis to database if available. # @@ -271,13 +278,14 @@ def from_genesis(cls, config: NodeConfig) -> Node: block_cache=block_cache, clock=clock, network=config.network, + spec=fork, database=database, is_aggregator=config.is_aggregator, aggregate_subnet_ids=config.aggregate_subnet_ids, genesis_start=True, ) - chain_service = ChainService(sync_service=sync_service, clock=clock) + chain_service = ChainService(sync_service=sync_service, clock=clock, spec=fork) network_service = NetworkService( sync_service=sync_service, event_source=config.event_source, @@ -304,6 +312,7 @@ def from_genesis(cls, config: NodeConfig) -> Node: # Store getter captures sync_service to get the live store api_server = ApiServer( config=config.api_config, + spec=fork, store_getter=lambda: sync_service.store, aggregator_controller=aggregator_controller, ) @@ -336,6 +345,7 @@ async def publish_block_wrapper(block: SignedBlock) -> None: sync_service=sync_service, clock=clock, registry=config.validator_registry, + spec=fork, on_block=publish_block_wrapper, on_attestation=publish_attestation_wrapper, ) diff --git a/src/lean_spec/subspecs/sync/service.py b/src/lean_spec/subspecs/sync/service.py index fb0579ff..49946818 100644 --- a/src/lean_spec/subspecs/sync/service.py +++ b/src/lean_spec/subspecs/sync/service.py @@ -66,9 +66,6 @@ logger = logging.getLogger(__name__) -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all sync invocations.""" - @dataclass(slots=True) class _SyncStoreView: @@ -107,34 +104,37 @@ def _ancestor_set(blocks: BlockLookup, head: Bytes32) -> set[Bytes32]: return seen -def default_block_processor( - store: Store, - block: SignedBlock, -) -> Store: +def make_default_block_processor( + spec: LstarSpec, +) -> Callable[[Store, SignedBlock], Store]: """ - Default block processor. + Build a default block processor bound to the given spec. Wraps the pure spec entry point with caller-side fork-choice telemetry. State transition and block processing timings are emitted by the spec itself through the observer, wired at node startup. Everything else here is derived by diffing pre- and post-stores. 
""" - new_store = _SPEC.on_block(store, block) - metrics.lean_head_slot.set(new_store.blocks[new_store.head].slot) - metrics.lean_safe_target_slot.set(new_store.blocks[new_store.safe_target].slot) - metrics.lean_latest_justified_slot.set(new_store.latest_justified.slot) - metrics.lean_latest_finalized_slot.set(new_store.latest_finalized.slot) + def default_block_processor(store: Store, block: SignedBlock) -> Store: + new_store = spec.on_block(store, block) - if new_store.head != store.head: - depth = len( - _ancestor_set(new_store.blocks, store.head) - - _ancestor_set(new_store.blocks, new_store.head) - ) - metrics.lean_fork_choice_reorgs_total.inc() - metrics.lean_fork_choice_reorg_depth.observe(depth) + metrics.lean_head_slot.set(new_store.blocks[new_store.head].slot) + metrics.lean_safe_target_slot.set(new_store.blocks[new_store.safe_target].slot) + metrics.lean_latest_justified_slot.set(new_store.latest_justified.slot) + metrics.lean_latest_finalized_slot.set(new_store.latest_finalized.slot) + + if new_store.head != store.head: + depth = len( + _ancestor_set(new_store.blocks, store.head) + - _ancestor_set(new_store.blocks, new_store.head) + ) + metrics.lean_fork_choice_reorgs_total.inc() + metrics.lean_fork_choice_reorg_depth.observe(depth) + + return new_store - return new_store + return default_block_processor async def _noop_publish_agg(signed_attestation: SignedAggregatedAttestation) -> None: @@ -212,6 +212,9 @@ class SyncService: network: NetworkRequester """Network interface for block requests.""" + spec: LstarSpec = field(default_factory=LstarSpec) + """Fork spec driving consensus methods. Default lets tests skip wiring.""" + database: Database | None = field(default=None) """Optional database for persisting blocks and states.""" @@ -227,8 +230,8 @@ class SyncService: is also True — non-aggregator nodes never import gossip attestations. """ - process_block: Callable[[Store, SignedBlock], Store] = field(default=default_block_processor) - """Block processor function. Defaults to the store's block processing.""" + process_block: Callable[[Store, SignedBlock], Store] | None = field(default=None) + """Block processor function. Defaults to the spec's block processing.""" _publish_agg_fn: Callable[[SignedAggregatedAttestation], Coroutine[None, None, None]] = field( default=_noop_publish_agg @@ -279,6 +282,12 @@ def set_publish_agg_fn( def __post_init__(self) -> None: """Initialize sync components.""" + # Bind the default processor to the injected spec when no override is provided. + # + # Tests pass an explicit processor and skip this path. + if self.process_block is None: + self.process_block = make_default_block_processor(self.spec) + self._init_components() # Genesis validators already hold the full genesis state so they @@ -328,7 +337,9 @@ def _process_block_wrapper( # Delegate to the actual block processor. # # The processor validates the block and updates forkchoice state. - new_store = self.process_block(store, block) + processor = self.process_block + assert processor is not None + new_store = processor(store, block) # Track processed blocks. # @@ -578,7 +589,7 @@ async def on_gossip_attestation( # Invalid attestations (bad signature, unknown target) are rejected. # Validation failures are logged but don't crash the event loop. 
try: - self.store = _SPEC.on_gossip_attestation( + self.store = self.spec.on_gossip_attestation( self.store, signed_attestation=attestation, is_aggregator=is_aggregator_role, @@ -636,7 +647,7 @@ async def on_gossip_aggregated_attestation( ) try: - self.store = _SPEC.on_gossip_aggregated_attestation(self.store, signed_attestation) + self.store = self.spec.on_gossip_aggregated_attestation(self.store, signed_attestation) logger.info( "Aggregated attestation from peer %s slot=%s: validation and signature ok", peer_str, @@ -670,7 +681,7 @@ def _replay_pending_attestations(self) -> None: self._pending_attestations = [] for attestation in pending: try: - self.store = _SPEC.on_gossip_attestation( + self.store = self.spec.on_gossip_attestation( self.store, signed_attestation=attestation, is_aggregator=is_aggregator_role, @@ -682,7 +693,9 @@ def _replay_pending_attestations(self) -> None: self._pending_aggregated_attestations = [] for signed_attestation in pending_agg: try: - self.store = _SPEC.on_gossip_aggregated_attestation(self.store, signed_attestation) + self.store = self.spec.on_gossip_aggregated_attestation( + self.store, signed_attestation + ) except (AssertionError, KeyError): self._pending_aggregated_attestations.append(signed_attestation) diff --git a/src/lean_spec/subspecs/validator/service.py b/src/lean_spec/subspecs/validator/service.py index cb4ab68e..ab60e12d 100644 --- a/src/lean_spec/subspecs/validator/service.py +++ b/src/lean_spec/subspecs/validator/service.py @@ -58,9 +58,6 @@ logger = logging.getLogger(__name__) -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all validator invocations.""" - type BlockPublisher = Callable[[SignedBlock], Awaitable[None]] """Callback for publishing signed blocks.""" type AttestationPublisher = Callable[[SignedAttestation], Awaitable[None]] @@ -93,6 +90,9 @@ class ValidatorService: registry: ValidatorRegistry """Registry of validators we control.""" + spec: LstarSpec = field(default_factory=LstarSpec) + """Fork spec driving consensus methods. Default lets tests skip wiring.""" + on_block: BlockPublisher = field(default=_noop_block_publisher) """Callback invoked when a block is produced.""" @@ -263,7 +263,7 @@ async def _maybe_produce_block(self, slot: Slot) -> None: # We are the proposer for this slot. 
try: - new_store, block, signatures = _SPEC.produce_block_with_signatures( + new_store, block, signatures = self.spec.produce_block_with_signatures( store, slot=slot, validator_index=validator_index, @@ -335,7 +335,7 @@ async def _produce_attestations(self, slot: Slot) -> None: break # Ensure we are attesting to the latest known head - self.sync_service.store = _SPEC.update_head(self.sync_service.store) + self.sync_service.store = self.spec.update_head(self.sync_service.store) store = self.sync_service.store head_state = store.states.get(store.head) @@ -343,7 +343,7 @@ async def _produce_attestations(self, slot: Slot) -> None: return for validator_index in self.registry.indices(): - attestation_data = _SPEC.produce_attestation_data(store, slot) + attestation_data = self.spec.produce_attestation_data(store, slot) signed_attestation = self._sign_attestation(attestation_data, validator_index) self._attestations_produced += 1 @@ -358,7 +358,7 @@ async def _produce_attestations(self, slot: Slot) -> None: self.sync_service.store.validator_id is not None and self.sync_service.is_aggregator ) try: - self.sync_service.store = _SPEC.on_gossip_attestation( + self.sync_service.store = self.spec.on_gossip_attestation( self.sync_service.store, signed_attestation=signed_attestation, is_aggregator=is_aggregator_role, diff --git a/tests/consensus/devnet/state_transition/test_block_processing.py b/tests/consensus/devnet/state_transition/test_block_processing.py index 4b7d3828..de5a7c5d 100644 --- a/tests/consensus/devnet/state_transition/test_block_processing.py +++ b/tests/consensus/devnet/state_transition/test_block_processing.py @@ -12,8 +12,6 @@ from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.types import Boolean, Bytes32, Slot, ValidatorIndex -_SPEC = LstarSpec() - pytestmark = pytest.mark.valid_until("Lstar") @@ -336,7 +334,7 @@ def test_block_with_wrong_slot(state_transition_test: StateTransitionTestFiller) - Essential for slot-based consensus """ pre_state = generate_pre_state() - pre_state = _SPEC.process_slots(pre_state, Slot(1)) + pre_state = LstarSpec().process_slots(pre_state, Slot(1)) state_transition_test( pre=pre_state, diff --git a/tests/consensus/devnet/state_transition/test_finalization.py b/tests/consensus/devnet/state_transition/test_finalization.py index 1278f070..2d2ef906 100644 --- a/tests/consensus/devnet/state_transition/test_finalization.py +++ b/tests/consensus/devnet/state_transition/test_finalization.py @@ -18,8 +18,6 @@ from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Boolean, Slot, ValidatorIndex -_SPEC = LstarSpec() - pytestmark = pytest.mark.valid_until("Lstar") @@ -221,7 +219,8 @@ def test_no_finalization_when_intermediate_justifiable_slot_exists( 7. 
There are no pending justifications """ pre = generate_pre_state() - anchor_root = hash_tree_root(_SPEC.process_slots(pre, Slot(1)).latest_block_header) + anchor_state = LstarSpec().process_slots(pre, Slot(1)) + anchor_root = hash_tree_root(anchor_state.latest_block_header) state_transition_test( pre=pre, diff --git a/tests/consensus/devnet/state_transition/test_genesis.py b/tests/consensus/devnet/state_transition/test_genesis.py index 409fa66d..acc43d1f 100644 --- a/tests/consensus/devnet/state_transition/test_genesis.py +++ b/tests/consensus/devnet/state_transition/test_genesis.py @@ -28,8 +28,6 @@ from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Slot, Uint64, ValidatorIndex -_SPEC = LstarSpec() - pytestmark = pytest.mark.valid_until("Lstar") @@ -248,7 +246,8 @@ def test_first_post_genesis_block_sets_checkpoint_anchor_roots( 5. justified_slots is empty """ pre = generate_pre_state() - anchor_root = hash_tree_root(_SPEC.process_slots(pre, Slot(1)).latest_block_header) + anchor_state = LstarSpec().process_slots(pre, Slot(1)) + anchor_root = hash_tree_root(anchor_state.latest_block_header) state_transition_test( pre=pre, diff --git a/tests/consensus/devnet/state_transition/test_slot_monotonicity.py b/tests/consensus/devnet/state_transition/test_slot_monotonicity.py index 9d910e30..ce4195ea 100644 --- a/tests/consensus/devnet/state_transition/test_slot_monotonicity.py +++ b/tests/consensus/devnet/state_transition/test_slot_monotonicity.py @@ -10,8 +10,6 @@ from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.types import Slot -_SPEC = LstarSpec() - pytestmark = pytest.mark.valid_until("Devnet") @@ -40,7 +38,7 @@ def test_process_slots_target_equal_to_state_slot_rejected( - Protects against replay of already-processed slots. """ pre_state = generate_pre_state() - pre_state = _SPEC.process_slots(pre_state, Slot(1)) + pre_state = LstarSpec().process_slots(pre_state, Slot(1)) state_transition_test( pre=pre_state, @@ -81,7 +79,7 @@ def test_block_at_parent_slot_rejected_when_slot_processing_skipped( when the chain tip is at or above the claimed slot. 
""" pre_state = generate_pre_state() - pre_state = _SPEC.process_slots(pre_state, Slot(1)) + pre_state = LstarSpec().process_slots(pre_state, Slot(1)) state_transition_test( pre=pre_state, diff --git a/tests/lean_spec/helpers/builders.py b/tests/lean_spec/helpers/builders.py index bd9c7e43..ef237e05 100644 --- a/tests/lean_spec/helpers/builders.py +++ b/tests/lean_spec/helpers/builders.py @@ -63,9 +63,6 @@ from .mocks import MockForkchoiceStore, MockNetworkRequester -_SPEC = LstarSpec() -"""Active fork spec — stateless, safe to share across all helper invocations.""" - def make_bytes32(seed: int) -> Bytes32: """Create a deterministic 32-byte value from a seed.""" @@ -142,7 +139,7 @@ def make_genesis_state( """ if validators is None: validators = make_validators(num_validators) - return _SPEC.generate_genesis(genesis_time=Uint64(genesis_time), validators=validators) + return LstarSpec().generate_genesis(genesis_time=Uint64(genesis_time), validators=validators) def make_empty_block_body() -> BlockBody: @@ -339,7 +336,7 @@ def make_genesis_data( validators = make_validators(num_validators) genesis_state = make_genesis_state(validators=validators, genesis_time=genesis_time) genesis_block = make_genesis_block(genesis_state) - store = _SPEC.create_store(genesis_state, genesis_block, validator_id=validator_id) + store = LstarSpec().create_store(genesis_state, genesis_block, validator_id=validator_id) return GenesisData(store, genesis_state, genesis_block) @@ -371,7 +368,7 @@ def make_store_with_attestation_data( key_manager=key_manager, ) store = store.model_copy(update={"time": Interval.from_slot(attestation_slot)}) - attestation_data = _SPEC.produce_attestation_data(store, attestation_slot) + attestation_data = LstarSpec().produce_attestation_data(store, attestation_slot) return store, attestation_data @@ -484,7 +481,7 @@ def make_signed_block_from_store( Returns the updated store (with time advanced) and the signed block. """ - _, block, _ = _SPEC.produce_block_with_signatures(store, slot, proposer_index) + _, block, _ = LstarSpec().produce_block_with_signatures(store, slot, proposer_index) block_root = hash_tree_root(block) proposer_signature = key_manager.sign_block_root(proposer_index, slot, block_root) attestation_signatures = key_manager.build_attestation_signatures(block.body.attestations) @@ -498,7 +495,7 @@ def make_signed_block_from_store( ) target_interval = Interval.from_slot(block.slot) - advanced_store, _ = _SPEC.on_tick(store, target_interval, has_proposal=True) + advanced_store, _ = LstarSpec().on_tick(store, target_interval, has_proposal=True) return advanced_store, signed_block @@ -523,6 +520,7 @@ def create_mock_sync_service( block_cache=BlockCache(), clock=SlotClock(genesis_time=Uint64(0), time_fn=lambda: 1000.0), network=MockNetworkRequester(), + spec=LstarSpec(), database=database, genesis_start=genesis_start, process_block=processor, diff --git a/tests/lean_spec/subspecs/chain/conftest.py b/tests/lean_spec/subspecs/chain/conftest.py index 190bfd29..57c9dfda 100644 --- a/tests/lean_spec/subspecs/chain/conftest.py +++ b/tests/lean_spec/subspecs/chain/conftest.py @@ -6,7 +6,7 @@ import pytest -from lean_spec.subspecs.chain import service as chain_service_module +from lean_spec.forks.lstar.spec import LstarSpec @pytest.fixture(autouse=True) @@ -18,7 +18,9 @@ def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: Pydantic Store. Routing the spec call back lets the mock intercept in-place. 
""" - def tick_interval(store: Any, has_proposal: bool, is_aggregator: bool = False) -> Any: + def tick_interval( + self: LstarSpec, store: Any, has_proposal: bool, is_aggregator: bool = False + ) -> Any: return store.tick_interval(has_proposal, is_aggregator) - monkeypatch.setattr(chain_service_module._SPEC, "tick_interval", tick_interval) + monkeypatch.setattr(LstarSpec, "tick_interval", tick_interval) diff --git a/tests/lean_spec/subspecs/genesis/test_state.py b/tests/lean_spec/subspecs/genesis/test_state.py index 2deeb107..f134b431 100644 --- a/tests/lean_spec/subspecs/genesis/test_state.py +++ b/tests/lean_spec/subspecs/genesis/test_state.py @@ -8,10 +8,8 @@ from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Bytes52, Slot, Uint64, ValidatorIndex -_SPEC = LstarSpec() - -def test_genesis_block_hash_comparison() -> None: +def test_genesis_block_hash_comparison(spec: LstarSpec) -> None: """Test that genesis block hashes are deterministic and differ with different inputs.""" # Create first genesis state with 3 validators # Fill pubkeys with different values (1, 2, 3) @@ -23,7 +21,7 @@ def test_genesis_block_hash_comparison() -> None: ] ) - genesis_state1 = _SPEC.generate_genesis( + genesis_state1 = spec.generate_genesis( genesis_time=Uint64(1000), validators=validators1, ) @@ -41,7 +39,7 @@ def test_genesis_block_hash_comparison() -> None: genesis_block_hash1 = hash_tree_root(genesis_block1) # Create a second genesis state with same config but regenerated (should produce same hash) - genesis_state1_copy = _SPEC.generate_genesis( + genesis_state1_copy = spec.generate_genesis( genesis_time=Uint64(1000), validators=validators1, ) @@ -69,7 +67,7 @@ def test_genesis_block_hash_comparison() -> None: ] ) - genesis_state2 = _SPEC.generate_genesis( + genesis_state2 = spec.generate_genesis( genesis_time=Uint64(1000), # Same genesis_time but different validators validators=validators2, ) @@ -97,7 +95,7 @@ def test_genesis_block_hash_comparison() -> None: ] ) - genesis_state3 = _SPEC.generate_genesis( + genesis_state3 = spec.generate_genesis( genesis_time=Uint64(2000), # Different genesis_time but same validators validators=validators3, ) diff --git a/tests/lean_spec/subspecs/networking/conftest.py b/tests/lean_spec/subspecs/networking/conftest.py index 3650df86..cd8bb05b 100644 --- a/tests/lean_spec/subspecs/networking/conftest.py +++ b/tests/lean_spec/subspecs/networking/conftest.py @@ -6,7 +6,7 @@ import pytest -from lean_spec.subspecs.sync import service as sync_service_module +from lean_spec.forks.lstar.spec import LstarSpec @pytest.fixture(autouse=True) @@ -18,12 +18,12 @@ def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: Store; routing each spec call back to `store.method(...)` lets the mock intercept in-place without changing service code. 
""" - spec = sync_service_module._SPEC - def on_block(store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: + def on_block(self: LstarSpec, store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: return store.on_block(signed_block, *args, **kwargs) def on_gossip_attestation( + self: LstarSpec, store: Any, signed_attestation: Any, *args: Any, @@ -33,10 +33,12 @@ def on_gossip_attestation( return store.on_gossip_attestation(signed_attestation, *args, **kwargs) def on_gossip_aggregated_attestation( - store: Any, signed_attestation: Any, *args: Any, **kwargs: Any + self: LstarSpec, store: Any, signed_attestation: Any, *args: Any, **kwargs: Any ) -> Any: return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) - monkeypatch.setattr(spec, "on_block", on_block) - monkeypatch.setattr(spec, "on_gossip_attestation", on_gossip_attestation) - monkeypatch.setattr(spec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation) + monkeypatch.setattr(LstarSpec, "on_block", on_block) + monkeypatch.setattr(LstarSpec, "on_gossip_attestation", on_gossip_attestation) + monkeypatch.setattr( + LstarSpec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation + ) diff --git a/tests/lean_spec/subspecs/sync/conftest.py b/tests/lean_spec/subspecs/sync/conftest.py index 32e62ff7..d6a841a1 100644 --- a/tests/lean_spec/subspecs/sync/conftest.py +++ b/tests/lean_spec/subspecs/sync/conftest.py @@ -10,8 +10,8 @@ import pytest +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.networking.reqresp.message import Status -from lean_spec.subspecs.sync import service as sync_service_module from lean_spec.types import Bytes32, Checkpoint, Slot @@ -24,12 +24,12 @@ def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: Routing each spec call back to `store.method(...)` lets the mock intercept in-place without a sync-service code change. 
""" - spec = sync_service_module._SPEC - def on_block(store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: + def on_block(self: LstarSpec, store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: return store.on_block(signed_block, *args, **kwargs) def on_gossip_attestation( + self: LstarSpec, store: Any, signed_attestation: Any, *args: Any, @@ -39,13 +39,15 @@ def on_gossip_attestation( return store.on_gossip_attestation(signed_attestation, *args, **kwargs) def on_gossip_aggregated_attestation( - store: Any, signed_attestation: Any, *args: Any, **kwargs: Any + self: LstarSpec, store: Any, signed_attestation: Any, *args: Any, **kwargs: Any ) -> Any: return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) - monkeypatch.setattr(spec, "on_block", on_block) - monkeypatch.setattr(spec, "on_gossip_attestation", on_gossip_attestation) - monkeypatch.setattr(spec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation) + monkeypatch.setattr(LstarSpec, "on_block", on_block) + monkeypatch.setattr(LstarSpec, "on_gossip_attestation", on_gossip_attestation) + monkeypatch.setattr( + LstarSpec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation + ) @pytest.fixture From d55f00acad3f923cce121f6a63ea689788b6a068 Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 23:22:58 +0200 Subject: [PATCH 8/9] refactor(tests): replace conftest monkey-patches with injected spec Three autouse fixtures in chain/sync/networking conftests patched LstarSpec class methods so MockStore / MockForkchoiceStore could intercept consensus calls in place. Class-level patching mutates shared state and runs against every test in the directory whether needed or not. Now that services accept a spec field, tests inject a small StoreInterceptingSpec subclass that forwards each spec call back to the store argument. make_store() (used by sync/networking tests) hands the intercepting spec to the real SyncService transparently. Chain test_service.py threads it through ChainService directly. Two conftests delete entirely; the sync conftest keeps only its sample_checkpoint / sample_status fixtures. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- tests/lean_spec/helpers/builders.py | 4 +- tests/lean_spec/helpers/mocks.py | 32 +++++ tests/lean_spec/subspecs/chain/conftest.py | 26 ---- .../lean_spec/subspecs/chain/test_service.py | 113 ++++++++++++++---- .../lean_spec/subspecs/networking/conftest.py | 44 ------- tests/lean_spec/subspecs/sync/conftest.py | 38 ------ 6 files changed, 127 insertions(+), 130 deletions(-) delete mode 100644 tests/lean_spec/subspecs/chain/conftest.py delete mode 100644 tests/lean_spec/subspecs/networking/conftest.py diff --git a/tests/lean_spec/helpers/builders.py b/tests/lean_spec/helpers/builders.py index ef237e05..d2a9b464 100644 --- a/tests/lean_spec/helpers/builders.py +++ b/tests/lean_spec/helpers/builders.py @@ -61,7 +61,7 @@ ValidatorIndices, ) -from .mocks import MockForkchoiceStore, MockNetworkRequester +from .mocks import MockForkchoiceStore, MockNetworkRequester, StoreInterceptingSpec def make_bytes32(seed: int) -> Bytes32: @@ -520,7 +520,7 @@ def create_mock_sync_service( block_cache=BlockCache(), clock=SlotClock(genesis_time=Uint64(0), time_fn=lambda: 1000.0), network=MockNetworkRequester(), - spec=LstarSpec(), + spec=StoreInterceptingSpec(), database=database, genesis_start=genesis_start, process_block=processor, diff --git a/tests/lean_spec/helpers/mocks.py b/tests/lean_spec/helpers/mocks.py index e17039b8..95bbd2aa 100644 --- a/tests/lean_spec/helpers/mocks.py +++ b/tests/lean_spec/helpers/mocks.py @@ -10,16 +10,48 @@ from contextlib import contextmanager from dataclasses import dataclass, field from types import MappingProxyType +from typing import Any from lean_spec.forks.lstar.containers import SignedBlock from lean_spec.forks.lstar.containers.attestation import SignedAttestation from lean_spec.forks.lstar.containers.attestation.attestation import SignedAggregatedAttestation +from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.networking import PeerId from lean_spec.subspecs.networking.service.events import NetworkEvent from lean_spec.subspecs.ssz.hash import hash_tree_root from lean_spec.types import Bytes32, Slot, Uint64 +class StoreInterceptingSpec(LstarSpec): + """Spec stub that forwards consensus calls to the store argument.""" + + def on_block( # type: ignore[override] + self, store: Any, signed_block: Any, *args: Any, **kwargs: Any + ) -> Any: + """Forward to store.on_block.""" + kwargs.pop("scheme", None) + return store.on_block(signed_block, *args, **kwargs) + + def on_gossip_attestation( # type: ignore[override] + self, store: Any, signed_attestation: Any, *args: Any, **kwargs: Any + ) -> Any: + """Forward to store.on_gossip_attestation.""" + kwargs.pop("scheme", None) + return store.on_gossip_attestation(signed_attestation, *args, **kwargs) + + def on_gossip_aggregated_attestation( # type: ignore[override] + self, store: Any, signed_attestation: Any, *args: Any, **kwargs: Any + ) -> Any: + """Forward to store.on_gossip_aggregated_attestation.""" + return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) + + def tick_interval( # type: ignore[override] + self, store: Any, has_proposal: bool, is_aggregator: bool = False + ) -> Any: + """Forward to store.tick_interval.""" + return store.tick_interval(has_proposal, is_aggregator) + + class MockNetworkRequester: """Mock network that returns pre-configured blocks and tracks requests.""" diff --git a/tests/lean_spec/subspecs/chain/conftest.py b/tests/lean_spec/subspecs/chain/conftest.py deleted file mode 100644 index 57c9dfda..00000000 --- 
a/tests/lean_spec/subspecs/chain/conftest.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Shared fixtures for chain service tests.""" - -from __future__ import annotations - -from typing import Any - -import pytest - -from lean_spec.forks.lstar.spec import LstarSpec - - -@pytest.fixture(autouse=True) -def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: - """Route chain-service spec calls back to the mock store's matching method. - - Chain tests run against `MockStore`, which records calls on its own - `tick_interval`. The real spec implementation expects a fully-formed - Pydantic Store. Routing the spec call back lets the mock intercept in-place. - """ - - def tick_interval( - self: LstarSpec, store: Any, has_proposal: bool, is_aggregator: bool = False - ) -> Any: - return store.tick_interval(has_proposal, is_aggregator) - - monkeypatch.setattr(LstarSpec, "tick_interval", tick_interval) diff --git a/tests/lean_spec/subspecs/chain/test_service.py b/tests/lean_spec/subspecs/chain/test_service.py index 0c2c48d7..6b6e4b8a 100644 --- a/tests/lean_spec/subspecs/chain/test_service.py +++ b/tests/lean_spec/subspecs/chain/test_service.py @@ -5,10 +5,12 @@ from dataclasses import dataclass, field from unittest.mock import patch +from lean_spec.forks.lstar.containers.attestation.attestation import SignedAggregatedAttestation from lean_spec.subspecs.chain import SlotClock from lean_spec.subspecs.chain.config import MILLISECONDS_PER_INTERVAL from lean_spec.subspecs.chain.service import ChainService from lean_spec.types import ZERO_HASH, Bytes32, Slot, Uint64 +from tests.lean_spec.helpers.mocks import StoreInterceptingSpec @dataclass @@ -58,7 +60,7 @@ class MockSyncService: is_aggregator: bool = False published_aggregations: list = field(default_factory=list) - async def publish_aggregated_attestation(self, agg: object) -> None: + async def publish_aggregated_attestation(self, agg: SignedAggregatedAttestation) -> None: """Record published aggregations.""" self.published_aggregations.append(agg) @@ -74,8 +76,11 @@ def test_starts_not_running(self) -> None: """ sync_service = MockSyncService() clock = SlotClock(genesis_time=Uint64(0), time_fn=lambda: 0.0) - # MockSyncService satisfies SyncService interface for testing. 
- chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) assert chain_service.is_running is False @@ -87,7 +92,11 @@ def test_stop_sets_flag(self) -> None: """ sync_service = MockSyncService() clock = SlotClock(genesis_time=Uint64(0), time_fn=lambda: 0.0) - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) chain_service._running = True assert chain_service.is_running is True @@ -103,7 +112,11 @@ async def test_run_sets_running_flag(self) -> None: """ sync_service = MockSyncService() clock = SlotClock(genesis_time=Uint64(0), time_fn=lambda: 0.0) - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) call_count = 0 @@ -135,7 +148,11 @@ async def test_sleep_calculation_mid_interval(self) -> None: current_time = float(genesis) + interval_secs / 2 clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) captured_duration: float | None = None @@ -162,7 +179,11 @@ async def test_sleep_at_interval_boundary(self) -> None: current_time = float(genesis + (MILLISECONDS_PER_INTERVAL // Uint64(1000))) clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) captured_duration: float | None = None @@ -188,7 +209,11 @@ async def test_sleep_before_genesis(self) -> None: current_time = 900.0 # 100 seconds before genesis clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) captured_duration: float | None = None @@ -222,7 +247,11 @@ async def test_ticks_store_with_current_interval(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) call_count = 0 @@ -252,7 +281,11 @@ async def test_has_proposal_always_false(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) tick_count = 0 
@@ -281,7 +314,11 @@ async def test_sync_service_store_updated(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) initial_store = MockStore() sync_service = MockSyncService(store=initial_store) - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) # No ticks before our first run. assert sync_service.store.tick_calls == [] @@ -327,7 +364,11 @@ def advancing_time() -> float: clock = SlotClock(genesis_time=genesis, time_fn=advancing_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) async def advance_and_stop(_duration: float) -> None: nonlocal time_index @@ -364,7 +405,11 @@ async def test_initial_tick_skipped_before_genesis(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) initial_store = MockStore() sync_service = MockSyncService(store=initial_store) - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) # Run just the initial tick without the full run loop. await chain_service._initial_tick() @@ -387,7 +432,11 @@ async def test_initial_tick_executed_after_genesis(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) initial_store = MockStore() sync_service = MockSyncService(store=initial_store) - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) await chain_service._initial_tick() @@ -408,7 +457,11 @@ async def test_initial_tick_at_exact_genesis(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) initial_store = MockStore() sync_service = MockSyncService(store=initial_store) - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) await chain_service._initial_tick() @@ -430,7 +483,11 @@ async def test_initial_tick_skips_stale_intervals(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) await chain_service._initial_tick() @@ -460,7 +517,11 @@ async def test_does_not_reprocess_same_interval(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) sleep_call_count = 0 @@ -494,7 +555,11 @@ async def test_genesis_time_zero(self) -> None: clock = 
SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) async def stop_immediately(_duration: float) -> None: chain_service.stop() @@ -519,7 +584,11 @@ async def test_large_genesis_time(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) async def stop_immediately(_duration: float) -> None: chain_service.stop() @@ -544,7 +613,11 @@ async def test_stop_during_sleep(self) -> None: clock = SlotClock(genesis_time=genesis, time_fn=lambda: current_time) sync_service = MockSyncService() - chain_service = ChainService(sync_service=sync_service, clock=clock) # type: ignore[arg-type] + chain_service = ChainService( + sync_service=sync_service, # type: ignore[arg-type] + clock=clock, + spec=StoreInterceptingSpec(), + ) async def stop_during_sleep(_duration: float) -> None: # Simulate stop being called while sleeping. diff --git a/tests/lean_spec/subspecs/networking/conftest.py b/tests/lean_spec/subspecs/networking/conftest.py deleted file mode 100644 index cd8bb05b..00000000 --- a/tests/lean_spec/subspecs/networking/conftest.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Shared fixtures for networking subspec tests.""" - -from __future__ import annotations - -from typing import Any - -import pytest - -from lean_spec.forks.lstar.spec import LstarSpec - - -@pytest.fixture(autouse=True) -def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: - """Route sync-service spec calls back to the mock store's matching method. - - Networking tests drive `SyncService` against `MockForkchoiceStore` for - isolation. The real spec implementation expects a fully-formed Pydantic - Store; routing each spec call back to `store.method(...)` lets the mock - intercept in-place without changing service code. 
- """ - - def on_block(self: LstarSpec, store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: - return store.on_block(signed_block, *args, **kwargs) - - def on_gossip_attestation( - self: LstarSpec, - store: Any, - signed_attestation: Any, - *args: Any, - **kwargs: Any, - ) -> Any: - kwargs.pop("scheme", None) - return store.on_gossip_attestation(signed_attestation, *args, **kwargs) - - def on_gossip_aggregated_attestation( - self: LstarSpec, store: Any, signed_attestation: Any, *args: Any, **kwargs: Any - ) -> Any: - return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) - - monkeypatch.setattr(LstarSpec, "on_block", on_block) - monkeypatch.setattr(LstarSpec, "on_gossip_attestation", on_gossip_attestation) - monkeypatch.setattr( - LstarSpec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation - ) diff --git a/tests/lean_spec/subspecs/sync/conftest.py b/tests/lean_spec/subspecs/sync/conftest.py index d6a841a1..439694ee 100644 --- a/tests/lean_spec/subspecs/sync/conftest.py +++ b/tests/lean_spec/subspecs/sync/conftest.py @@ -6,50 +6,12 @@ from __future__ import annotations -from typing import Any - import pytest -from lean_spec.forks.lstar.spec import LstarSpec from lean_spec.subspecs.networking.reqresp.message import Status from lean_spec.types import Bytes32, Checkpoint, Slot -@pytest.fixture(autouse=True) -def _delegate_spec_to_store(monkeypatch: pytest.MonkeyPatch) -> None: - """Route sync-service spec calls back to the mock store's matching method. - - Sync tests run against `MockForkchoiceStore`, which records calls on its own - methods. The real spec implementation expects a fully-formed Pydantic Store. - Routing each spec call back to `store.method(...)` lets the mock intercept - in-place without a sync-service code change. - """ - - def on_block(self: LstarSpec, store: Any, signed_block: Any, *args: Any, **kwargs: Any) -> Any: - return store.on_block(signed_block, *args, **kwargs) - - def on_gossip_attestation( - self: LstarSpec, - store: Any, - signed_attestation: Any, - *args: Any, - **kwargs: Any, - ) -> Any: - kwargs.pop("scheme", None) - return store.on_gossip_attestation(signed_attestation, *args, **kwargs) - - def on_gossip_aggregated_attestation( - self: LstarSpec, store: Any, signed_attestation: Any, *args: Any, **kwargs: Any - ) -> Any: - return store.on_gossip_aggregated_attestation(signed_attestation, *args, **kwargs) - - monkeypatch.setattr(LstarSpec, "on_block", on_block) - monkeypatch.setattr(LstarSpec, "on_gossip_attestation", on_gossip_attestation) - monkeypatch.setattr( - LstarSpec, "on_gossip_aggregated_attestation", on_gossip_aggregated_attestation - ) - - @pytest.fixture def sample_checkpoint() -> Checkpoint: """Sample checkpoint for sync tests.""" From 11e21ceec5853c0999b6459ff9959d69acf1d70d Mon Sep 17 00:00:00 2001 From: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Date: Sun, 3 May 2026 23:31:34 +0200 Subject: [PATCH 9/9] fix(tests): patch validator spec on the instance, not the dropped module Two validator tests still resolved `lean_spec.subspecs.validator.service._SPEC`, which was removed when the spec moved onto the service as a field. Patching now targets `service.spec` directly via `patch.object`, which also exercises the single instance the test actually calls into. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- tests/lean_spec/subspecs/validator/test_service.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/lean_spec/subspecs/validator/test_service.py b/tests/lean_spec/subspecs/validator/test_service.py index e5403576..0d0a4ebf 100644 --- a/tests/lean_spec/subspecs/validator/test_service.py +++ b/tests/lean_spec/subspecs/validator/test_service.py @@ -496,8 +496,9 @@ async def test_assertion_error_is_logged_and_skipped( on_block=lambda b: blocks.append(b), # type: ignore[arg-type, return-value] ) - with patch( - "lean_spec.subspecs.validator.service._SPEC.produce_block_with_signatures", + with patch.object( + service.spec, + "produce_block_with_signatures", side_effect=AssertionError("mismatch"), ): # Slot 0: proposer is validator 0 (0 % 8 = 0), which is in the registry. @@ -678,8 +679,9 @@ async def capture_att(att: SignedAttestation) -> None: with ( caplog.at_level("DEBUG"), - patch( - "lean_spec.subspecs.validator.service._SPEC.on_gossip_attestation", + patch.object( + service.spec, + "on_gossip_attestation", side_effect=RuntimeError("store error"), ), ):