5 changes: 4 additions & 1 deletion .github/scripts/generate_eip_report.py
@@ -1,4 +1,7 @@
"""Generate a markdown report of outdated EIP references from the EIP version checker output."""
"""
Generate a markdown report of outdated EIP references from the EIP version
checker output.
"""

import os
import re
2 changes: 1 addition & 1 deletion .vscode/settings.json
@@ -14,7 +14,7 @@
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[python]": {
"editor.rulers": [100],
"editor.rulers": [79, 100],
"editor.formatOnSave": true,
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.codeActionsOnSave": {
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -129,7 +129,7 @@ line-length = 99
[tool.ruff.lint]
select = ["E", "F", "B", "W", "I", "A", "N", "D", "C", "ARG001"]
fixable = ["I", "B", "E", "F", "W", "D", "C"]
ignore = ["D205", "D203", "D212", "D415", "C420", "C901"]
ignore = ["D200", "D205", "D203", "D212", "D415", "C420", "C901"]

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["ARG001"] # TODO: ethereum/execution-spec-tests#2188
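
For context on the D200 addition above: D200 is ruff's pydocstyle check that a one-line docstring must fit on a single line, so ignoring it allows short summaries to be wrapped onto their own lines, which is the pattern the docstring reflows in this diff follow. A small illustration with hypothetical functions:

# Without ignoring D200, ruff requires a short docstring to stay on one line:
def check_fixture_hash_strict():
    """Check that the fixture hash matches the recomputed hash."""


# With D200 ignored (as in this diff), the same summary may be wrapped so the
# docstring stays under the 79-character ruler added to the VS Code settings:
def check_fixture_hash_relaxed():
    """
    Check that the fixture hash matches the recomputed hash.
    """
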
13 changes: 8 additions & 5 deletions src/cli/check_fixtures.py
@@ -27,14 +27,15 @@ def check_json(json_file_path: Path):
"""
Check all fixtures in the specified json file:
1. Load the json file into a pydantic model. This checks there are no
Validation errors when loading fixtures into EEST models.
Validation errors when loading fixtures into EEST models.
2. Serialize the loaded pydantic model to "json" (actually python data
structures, ready to written as json).
structures, ready to written as json).
3. Load the serialized data back into a pydantic model (to get an updated
hash) from step 2.
hash) from step 2.
4. Compare hashes:
a. Compare the newly calculated hashes from step 2. and 3. and
b. If present, compare info["hash"] with the calculated hash from step 2.
b. If present, compare info["hash"] with the calculated hash from
step 2.
"""
fixtures: Fixtures = Fixtures.model_validate_json(json_file_path.read_text())
fixtures_json = to_json(fixtures)
@@ -86,7 +87,9 @@ def check_json(json_file_path: Path):
help="Stop and raise any exceptions encountered while checking fixtures.",
)
def check_fixtures(input_str: str, quiet_mode: bool, stop_on_error: bool):
"""Perform some checks on the fixtures contained in the specified directory."""
"""
Perform some checks on the fixtures contained in the specified directory.
"""
input_path = Path(input_str)
success = True
file_count = 0
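
The reflowed check_json docstring describes a load, serialize, reload, and compare-hashes round trip. A minimal sketch of that flow using plain pydantic models in place of the EEST Fixtures type (the model shapes and hash helper below are illustrative assumptions, not the project's actual API):

import hashlib
import json

from pydantic import BaseModel, RootModel


class Fixture(BaseModel):
    """Stand-in for a single EEST fixture (illustrative only)."""

    name: str
    data: dict


class Fixtures(RootModel[dict[str, Fixture]]):
    """Stand-in for the EEST Fixtures collection model."""


def stable_hash(obj: dict) -> str:
    """Hash the JSON-serializable form with sorted keys for stability."""
    return hashlib.sha256(json.dumps(obj, sort_keys=True).encode()).hexdigest()


def check_round_trip(raw_json: str) -> bool:
    # 1. Load the json into a pydantic model (validates against the schema).
    fixtures = Fixtures.model_validate_json(raw_json)
    # 2. Serialize back to python data structures ready to be written as json.
    fixtures_json = fixtures.model_dump()
    # 3. Load the serialized data into the model again.
    reloaded = Fixtures.model_validate(fixtures_json)
    # 4. Compare the hashes computed from steps 2 and 3.
    return stable_hash(fixtures_json) == stable_hash(reloaded.model_dump())
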
14 changes: 8 additions & 6 deletions src/cli/compare_fixtures.py
@@ -1,9 +1,9 @@
"""
Compare two fixture folders and remove duplicates based on fixture hashes.

This tool reads the .meta/index.json files from two fixture directories and identifies
fixtures with identical hashes on a test case basis, then removes the duplicates from
both of the folders. Used within the coverage workflow.
This tool reads the .meta/index.json files from two fixture directories and
identifies fixtures with identical hashes on a test case basis, then removes
the duplicates from both of the folders. Used within the coverage workflow.
"""

import json
@@ -95,8 +95,8 @@ def batch_remove_fixtures_from_files(removals_by_file):

def rewrite_index(folder: Path, index: IndexFile, dry_run: bool):
"""
Rewrite the index to the correct index file, or if the test count was reduced to zero,
the entire directory is deleted.
Rewrite the index to the correct index file, or if the test count was
reduced to zero, the entire directory is deleted.
"""
if len(index.test_cases) > 0:
# Just rewrite the index
@@ -130,7 +130,7 @@ def main(
dry_run: bool,
abort_on_empty_patch: bool,
):
"""Compare two fixture folders and remove duplicates based on fixture hashes."""
"""
Compare two fixture folders and remove duplicates based on fixture hashes.
"""
try:
# Load indices
base_index = load_index(base)
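
The compare_fixtures docstring above describes reading .meta/index.json from two fixture folders and removing test cases whose hashes match in both. A simplified sketch of that comparison over plain dictionaries (the index layout here is an assumption for illustration; the real tool works on EEST's IndexFile model and deletes fixture files on disk):

import json
from pathlib import Path


def load_hashes(folder: Path) -> dict[str, str]:
    """Map test-case id to fixture hash from .meta/index.json (assumed layout)."""
    index = json.loads((folder / ".meta" / "index.json").read_text())
    return {case["id"]: case["fixture_hash"] for case in index["test_cases"]}


def duplicate_ids(base: Path, patch: Path) -> set[str]:
    """Return test-case ids whose fixture hash is identical in both folders."""
    base_hashes = load_hashes(base)
    patch_hashes = load_hashes(patch)
    return {
        case_id
        for case_id, digest in base_hashes.items()
        if patch_hashes.get(case_id) == digest
    }
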
13 changes: 9 additions & 4 deletions src/cli/eest/commands/clean.py
@@ -19,18 +19,23 @@
def clean(all_files: bool, dry_run: bool, verbose: bool):
"""
Remove all generated files and directories from the repository.
If `--all` is specified, the virtual environment and .tox directory will also be removed.

If `--all` is specified, the virtual environment and .tox directory will
also be removed.

Args:
all_files (bool): Remove the virtual environment and .tox directory as well.
all_files (bool): Remove the virtual environment and .tox directory
as well.

dry_run (bool): Simulate the cleanup without removing files.

verbose (bool): Show verbose output.

Note: The virtual environment and .tox directory are not removed by default.
Note: The virtual environment and .tox directory are not removed by
default.

Example: Cleaning all generated files and directories and show the deleted items.
Example: Cleaning all generated files and directories and show the deleted
items.

uv run eest clean --all -v

13 changes: 8 additions & 5 deletions src/cli/eest/make/cli.py
@@ -1,11 +1,14 @@
"""
The `make` CLI streamlines the process of scaffolding tasks, such as generating new test files,
enabling developers to concentrate on the core aspects of specification testing.
The `make` CLI streamlines the process of scaffolding tasks, such as generating
new test files, enabling developers to concentrate on the core aspects of
specification testing.


The module calls the appropriate function for the subcommand. If an invalid subcommand
is chosen, it throws an error and shows a list of valid subcommands. If no subcommand
is present, it shows a list of valid subcommands to choose from.

The module calls the appropriate function for the subcommand. If an invalid
subcommand is chosen, it throws an error and shows a list of valid subcommands.
If no subcommand is present, it shows a list of valid subcommands to choose
from.
"""

import click
7 changes: 4 additions & 3 deletions src/cli/eest/make/commands/__init__.py
@@ -1,7 +1,8 @@
"""
Holds subcommands for the make command. New subcommands must be created as
modules and exported from this package, then registered under the make command in
`cli.py`.
Holds subcommands for the make command.

New subcommands must be created as modules and exported from this package,
then registered under the make command in `cli.py`.
"""

from .env import create_default_env
16 changes: 9 additions & 7 deletions src/cli/eest/make/commands/test.py
@@ -1,9 +1,10 @@
"""
Provides a CLI command to scaffold a test file.

The `test` command guides the user through a series of prompts to generate a test file
based on the selected test type, fork, EIP number, and EIP name. The generated test file
is saved in the appropriate directory with a rendered template using Jinja2.
The `test` command guides the user through a series of prompts to generate a
test file based on the selected test type, fork, EIP number, and EIP name. The
generated test file is saved in the appropriate directory with a rendered
template using Jinja2.
"""

import os
@@ -38,10 +39,11 @@ def test():
"""
Generate a new test file for an EIP.

This function guides the user through a series of prompts to generate a test file
for Ethereum execution specifications. The user is prompted to select the type of test,
the fork to use, and to provide the EIP number and name. Based on the inputs, a test file
is created in the appropriate directory with a rendered template.
This function guides the user through a series of prompts to generate a
test file for Ethereum execution specifications. The user is prompted to
select the type of test, the fork to use, and to provide the EIP number and
name. Based on the inputs, a test file is created in the appropriate
directory with a rendered template.

Example:
uv run eest make test
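
The test command's docstring describes rendering the prompted answers (test type, fork, EIP number and name) into a new test file via a Jinja2 template. A bare-bones sketch of that final rendering step, with a made-up template string and output layout (the project's real templates and directory conventions are not shown in this diff):

from pathlib import Path

from jinja2 import Template

# A stand-in for one of the project's Jinja2 test templates.
TEMPLATE = Template(
    '"""Tests for EIP-{{ eip_number }}: {{ eip_name }}."""\n\n'
    'REFERENCE_SPEC_GIT_PATH = "EIPS/eip-{{ eip_number }}.md"\n'
)


def scaffold_test(fork: str, eip_number: int, eip_name: str) -> Path:
    """Render the template and write it under a fork/EIP specific directory."""
    slug = eip_name.lower().replace(" ", "_")
    out_dir = Path("tests") / fork.lower() / f"eip{eip_number}_{slug}"
    out_dir.mkdir(parents=True, exist_ok=True)
    out_file = out_dir / f"test_{slug}.py"
    out_file.write_text(TEMPLATE.render(eip_number=eip_number, eip_name=eip_name))
    return out_file
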
4 changes: 3 additions & 1 deletion src/cli/eest/quotes.py
@@ -51,5 +51,7 @@ def box_quote(quote):


def get_quote():
"""Return random inspirational quote related to system design formatted in a box."""
"""
Return random inspirational quote formatted in a box.
"""
return box_quote(random.choice(make_something_great))
38 changes: 21 additions & 17 deletions src/cli/eofwrap.py
@@ -1,6 +1,6 @@
"""
Generate a JSON blockchain test from an existing JSON blockchain test by wrapping its pre-state
code in EOF wherever possible.
Generate a JSON blockchain test from an existing JSON blockchain test by
wrapping its pre-state code in EOF wherever possible.

Example Usage:

@@ -44,8 +44,8 @@
@click.option("--traces", is_flag=True, type=bool)
def eof_wrap(input_path: str, output_dir: str, traces: bool):
"""
Wrap JSON blockchain test file(s) found at `input_path` and
outputs them to the `output_dir`.
Wrap JSON blockchain test file(s) found at `input_path` and outputs them to
the `output_dir`.
"""
eof_wrapper = EofWrapper()

Expand Down Expand Up @@ -116,7 +116,9 @@ class EofWrapper:
GENERATION_ERRORS = "generation_errors"

def __init__(self):
"""Initialize the EofWrapper with metrics tracking and a unique EOF set."""
"""
Initialize the EofWrapper with metrics tracking and a unique EOF set.
"""
self.metrics = {
self.FILES_GENERATED: 0,
self.FILES_SKIPPED: 0,
@@ -135,7 +137,8 @@ def __init__(self):

file_skip_list = [
"Pyspecs",
# EXTCODE* opcodes return different results for EOF targets and that is tested elsewhere
# EXTCODE* opcodes return different results for EOF targets and that is
# tested elsewhere
"stExtCodeHash",
# bigint syntax
"ValueOverflowParis",
@@ -168,10 +171,11 @@ def __init__(self):

def wrap_file(self, in_path: str, out_path: str, traces: bool):
"""
Wrap code from a blockchain test JSON file from `in_path` into EOF containers,
wherever possible. If not possible - skips and tracks that in metrics. Possible means
at least one account's code can be wrapped in a valid EOF container and the assertions
on post state are satisfied.
Wrap code from a blockchain test JSON file from `in_path` into EOF
containers, wherever possible. If not possible - skips and tracks that
in metrics. Possible means at least one account's code can be wrapped
in a valid EOF container and the assertions on post state are
satisfied.
"""
for skip in self.file_skip_list:
if skip in in_path:
@@ -301,9 +305,9 @@ def _wrap_fixture(self, fixture: BlockchainFixture, traces: bool):

test.blocks.append(block)
elif isinstance(fixture_block, InvalidFixtureBlock):
# Skip - invalid blocks are not supported. Reason: FixtureTransaction doesn't
# support expected exception. But we can continue and test the remaining
# blocks.
# Skip - invalid blocks are not supported. Reason:
# FixtureTransaction doesn't support expected exception. But we
# can continue and test the remaining blocks.
self.metrics[self.INVALID_BLOCKS_SKIPPED] += 1
else:
raise TypeError("not a FixtureBlock")
@@ -331,13 +335,13 @@ def _validate_eof(self, container: Container, metrics: bool = True) -> bool:
return True


# `no_type_check` required because OpcodeWithOperand.opcode can be `None` when formatting as a
# string, but here it can never be `None`.
# `no_type_check` required because OpcodeWithOperand.opcode can be `None` when
# formatting as a string, but here it can never be `None`.
@no_type_check
def wrap_code(account_code: Bytes) -> Container:
"""
Wrap `account_code` into a simplest EOF container, applying some simple heuristics in
order to obtain a valid code section termination.
Wrap `account_code` into a simplest EOF container, applying some simple
heuristics in order to obtain a valid code section termination.
"""
assert len(account_code) > 0

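
The wrap_code docstring mentions simple heuristics for obtaining a valid code-section termination. A deliberately naive, bytes-level illustration of one such heuristic (append STOP when the code does not already end in a terminating opcode); the real implementation also has to account for trailing PUSH data and builds an actual EOF Container object:

# Terminating opcodes after which no further instruction is required:
# STOP, RETURN, REVERT, INVALID, SELFDESTRUCT.
TERMINATING = {0x00, 0xF3, 0xFD, 0xFE, 0xFF}


def terminate_code(account_code: bytes) -> bytes:
    """Append STOP if the code does not already end in a terminating opcode."""
    assert len(account_code) > 0
    if account_code[-1] in TERMINATING:
        return account_code
    return account_code + b"\x00"
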
14 changes: 9 additions & 5 deletions src/cli/evm_bytes.py
@@ -62,7 +62,8 @@ def terminating(self) -> bool:
@property
def bytecode(self) -> Bytecode:
"""Opcode as bytecode with its operands if any."""
# opcode.opcode[*opcode.operands] crashes `black` formatter and doesn't work.
# opcode.opcode[*opcode.operands] crashes `black` formatter and doesn't
# work.
if self.opcode:
return self.opcode.__getitem__(*self.operands) if self.operands else self.opcode
else:
@@ -181,7 +182,8 @@ def hex_string(hex_string: str, assembly: bool):

Output 1:
\b
Op.PUSH1[0x42] + Op.PUSH1[0x0] + Op.MSTORE + Op.PUSH1[0x20] + Op.PUSH1[0x0] + Op.RETURN
Op.PUSH1[0x42] + Op.PUSH1[0x0] + Op.MSTORE + Op.PUSH1[0x20] +
Op.PUSH1[0x0] + Op.RETURN

Example 2: Convert a hex string to assembly
uv run evm_bytes hex-string --assembly 604260005260206000F3
@@ -207,14 +209,16 @@ def binary_file(binary_file, assembly: bool):
"""
Convert the BINARY_FILE containing EVM bytes to Python Opcodes or assembly.

BINARY_FILE is a binary file containing EVM bytes, use `-` to read from stdin.
BINARY_FILE is a binary file containing EVM bytes, use `-` to read from
stdin.

Returns:
(str): The processed EVM opcodes in Python or assembly format.

Example: Convert the Withdrawal Request contract to assembly
\b
uv run evm_bytes binary-file ./src/ethereum_test_forks/forks/contracts/withdrawal_request.bin --assembly
uv run evm_bytes binary-file ./src/ethereum_test_forks/forks/
contracts/withdrawal_request.bin --assembly

Output:
\b
Expand All @@ -225,6 +229,6 @@ def binary_file(binary_file, assembly: bool):
jumpi
...

""" # noqa: E501,D301
""" # noqa: D301
processed_output = format_opcodes(process_evm_bytes(binary_file.read()), assembly=assembly)
click.echo(processed_output)
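
The hex-string example in the docstring above maps raw EVM bytes to Op.* expressions. A self-contained sketch of the same decoding with a deliberately tiny opcode table (only the opcodes needed for that example; the real tool uses the full EEST opcode definitions):

# Minimal opcode table: byte value -> (mnemonic, length of immediate operand).
OPCODES = {0x60: ("PUSH1", 1), 0x52: ("MSTORE", 0), 0xF3: ("RETURN", 0)}


def disassemble(hex_string: str) -> list[str]:
    """Decode a hex string into 'NAME[0xNN]' style mnemonics."""
    code = bytes.fromhex(hex_string)
    out, i = [], 0
    while i < len(code):
        name, push_len = OPCODES[code[i]]
        operand = code[i + 1 : i + 1 + push_len]
        out.append(f"{name}[0x{operand.hex()}]" if push_len else name)
        i += 1 + push_len
    return out


print(" + ".join(disassemble("604260005260206000F3")))
# PUSH1[0x42] + PUSH1[0x00] + MSTORE + PUSH1[0x20] + PUSH1[0x00] + RETURN
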
16 changes: 10 additions & 6 deletions src/cli/extract_config.py
@@ -1,9 +1,11 @@
#!/usr/bin/env python
"""
CLI tool to extract client configuration files (chainspec/genesis.json) from Ethereum clients.
CLI tool to extract client configuration files (chainspec/genesis.json) from
Ethereum clients.

This tool spawns an Ethereum client using Hive and extracts the generated configuration
files such as /chainspec/test.json, /configs/test.cfg, or /genesis.json from the Docker container.
This tool spawns an Ethereum client using Hive and extracts the generated
configuration files such as /chainspec/test.json, /configs/test.cfg, or
/genesis.json from the Docker container.
"""

import io
@@ -119,7 +121,9 @@ def create_genesis_from_fixture(fixture_path: Path) -> Tuple[FixtureHeader, Allo


def get_client_environment_for_fixture(fork: Fork, chain_id: int) -> dict:
"""Get the environment variables for starting a client with the given fixture."""
"""
Get the environment variables for starting a client with the given fixture.
"""
if fork not in ruleset:
raise ValueError(f"Fork '{fork}' not found in hive ruleset")

@@ -176,8 +180,8 @@ def extract_config(
Extract client configuration files from Ethereum clients.

This tool spawns an Ethereum client using Hive and extracts the generated
configuration files such as /chainspec/test.json, /configs/test.cfg, or /genesis.json
from the Docker container.
configuration files such as /chainspec/test.json, /configs/test.cfg, or
/genesis.json from the Docker container.
"""
if not fixture:
raise click.UsageError("No fixture provided, use --fixture to specify a fixture")
3 changes: 2 additions & 1 deletion src/cli/fillerconvert/fillerconvert.py
@@ -44,5 +44,6 @@ def main() -> None:
# or file.endswith("vmPerformance/performanceTesterFiller.yml")
# or file.endswith("vmPerformance/loopExpFiller.yml")
# or file.endswith("vmPerformance/loopMulFiller.yml")
# or file.endswith("stRevertTest/RevertRemoteSubCallStorageOOGFiller.yml")
# or
# file.endswith("stRevertTest/RevertRemoteSubCallStorageOOGFiller.yml")
# or file.endswith("stSolidityTest/SelfDestructFiller.yml")
10 changes: 5 additions & 5 deletions src/cli/fillerconvert/verify_filled.py
@@ -34,10 +34,9 @@ class FilledStateTest(RootModel[dict[str, StateTest]]):

def verify_refilled(refilled: Path, original: Path) -> int:
"""
Verify post hash of the refilled test against original:
Regex the original d,g,v from the refilled test name.
Find the post record for this d,g,v and the fork of refilled test.
Compare the post hash.
Verify post hash of the refilled test against original: Regex the original
d,g,v from the refilled test name. Find the post record for this d,g,v and
the fork of refilled test. Compare the post hash.
"""
verified_vectors = 0
json_str = refilled.read_text(encoding="utf-8")
@@ -46,7 +45,8 @@ def verify_refilled(refilled: Path, original: Path) -> int:
json_str = original.read_text(encoding="utf-8")
original_test_wrapper = FilledStateTest.model_validate_json(json_str)

# Each original test has only 1 test with many posts for each fork and many txs
# Each original test has only 1 test with many posts for each fork and many
# txs
original_test_name, test_original = list(original_test_wrapper.root.items())[0]

for refilled_test_name, refilled_test in refilled_test_wrapper.root.items():
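
The verify_refilled docstring above relies on pulling the d, g, v indexes and fork out of the refilled test name before comparing post hashes. A small sketch of that extraction step, assuming the conventional _d<N>g<N>v<N>_<Fork> suffix used by filled state tests (the exact name format is an assumption, not shown in this diff):

import re

# Conventional suffix of filled state-test names, e.g. "addmod_d0g1v0_Cancun".
NAME_RE = re.compile(r"_d(?P<d>\d+)g(?P<g>\d+)v(?P<v>\d+)_(?P<fork>\w+)$")


def parse_indexes(test_name: str) -> tuple[int, int, int, str]:
    """Extract the data/gas/value indexes and fork from a refilled test name."""
    match = NAME_RE.search(test_name)
    if match is None:
        raise ValueError(f"unexpected test name format: {test_name}")
    return (int(match["d"]), int(match["g"]), int(match["v"]), match["fork"])


assert parse_indexes("addmod_d0g1v0_Cancun") == (0, 1, 0, "Cancun")
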