diff --git a/.github/scripts/generate_eip_report.py b/.github/scripts/generate_eip_report.py
index 66ff8534c53..8cb3ab075e7 100644
--- a/.github/scripts/generate_eip_report.py
+++ b/.github/scripts/generate_eip_report.py
@@ -1,4 +1,7 @@
-"""Generate a markdown report of outdated EIP references from the EIP version checker output."""
+"""
+Generate a markdown report of outdated EIP references from the EIP version
+checker output.
+"""
import os
import re
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 0965b9249c2..a5f6cc1db9f 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -14,7 +14,7 @@
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[python]": {
- "editor.rulers": [100],
+ "editor.rulers": [79, 100],
"editor.formatOnSave": true,
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.codeActionsOnSave": {
diff --git a/pyproject.toml b/pyproject.toml
index d85a73efbee..3c861811aa1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -129,7 +129,7 @@ line-length = 99
[tool.ruff.lint]
select = ["E", "F", "B", "W", "I", "A", "N", "D", "C", "ARG001"]
fixable = ["I", "B", "E", "F", "W", "D", "C"]
-ignore = ["D205", "D203", "D212", "D415", "C420", "C901"]
+ignore = ["D200", "D205", "D203", "D212", "D415", "C420", "C901"]
[tool.ruff.lint.per-file-ignores]
"tests/*" = ["ARG001"] # TODO: ethereum/execution-spec-tests#2188
diff --git a/src/cli/check_fixtures.py b/src/cli/check_fixtures.py
index 6660bceab77..1497d5b2810 100644
--- a/src/cli/check_fixtures.py
+++ b/src/cli/check_fixtures.py
@@ -27,14 +27,15 @@ def check_json(json_file_path: Path):
"""
Check all fixtures in the specified json file:
1. Load the json file into a pydantic model. This checks there are no
- Validation errors when loading fixtures into EEST models.
+ Validation errors when loading fixtures into EEST models.
2. Serialize the loaded pydantic model to "json" (actually python data
- structures, ready to written as json).
+       structures, ready to be written as json).
3. Load the serialized data back into a pydantic model (to get an updated
- hash) from step 2.
+ hash) from step 2.
4. Compare hashes:
a. Compare the newly calculated hashes from step 2. and 3. and
- b. If present, compare info["hash"] with the calculated hash from step 2.
+ b. If present, compare info["hash"] with the calculated hash from
+ step 2.
"""
fixtures: Fixtures = Fixtures.model_validate_json(json_file_path.read_text())
fixtures_json = to_json(fixtures)
@@ -86,7 +87,9 @@ def check_json(json_file_path: Path):
help="Stop and raise any exceptions encountered while checking fixtures.",
)
def check_fixtures(input_str: str, quiet_mode: bool, stop_on_error: bool):
- """Perform some checks on the fixtures contained in the specified directory."""
+ """
+ Perform some checks on the fixtures contained in the specified directory.
+ """
input_path = Path(input_str)
success = True
file_count = 0
diff --git a/src/cli/compare_fixtures.py b/src/cli/compare_fixtures.py
index 787e0f9f077..bd7a5b9673e 100644
--- a/src/cli/compare_fixtures.py
+++ b/src/cli/compare_fixtures.py
@@ -1,9 +1,9 @@
"""
Compare two fixture folders and remove duplicates based on fixture hashes.
-This tool reads the .meta/index.json files from two fixture directories and identifies
-fixtures with identical hashes on a test case basis, then removes the duplicates from
-both of the folders. Used within the coverage workflow.
+This tool reads the .meta/index.json files from two fixture directories and
+identifies fixtures with identical hashes on a test case basis, then removes
+the duplicates from both of the folders. Used within the coverage workflow.
"""
import json
@@ -95,8 +95,8 @@ def batch_remove_fixtures_from_files(removals_by_file):
def rewrite_index(folder: Path, index: IndexFile, dry_run: bool):
"""
- Rewrite the index to the correct index file, or if the test count was reduced to zero,
- the entire directory is deleted.
+ Rewrite the index to the correct index file, or if the test count was
+ reduced to zero, the entire directory is deleted.
"""
if len(index.test_cases) > 0:
# Just rewrite the index
@@ -130,7 +130,9 @@ def main(
dry_run: bool,
abort_on_empty_patch: bool,
):
- """Compare two fixture folders and remove duplicates based on fixture hashes."""
+ """
+ Compare two fixture folders and remove duplicates based on fixture hashes.
+ """
try:
# Load indices
base_index = load_index(base)
diff --git a/src/cli/eest/commands/clean.py b/src/cli/eest/commands/clean.py
index a0a65ab743a..58f7b93dacc 100644
--- a/src/cli/eest/commands/clean.py
+++ b/src/cli/eest/commands/clean.py
@@ -19,18 +19,23 @@
def clean(all_files: bool, dry_run: bool, verbose: bool):
"""
Remove all generated files and directories from the repository.
- If `--all` is specified, the virtual environment and .tox directory will also be removed.
+
+ If `--all` is specified, the virtual environment and .tox directory will
+ also be removed.
Args:
- all_files (bool): Remove the virtual environment and .tox directory as well.
+ all_files (bool): Remove the virtual environment and .tox directory
+ as well.
dry_run (bool): Simulate the cleanup without removing files.
verbose (bool): Show verbose output.
- Note: The virtual environment and .tox directory are not removed by default.
+ Note: The virtual environment and .tox directory are not removed by
+ default.
- Example: Cleaning all generated files and directories and show the deleted items.
+ Example: Cleaning all generated files and directories and show the deleted
+ items.
uv run eest clean --all -v
diff --git a/src/cli/eest/make/cli.py b/src/cli/eest/make/cli.py
index 413050d7a70..ae0c75ba8a7 100644
--- a/src/cli/eest/make/cli.py
+++ b/src/cli/eest/make/cli.py
@@ -1,11 +1,14 @@
"""
-The `make` CLI streamlines the process of scaffolding tasks, such as generating new test files,
-enabling developers to concentrate on the core aspects of specification testing.
+The `make` CLI streamlines the process of scaffolding tasks, such as generating
+new test files, enabling developers to concentrate on the core aspects of
+specification testing.
-The module calls the appropriate function for the subcommand. If an invalid subcommand
-is chosen, it throws an error and shows a list of valid subcommands. If no subcommand
-is present, it shows a list of valid subcommands to choose from.
+
+The module calls the appropriate function for the subcommand. If an invalid
+subcommand is chosen, it throws an error and shows a list of valid subcommands.
+If no subcommand is present, it shows a list of valid subcommands to choose
+from.
"""
import click
diff --git a/src/cli/eest/make/commands/__init__.py b/src/cli/eest/make/commands/__init__.py
index 706333ca042..64234b30009 100644
--- a/src/cli/eest/make/commands/__init__.py
+++ b/src/cli/eest/make/commands/__init__.py
@@ -1,7 +1,8 @@
"""
-Holds subcommands for the make command. New subcommands must be created as
-modules and exported from this package, then registered under the make command in
-`cli.py`.
+Holds subcommands for the make command.
+
+New subcommands must be created as modules and exported from this package,
+then registered under the make command in `cli.py`.
"""
from .env import create_default_env
diff --git a/src/cli/eest/make/commands/test.py b/src/cli/eest/make/commands/test.py
index 0eb6e49141e..2d30ee195bb 100644
--- a/src/cli/eest/make/commands/test.py
+++ b/src/cli/eest/make/commands/test.py
@@ -1,9 +1,10 @@
"""
Provides a CLI command to scaffold a test file.
-The `test` command guides the user through a series of prompts to generate a test file
-based on the selected test type, fork, EIP number, and EIP name. The generated test file
-is saved in the appropriate directory with a rendered template using Jinja2.
+The `test` command guides the user through a series of prompts to generate a
+test file based on the selected test type, fork, EIP number, and EIP name. The
+generated test file is saved in the appropriate directory with a rendered
+template using Jinja2.
"""
import os
@@ -38,10 +39,11 @@ def test():
"""
Generate a new test file for an EIP.
- This function guides the user through a series of prompts to generate a test file
- for Ethereum execution specifications. The user is prompted to select the type of test,
- the fork to use, and to provide the EIP number and name. Based on the inputs, a test file
- is created in the appropriate directory with a rendered template.
+ This function guides the user through a series of prompts to generate a
+ test file for Ethereum execution specifications. The user is prompted to
+ select the type of test, the fork to use, and to provide the EIP number and
+ name. Based on the inputs, a test file is created in the appropriate
+ directory with a rendered template.
Example:
uv run eest make test
diff --git a/src/cli/eest/quotes.py b/src/cli/eest/quotes.py
index dfd8353072d..e097a31b9f8 100644
--- a/src/cli/eest/quotes.py
+++ b/src/cli/eest/quotes.py
@@ -51,5 +51,7 @@ def box_quote(quote):
def get_quote():
- """Return random inspirational quote related to system design formatted in a box."""
+ """
+ Return random inspirational quote formatted in a box.
+ """
return box_quote(random.choice(make_something_great))
diff --git a/src/cli/eofwrap.py b/src/cli/eofwrap.py
index c23a695a43d..9d8382a74c9 100644
--- a/src/cli/eofwrap.py
+++ b/src/cli/eofwrap.py
@@ -1,6 +1,6 @@
"""
-Generate a JSON blockchain test from an existing JSON blockchain test by wrapping its pre-state
-code in EOF wherever possible.
+Generate a JSON blockchain test from an existing JSON blockchain test by
+wrapping its pre-state code in EOF wherever possible.
Example Usage:
@@ -44,8 +44,8 @@
@click.option("--traces", is_flag=True, type=bool)
def eof_wrap(input_path: str, output_dir: str, traces: bool):
"""
- Wrap JSON blockchain test file(s) found at `input_path` and
- outputs them to the `output_dir`.
+    Wrap JSON blockchain test file(s) found at `input_path` and output them to
+ the `output_dir`.
"""
eof_wrapper = EofWrapper()
@@ -116,7 +116,9 @@ class EofWrapper:
GENERATION_ERRORS = "generation_errors"
def __init__(self):
- """Initialize the EofWrapper with metrics tracking and a unique EOF set."""
+ """
+ Initialize the EofWrapper with metrics tracking and a unique EOF set.
+ """
self.metrics = {
self.FILES_GENERATED: 0,
self.FILES_SKIPPED: 0,
@@ -135,7 +137,8 @@ def __init__(self):
file_skip_list = [
"Pyspecs",
- # EXTCODE* opcodes return different results for EOF targets and that is tested elsewhere
+ # EXTCODE* opcodes return different results for EOF targets and that is
+ # tested elsewhere
"stExtCodeHash",
# bigint syntax
"ValueOverflowParis",
@@ -168,10 +171,11 @@ def __init__(self):
def wrap_file(self, in_path: str, out_path: str, traces: bool):
"""
- Wrap code from a blockchain test JSON file from `in_path` into EOF containers,
- wherever possible. If not possible - skips and tracks that in metrics. Possible means
- at least one account's code can be wrapped in a valid EOF container and the assertions
- on post state are satisfied.
+ Wrap code from a blockchain test JSON file from `in_path` into EOF
+ containers, wherever possible. If not possible - skips and tracks that
+ in metrics. Possible means at least one account's code can be wrapped
+ in a valid EOF container and the assertions on post state are
+ satisfied.
"""
for skip in self.file_skip_list:
if skip in in_path:
@@ -301,9 +305,9 @@ def _wrap_fixture(self, fixture: BlockchainFixture, traces: bool):
test.blocks.append(block)
elif isinstance(fixture_block, InvalidFixtureBlock):
- # Skip - invalid blocks are not supported. Reason: FixtureTransaction doesn't
- # support expected exception. But we can continue and test the remaining
- # blocks.
+ # Skip - invalid blocks are not supported. Reason:
+ # FixtureTransaction doesn't support expected exception. But we
+ # can continue and test the remaining blocks.
self.metrics[self.INVALID_BLOCKS_SKIPPED] += 1
else:
raise TypeError("not a FixtureBlock")
@@ -331,13 +335,13 @@ def _validate_eof(self, container: Container, metrics: bool = True) -> bool:
return True
-# `no_type_check` required because OpcodeWithOperand.opcode can be `None` when formatting as a
-# string, but here it can never be `None`.
+# `no_type_check` required because OpcodeWithOperand.opcode can be `None` when
+# formatting as a string, but here it can never be `None`.
@no_type_check
def wrap_code(account_code: Bytes) -> Container:
"""
- Wrap `account_code` into a simplest EOF container, applying some simple heuristics in
- order to obtain a valid code section termination.
+    Wrap `account_code` into the simplest EOF container, applying some simple
+ heuristics in order to obtain a valid code section termination.
"""
assert len(account_code) > 0
diff --git a/src/cli/evm_bytes.py b/src/cli/evm_bytes.py
index 1b847ed97fb..7f4a73f0966 100644
--- a/src/cli/evm_bytes.py
+++ b/src/cli/evm_bytes.py
@@ -62,7 +62,8 @@ def terminating(self) -> bool:
@property
def bytecode(self) -> Bytecode:
"""Opcode as bytecode with its operands if any."""
- # opcode.opcode[*opcode.operands] crashes `black` formatter and doesn't work.
+ # opcode.opcode[*opcode.operands] crashes `black` formatter and doesn't
+ # work.
if self.opcode:
return self.opcode.__getitem__(*self.operands) if self.operands else self.opcode
else:
@@ -181,7 +182,8 @@ def hex_string(hex_string: str, assembly: bool):
Output 1:
\b
- Op.PUSH1[0x42] + Op.PUSH1[0x0] + Op.MSTORE + Op.PUSH1[0x20] + Op.PUSH1[0x0] + Op.RETURN
+ Op.PUSH1[0x42] + Op.PUSH1[0x0] + Op.MSTORE + Op.PUSH1[0x20] +
+ Op.PUSH1[0x0] + Op.RETURN
Example 2: Convert a hex string to assembly
uv run evm_bytes hex-string --assembly 604260005260206000F3
@@ -207,14 +209,16 @@ def binary_file(binary_file, assembly: bool):
"""
Convert the BINARY_FILE containing EVM bytes to Python Opcodes or assembly.
- BINARY_FILE is a binary file containing EVM bytes, use `-` to read from stdin.
+ BINARY_FILE is a binary file containing EVM bytes, use `-` to read from
+ stdin.
Returns:
(str): The processed EVM opcodes in Python or assembly format.
Example: Convert the Withdrawal Request contract to assembly
\b
- uv run evm_bytes binary-file ./src/ethereum_test_forks/forks/contracts/withdrawal_request.bin --assembly
+        uv run evm_bytes binary-file --assembly \\
+            ./src/ethereum_test_forks/forks/contracts/withdrawal_request.bin
Output:
\b
@@ -225,6 +229,6 @@ def binary_file(binary_file, assembly: bool):
jumpi
...
- """ # noqa: E501,D301
+ """ # noqa: D301
processed_output = format_opcodes(process_evm_bytes(binary_file.read()), assembly=assembly)
click.echo(processed_output)
diff --git a/src/cli/extract_config.py b/src/cli/extract_config.py
index 503c352eb1c..bf0e1f717c5 100755
--- a/src/cli/extract_config.py
+++ b/src/cli/extract_config.py
@@ -1,9 +1,11 @@
#!/usr/bin/env python
"""
-CLI tool to extract client configuration files (chainspec/genesis.json) from Ethereum clients.
+CLI tool to extract client configuration files (chainspec/genesis.json) from
+Ethereum clients.
-This tool spawns an Ethereum client using Hive and extracts the generated configuration
-files such as /chainspec/test.json, /configs/test.cfg, or /genesis.json from the Docker container.
+This tool spawns an Ethereum client using Hive and extracts the generated
+configuration files such as /chainspec/test.json, /configs/test.cfg, or
+/genesis.json from the Docker container.
"""
import io
@@ -119,7 +121,9 @@ def create_genesis_from_fixture(fixture_path: Path) -> Tuple[FixtureHeader, Allo
def get_client_environment_for_fixture(fork: Fork, chain_id: int) -> dict:
- """Get the environment variables for starting a client with the given fixture."""
+ """
+ Get the environment variables for starting a client with the given fixture.
+ """
if fork not in ruleset:
raise ValueError(f"Fork '{fork}' not found in hive ruleset")
@@ -176,8 +180,8 @@ def extract_config(
Extract client configuration files from Ethereum clients.
This tool spawns an Ethereum client using Hive and extracts the generated
- configuration files such as /chainspec/test.json, /configs/test.cfg, or /genesis.json
- from the Docker container.
+ configuration files such as /chainspec/test.json, /configs/test.cfg, or
+ /genesis.json from the Docker container.
"""
if not fixture:
raise click.UsageError("No fixture provided, use --fixture to specify a fixture")
diff --git a/src/cli/fillerconvert/fillerconvert.py b/src/cli/fillerconvert/fillerconvert.py
index 6cb7a5f3b59..c7392fd420c 100644
--- a/src/cli/fillerconvert/fillerconvert.py
+++ b/src/cli/fillerconvert/fillerconvert.py
@@ -44,5 +44,6 @@ def main() -> None:
# or file.endswith("vmPerformance/performanceTesterFiller.yml")
# or file.endswith("vmPerformance/loopExpFiller.yml")
# or file.endswith("vmPerformance/loopMulFiller.yml")
- # or file.endswith("stRevertTest/RevertRemoteSubCallStorageOOGFiller.yml")
+ # or
+ # file.endswith("stRevertTest/RevertRemoteSubCallStorageOOGFiller.yml")
# or file.endswith("stSolidityTest/SelfDestructFiller.yml")
diff --git a/src/cli/fillerconvert/verify_filled.py b/src/cli/fillerconvert/verify_filled.py
index 706d62ac3e2..3c3302342e8 100644
--- a/src/cli/fillerconvert/verify_filled.py
+++ b/src/cli/fillerconvert/verify_filled.py
@@ -34,10 +34,9 @@ class FilledStateTest(RootModel[dict[str, StateTest]]):
def verify_refilled(refilled: Path, original: Path) -> int:
"""
- Verify post hash of the refilled test against original:
- Regex the original d,g,v from the refilled test name.
- Find the post record for this d,g,v and the fork of refilled test.
- Compare the post hash.
+ Verify post hash of the refilled test against original: Regex the original
+ d,g,v from the refilled test name. Find the post record for this d,g,v and
+    the fork of the refilled test. Compare the post hash.
"""
verified_vectors = 0
json_str = refilled.read_text(encoding="utf-8")
@@ -46,7 +45,8 @@ def verify_refilled(refilled: Path, original: Path) -> int:
json_str = original.read_text(encoding="utf-8")
original_test_wrapper = FilledStateTest.model_validate_json(json_str)
- # Each original test has only 1 test with many posts for each fork and many txs
+ # Each original test has only 1 test with many posts for each fork and many
+ # txs
original_test_name, test_original = list(original_test_wrapper.root.items())[0]
for refilled_test_name, refilled_test in refilled_test_wrapper.root.items():
diff --git a/src/cli/gen_index.py b/src/cli/gen_index.py
index a19e2394315..e1d8de1af2d 100644
--- a/src/cli/gen_index.py
+++ b/src/cli/gen_index.py
@@ -1,4 +1,6 @@
-"""Generate an index file of all the json fixtures in the specified directory."""
+"""
+Generate an index file of all the json fixtures in the specified directory.
+"""
import datetime
import json
@@ -72,7 +74,9 @@ def count_json_files_exclude_index(start_path: Path) -> int:
help="Force re-generation of the index file, even if it already exists.",
)
def generate_fixtures_index_cli(input_dir: str, quiet_mode: bool, force_flag: bool):
- """CLI wrapper to an index of all the fixtures in the specified directory."""
+ """
+    CLI wrapper to generate an index of fixtures in the specified directory.
+ """
generate_fixtures_index(
Path(input_dir),
quiet_mode=quiet_mode,
diff --git a/src/cli/generate_checklist_stubs.py b/src/cli/generate_checklist_stubs.py
index 36d3839b369..9468521dbf4 100644
--- a/src/cli/generate_checklist_stubs.py
+++ b/src/cli/generate_checklist_stubs.py
@@ -86,16 +86,17 @@ def generate_checklist_stubs(output: str | None, dry_run: bool) -> None:
Generate mypy stub files for EIPChecklist classes.
This is a development tool that generates .pyi stub files to help mypy
- understand that EIPChecklist classes are callable, fixing type checking issues.
+ understand that EIPChecklist classes are callable, fixing type checking
+ issues.
Examples:
- # Generate stub files (auto-detect location)
+ Generate stub files (auto-detect location):
uv run generate_checklist_stubs
- # Generate to specific location
+ Generate to specific location:
uv run generate_checklist_stubs --output /path/to/stubs.pyi
- # Preview content without writing
+ Preview content without writing:
uv run generate_checklist_stubs --dry-run
"""
diff --git a/src/cli/gentest/request_manager.py b/src/cli/gentest/request_manager.py
index e4eac7e11d4..ac517b7490d 100644
--- a/src/cli/gentest/request_manager.py
+++ b/src/cli/gentest/request_manager.py
@@ -1,13 +1,17 @@
"""
-A request manager Ethereum RPC calls.
+A request manager for Ethereum RPC calls.
-The RequestManager handles transactions and block data retrieval from a remote Ethereum node,
-utilizing Pydantic models to define the structure of transactions and blocks.
+The RequestManager handles transactions and block data retrieval from a remote
+Ethereum node, utilizing Pydantic models to define the structure of
+transactions and blocks.
Classes:
-- RequestManager: The main class for managing RPC requests and responses.
-- RemoteTransaction: A Pydantic model representing a transaction retrieved from the node.
-- RemoteBlock: A Pydantic model representing a block retrieved from the node.
+ RequestManager: The main class for managing RPC requests and
+ responses.
+ RemoteTransaction: A Pydantic model representing a transaction
+ retrieved from the node.
+ RemoteBlock: A Pydantic model representing a block retrieved from
+ the node.
"""
from typing import Dict
diff --git a/src/cli/gentest/source_code_generator.py b/src/cli/gentest/source_code_generator.py
index e437631aad5..d78dc82693a 100644
--- a/src/cli/gentest/source_code_generator.py
+++ b/src/cli/gentest/source_code_generator.py
@@ -22,18 +22,22 @@
template_env.filters["stringify"] = lambda value: repr(value)
-# generates a formatted pytest source code by writing provided data on a given template.
+# generates a formatted pytest source code by writing provided data on a given
+# template.
def get_test_source(provider: Provider, template_path: str) -> str:
"""
- Generate formatted pytest source code by rendering a template with provided data.
+ Generate formatted pytest source code by rendering a template with provided
+ data.
- This function uses the given template path to create a pytest-compatible source
- code string. It retrieves context data from the specified provider and applies
- it to the template.
+ This function uses the given template path to create a pytest-compatible
+ source code string. It retrieves context data from the specified provider
+ and applies it to the template.
Args:
- provider: An object that provides the necessary context for rendering the template.
- template_path (str): The path to the Jinja2 template file used to generate tests.
+ provider: An object that provides the necessary context for rendering
+ the template.
+ template_path (str): The path to the Jinja2 template file
+ used to generate tests.
Returns:
str: The formatted pytest source code.
@@ -49,11 +53,11 @@ def format_code(code: str) -> str:
"""
Format the provided Python code using the Black code formatter.
- This function writes the given code to a temporary Python file, formats it using
- the Black formatter, and returns the formatted code as a string.
+ This function writes the given code to a temporary Python file, formats it
+ using the Black formatter, and returns the formatted code as a string.
Args:
- code (str): The Python code to be formatted.
+ code (str): The Python code to be formatted.
Returns:
str: The formatted Python code.
diff --git a/src/cli/gentest/test_context_providers.py b/src/cli/gentest/test_context_providers.py
index f0cb5c3e021..42a56f3f0d8 100644
--- a/src/cli/gentest/test_context_providers.py
+++ b/src/cli/gentest/test_context_providers.py
@@ -2,13 +2,14 @@
Various providers which generate contexts required to create test scripts.
Classes:
-- Provider: An provider generates required context for creating a test.
-- BlockchainTestProvider: The BlockchainTestProvider takes a transaction hash and creates
- required context to create a test.
+    Provider: A provider generates required context for creating a
+ test.
+ BlockchainTestProvider: The BlockchainTestProvider takes a transaction
+ hash and creates required context to create a test.
Example:
- provider = BlockchainTestContextProvider(transaction=transaction)
- context = provider.get_context()
+ provider = BlockchainTestContextProvider(transaction=transaction)
+ context = provider.get_context()
"""
@@ -80,7 +81,8 @@ def _get_pre_state(self) -> Dict[str, Account]:
def _get_transaction(self) -> Transaction:
assert self.transaction_response is not None
- # Validate the RPC TransactionHashResponse and convert it to a Transaction instance.
+ # Validate the RPC TransactionHashResponse and convert it to a
+ # Transaction instance.
return Transaction.model_validate(self.transaction_response.model_dump())
def get_context(self) -> Dict[str, Any]:
@@ -89,7 +91,7 @@ def get_context(self) -> Dict[str, Any]:
Returns:
Dict[str, Any]: A dictionary containing environment,
- pre-state, a transaction and its hash.
+ pre-state, a transaction and its hash.
"""
self._make_rpc_calls()
diff --git a/src/cli/gentest/test_providers.py b/src/cli/gentest/test_providers.py
index dc29d306ee3..d9cdeaba6a2 100644
--- a/src/cli/gentest/test_providers.py
+++ b/src/cli/gentest/test_providers.py
@@ -1,14 +1,21 @@
"""
-Contains various providers which generates context required to create test scripts.
+Contains various providers that generate context required to create test
+scripts.
Classes:
-- BlockchainTestProvider: The BlockchainTestProvider class takes information about a block,
-a transaction, and the associated state, and provides methods to generate various elements
-needed for testing, such as module docstrings, test names, and pre-state items.
+ BlockchainTestProvider: The BlockchainTestProvider class takes
+ information about a block, a transaction, and the
+ associated state, and provides methods to generate
+ various elements needed for testing, such as module
+ docstrings, test names, and pre-state items.
Example:
- provider = BlockchainTestProvider(block=block, transaction=transaction, state=state)
- context = provider.get_context()
+ provider = BlockchainTestProvider(
+ block=block,
+ transaction=transaction,
+ state=state
+ )
+ context = provider.get_context()
"""
@@ -22,7 +29,9 @@
class BlockchainTestProvider(BaseModel):
- """Provides context required to generate a `blockchain_test` using pytest."""
+ """
+ Provides context required to generate a `blockchain_test` using pytest.
+ """
block: Environment
transaction: TransactionByHashResponse
@@ -103,8 +112,9 @@ def get_context(self) -> Dict[str, Any]:
Get the context for generating a blockchain test.
Returns:
- Dict[str, Any]: A dictionary containing module docstring, test name,
- test docstring, environment kwargs, pre-state items, and transaction items.
+ Dict[str, Any]: A dictionary containing module docstring, test
+ name, test docstring, environment kwargs,
+ pre-state items, and transaction items.
"""
return {
diff --git a/src/cli/gentest/tests/test_cli.py b/src/cli/gentest/tests/test_cli.py
index c8d9a869c6e..cac98388d95 100644
--- a/src/cli/gentest/tests/test_cli.py
+++ b/src/cli/gentest/tests/test_cli.py
@@ -92,10 +92,10 @@ def transaction_hash(tx_type: int) -> str: # noqa: D103
@pytest.mark.parametrize("tx_type", list(transactions_by_type.keys()))
def test_tx_type(pytester, tmp_path, monkeypatch, tx_type, transaction_hash, default_t8n):
"""Generates a test case for any transaction type."""
- ## Arrange ##
- # This test is run in a CI environment, where connection to a node could be
- # unreliable. Therefore, we mock the RPC request to avoid any network issues.
- # This is done by patching the `get_context` method of the `StateTestProvider`.
+ # This test is run in a CI environment, where connection to a
+ # node could be unreliable. Therefore, we mock the RPC request to avoid any
+ # network issues. This is done by patching the `get_context` method of the
+ # `StateTestProvider`.
runner = CliRunner()
tmp_path_tests = tmp_path / "tests"
tmp_path_tests.mkdir()
diff --git a/src/cli/hasher.py b/src/cli/hasher.py
index bd5caaeca5a..ab1dd874805 100644
--- a/src/cli/hasher.py
+++ b/src/cli/hasher.py
@@ -20,7 +20,10 @@ class HashableItemType(IntEnum):
@dataclass(kw_only=True)
class HashableItem:
- """Represents an item that can be hashed containing other items that can be hashed as well."""
+ """
+ Represents an item that can be hashed containing other items that can be
+ hashed as well.
+ """
type: HashableItemType
parents: List[str] = field(default_factory=list)
diff --git a/src/cli/input/input_repository.py b/src/cli/input/input_repository.py
index 0a75d14de8f..83b31c830b0 100644
--- a/src/cli/input/input_repository.py
+++ b/src/cli/input/input_repository.py
@@ -6,8 +6,8 @@
class InputRepository(ABC):
"""
- Abstract base class for input handling.
- This class defines the interface for different input types that can be swapped out.
+ Abstract base class for input handling. This class defines the interface
+ for different input types that can be swapped out.
"""
@abstractmethod
diff --git a/src/cli/input/questionary_input_repository.py b/src/cli/input/questionary_input_repository.py
index af0e2e144ce..8ddc6b18b65 100644
--- a/src/cli/input/questionary_input_repository.py
+++ b/src/cli/input/questionary_input_repository.py
@@ -9,7 +9,10 @@
class QuestionaryInputRepository(InputRepository):
- """Repository for handling various types of user inputs using the Questionary library."""
+ """
+ Repository for handling various types of user inputs using the Questionary
+ library.
+ """
def input_text(self, question: str) -> str:
"""Ask a text input question."""
diff --git a/src/cli/modify_static_test_gas_limits.py b/src/cli/modify_static_test_gas_limits.py
index 17f0b5137c4..35a259d88f2 100644
--- a/src/cli/modify_static_test_gas_limits.py
+++ b/src/cli/modify_static_test_gas_limits.py
@@ -1,6 +1,6 @@
"""
-Command to scan and overwrite the static tests' gas limits to new optimized value given in the
-input file.
+Command to scan and overwrite the static tests' gas limits to new optimized
+value given in the input file.
"""
import json
@@ -46,7 +46,9 @@ class StaticTestFile(EthereumTestRootModel):
def _check_fixtures(*, input_path: Path, max_gas_limit: int | None, dry_run: bool, verbose: bool):
- """Perform some checks on the fixtures contained in the specified directory."""
+ """
+ Perform some checks on the fixtures contained in the specified directory.
+ """
# Load the test dictionary from the input JSON file
test_dict = GasLimitDict.model_validate_json(input_path.read_text())
@@ -207,7 +209,9 @@ def _check_fixtures(*, input_path: Path, max_gas_limit: int | None, dry_run: boo
help="Print extra information.",
)
def main(input_str: str, max_gas_limit, dry_run: bool, verbose: bool):
- """Perform some checks on the fixtures contained in the specified directory."""
+ """
+ Perform some checks on the fixtures contained in the specified directory.
+ """
input_path = Path(input_str)
if not dry_run:
# Always dry-run first before actually modifying
diff --git a/src/cli/order_fixtures.py b/src/cli/order_fixtures.py
index 7b90281452e..d35412e755f 100644
--- a/src/cli/order_fixtures.py
+++ b/src/cli/order_fixtures.py
@@ -1,18 +1,17 @@
"""
Functions and CLI interface for recursively ordering and sorting .json files.
-example: Usage
-
- ```
- order_fixtures -i input_dir -o output_dir
- ```
+Usage Example:
+```console
+ order_fixtures -i input_dir -o output_dir
+```
The CLI interface takes the paths of an input directory and an output
directory. It recursively processes each .json file in the input directory and
-its subdirectories, and sorts lists and dictionaries alphabetically and
-writes the sorted output to .json files to the corresponding locations in the
-output directory.
+its subdirectories, and sorts lists and dictionaries alphabetically and writes
+the sorted output to .json files to the corresponding locations in the output
+directory.
"""
import json
@@ -27,16 +26,15 @@ def recursive_sort(item: Dict[str, Any] | List[Any]) -> Dict[str, Any] | List[An
Recursively sorts an item.
If the item is a dictionary, it returns a new dictionary that is a sorted
- version of the input dictionary.
- If the item is a list, it returns a new list that is a sorted version of the
- input list. The elements of the list are also sorted if they are lists or
- dictionaries.
+ version of the input dictionary. If the item is a list, it returns a new
+ list that is a sorted version of the input list. The elements of the list
+ are also sorted if they are lists or dictionaries.
Args:
- item: The item to be sorted. This can be a list or a dictionary.
+ item: The item to be sorted. This can be a list or a dictionary.
Returns:
- The sorted item.
+ The sorted item.
"""
if isinstance(item, dict):
@@ -45,8 +43,8 @@ def recursive_sort(item: Dict[str, Any] | List[Any]) -> Dict[str, Any] | List[An
try:
return sorted(cast(List[Any], [recursive_sort(x) for x in item]))
except TypeError:
- # If a TypeError is raised, we might be dealing with a list of dictionaries
- # Sort them based on their string representation
+ # If a TypeError is raised, we might be dealing with a list of
+        # dictionaries. Sort them based on their string representation.
return sorted((recursive_sort(x) for x in item), key=str)
else:
return item
@@ -60,8 +58,8 @@ def order_fixture(input_path: Path, output_path: Path) -> None:
to the output path.
Args:
- input_path: The Path object of the input .json file.
- output_path: The Path object of the output .json file.
+ input_path: The Path object of the input .json file.
+ output_path: The Path object of the output .json file.
Returns:
None.
@@ -78,9 +76,9 @@ def process_directory(input_dir: Path, output_dir: Path):
"""
Process a directory.
- Processes each .json file in the input directory and its subdirectories, and
- writes the sorted .json files to the corresponding locations in the output
- directory.
+ Processes each .json file in the input directory and its subdirectories,
+ and writes the sorted .json files to the corresponding locations in the
+ output directory.
Args:
input_dir: The Path object of the input directory.
@@ -106,7 +104,7 @@ def process_directory(input_dir: Path, output_dir: Path):
"input_dir",
type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),
required=True,
- help="The input directory",
+ help="input directory",
)
@click.option(
"--output",
diff --git a/src/cli/pytest_commands/base.py b/src/cli/pytest_commands/base.py
index 7061741e47a..444de79561b 100644
--- a/src/cli/pytest_commands/base.py
+++ b/src/cli/pytest_commands/base.py
@@ -24,7 +24,10 @@ class PytestExecution:
"""Path to the pytest configuration file (e.g., 'pytest-fill.ini')."""
command_logic_test_paths: List[str] = field(default_factory=list)
- """List of tests that have to be appended to the start of pytest command arguments."""
+ """
+ List of tests that have to be appended to the start of pytest command
+ arguments.
+ """
args: List[str] = field(default_factory=list)
"""Arguments to pass to pytest."""
@@ -80,7 +83,8 @@ def run_multiple(self, executions: List[PytestExecution]) -> int:
"""
Run multiple pytest executions in sequence.
- Returns the exit code of the final execution, or the first non-zero exit code.
+ Returns the exit code of the final execution, or the first non-zero
+ exit code.
"""
for i, execution in enumerate(executions):
if execution.description and len(executions) > 1:
@@ -102,8 +106,8 @@ class PytestCommand:
"""
Base class for pytest-based CLI commands.
- Provides a standard structure for commands that execute pytest
- with specific configurations and argument processing.
+ Provides a standard structure for commands that execute pytest with
+ specific configurations and argument processing.
"""
config_file: str
@@ -138,8 +142,8 @@ def execute(self, pytest_args: List[str]) -> None:
@property
def test_args(self) -> List[str]:
"""
- Return the test-path arguments that have to be appended to all PytestExecution
- instances.
+ Return the test-path arguments that have to be appended to all
+ PytestExecution instances.
"""
if self.command_logic_test_paths:
return [str(path) for path in self.command_logic_test_paths]
@@ -149,8 +153,8 @@ def create_executions(self, pytest_args: List[str]) -> List[PytestExecution]:
"""
Create the list of pytest executions for this command.
- This method can be overridden by subclasses to implement
- multi-phase execution (e.g., for future fill command).
+ This method can be overridden by subclasses to implement multi-phase
+ execution (e.g., for future fill command).
"""
processed_args = self.process_arguments(pytest_args)
return [
diff --git a/src/cli/pytest_commands/fill.py b/src/cli/pytest_commands/fill.py
index f0f3ce53283..87d5af00419 100644
--- a/src/cli/pytest_commands/fill.py
+++ b/src/cli/pytest_commands/fill.py
@@ -26,10 +26,12 @@ def __init__(self, **kwargs):
def create_executions(self, pytest_args: List[str]) -> List[PytestExecution]:
"""
- Create execution plan that supports two-phase pre-allocation group generation.
+ Create execution plan that supports two-phase pre-allocation group
+ generation.
Returns single execution for normal filling, or two-phase execution
- when --generate-pre-alloc-groups or --generate-all-formats is specified.
+ when --generate-pre-alloc-groups or --generate-all-formats is
+ specified.
"""
processed_args = self.process_arguments(pytest_args)
@@ -50,7 +52,10 @@ def create_executions(self, pytest_args: List[str]) -> List[PytestExecution]:
]
def _create_two_phase_executions(self, args: List[str]) -> List[PytestExecution]:
- """Create two-phase execution: pre-allocation group generation + fixture filling."""
+ """
+ Create two-phase execution: pre-allocation group generation + fixture
+ filling.
+ """
# Phase 1: Pre-allocation group generation (clean and minimal output)
phase1_args = self._create_phase1_args(args)
@@ -87,14 +92,16 @@ def _create_phase1_args(self, args: List[str]) -> List[str]:
# Add required phase 1 flags (with quiet output by default)
phase1_args = [
"--generate-pre-alloc-groups",
- "-qq", # Quiet pytest output by default (user -v/-vv/-vvv can override)
+ "-qq", # Quiet pytest output by default (user -v/-vv/-vvv can
+ # override)
] + filtered_args
return phase1_args
def _create_phase2_args(self, args: List[str]) -> List[str]:
"""Create arguments for phase 2 (fixture filling)."""
- # Remove --generate-pre-alloc-groups and --clean, then add --use-pre-alloc-groups
+        # Remove --generate-pre-alloc-groups and --clean, then add
+        # --use-pre-alloc-groups
phase2_args = self._remove_generate_pre_alloc_groups_flag(args)
phase2_args = self._remove_clean_flag(phase2_args)
phase2_args = self._add_use_pre_alloc_groups_flag(phase2_args)
@@ -138,7 +145,10 @@ def _remove_unwanted_phase1_args(self, args: List[str]) -> List[str]:
return filtered_args
def _remove_generate_pre_alloc_groups_flag(self, args: List[str]) -> List[str]:
- """Remove --generate-pre-alloc-groups flag but keep --generate-all-formats for phase 2."""
+ """
+ Remove --generate-pre-alloc-groups flag but keep --generate-all-formats
+ for phase 2.
+ """
return [arg for arg in args if arg != "--generate-pre-alloc-groups"]
def _remove_clean_flag(self, args: List[str]) -> List[str]:
@@ -181,7 +191,10 @@ def _is_watch_mode(self, args: List[str]) -> bool:
return any(flag in args for flag in ["--watch", "--watcherfall"])
def _is_verbose_watch_mode(self, args: List[str]) -> bool:
- """Check if verbose watch flag (--watcherfall) is present in arguments."""
+ """
+ Check if verbose watch flag (--watcherfall)
+ is present in arguments.
+ """
return "--watcherfall" in args
def execute(self, pytest_args: List[str]) -> None:
@@ -257,5 +270,6 @@ def phil(pytest_args: List[str], **kwargs) -> None:
if __name__ == "__main__":
- # to allow debugging in vscode: in launch config, set "module": "cli.pytest_commands.fill"
+ # to allow debugging in vscode: in launch config, set "module":
+ # "cli.pytest_commands.fill"
fill(prog_name="fill")
diff --git a/src/cli/pytest_commands/processors.py b/src/cli/pytest_commands/processors.py
index ef6a44dd1ed..b63cd3a132b 100644
--- a/src/cli/pytest_commands/processors.py
+++ b/src/cli/pytest_commands/processors.py
@@ -18,8 +18,9 @@ def __init__(self, command_type: str, required_args: List[str] | None = None):
Initialize the help processor.
Args:
- command_type: The type of command (e.g., "fill", "consume", "execute")
- required_args: The arguments that are required for the command to run
+ command_type: The type of command (e.g., "fill", "consume",
+ "execute")
+ required_args: The arguments that are required for the command to run
"""
self.command_type = command_type
@@ -48,8 +49,8 @@ class StdoutFlagsProcessor(ArgumentProcessor):
def process_args(self, args: List[str]) -> List[str]:
"""
- If the user has requested to write to stdout, add pytest arguments
- to suppress pytest's test session header and summary output.
+ If the user has requested to write to stdout, add pytest arguments to
+ suppress pytest's test session header and summary output.
"""
if not self._is_writing_to_stdout(args):
return args
@@ -119,10 +120,16 @@ def _has_parallelism_flag(self, args: List[str]) -> bool:
class WatchFlagsProcessor(ArgumentProcessor):
- """Processes --watch and --watcherfall flags for file watching functionality."""
+ """
+ Processes --watch and --watcherfall flags
+ for file watching functionality.
+ """
def process_args(self, args: List[str]) -> List[str]:
- """Remove --watch and --watcherfall flags from args passed to pytest."""
+ """
+ Remove --watch and --watcherfall
+ flags from args passed to pytest.
+ """
return [arg for arg in args if arg not in ["--watch", "--watcherfall"]]
@@ -134,7 +141,7 @@ def __init__(self, is_hive: bool = False):
Initialize the consume processor.
Args:
- is_hive: Whether this is a hive-based consume command
+ is_hive: Whether this is a hive-based consume command
"""
self.is_hive = is_hive
diff --git a/src/cli/show_pre_alloc_group_stats.py b/src/cli/show_pre_alloc_group_stats.py
index c7cf51cedfd..492c1f0c6ab 100644
--- a/src/cli/show_pre_alloc_group_stats.py
+++ b/src/cli/show_pre_alloc_group_stats.py
@@ -15,7 +15,9 @@
def extract_test_module(test_id: str) -> str:
"""Extract test module path from test ID."""
- # Example: tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::test_beacon_root_contract_calls[fork_Cancun] # noqa: E501
+ # Example:
+ # tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::
+ # test_beacon_root_contract_calls[fork_Cancun]
if "::" in test_id:
return test_id.split("::")[0]
return "unknown"
@@ -23,8 +25,12 @@ def extract_test_module(test_id: str) -> str:
def extract_test_function(test_id: str) -> str:
"""Extract test function name from test ID (without parameters)."""
- # Example: tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::test_beacon_root_contract_calls[fork_Cancun] # noqa: E501
- # Returns: tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::test_beacon_root_contract_calls # noqa: E501
+ # Example:
+ # tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::
+ # test_beacon_root_contract_calls[fork_Cancun]
+ # Returns:
+ # tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::
+ # test_beacon_root_contract_calls
if "::" in test_id:
parts = test_id.split("::")
if len(parts) >= 2:
@@ -43,9 +49,10 @@ def calculate_size_distribution(
Calculate frequency distribution of group sizes with appropriate binning.
Returns:
- - Group count distribution: [(range_label, group_count), ...]
- - Test count distribution: [(range_label, test_count, cumulative_remaining, group_count),
- ...]
+ Group count distribution: [(range_label, group_count), ...]
+        Test count distribution: [(range_label, test_count,
+            cumulative_remaining,
+            group_count), ...]
"""
if not test_counts:
@@ -80,12 +87,15 @@ def calculate_size_distribution(
# Test count distribution with group count
tests_in_bin = sum(groups_in_bin)
- test_distribution.append((label, tests_in_bin, 0, group_count)) # Added group_count
+ # Added group_count
+ test_distribution.append((label, tests_in_bin, 0, group_count))
- # Calculate cumulative values
- # For the table sorted from largest to smallest:
- # - Row N shows: if we exclude groups of size N and smaller, what % of tests remain?
- # - Row N shows: if we include groups of size N and larger, how many groups is that?
+    # Calculate cumulative values.
+    # For the table sorted from largest to smallest:
+    #   - Row N shows: if we exclude groups of size N and smaller, what
+    #     percent of tests remain?
+    #   - Row N shows: if we include groups of size N and larger, how
+    #     many groups is that?
cumulative_remaining_tests = 0
cumulative_groups = 0
@@ -167,7 +177,8 @@ class SplitTestFunction(CamelModel):
split_test_functions[test_function].groups += 1
split_test_functions[test_function].forks.add(fork)
- # Filter to only test functions with multiple size-1 groups and calculate ratios
+ # Filter to only test functions with multiple size-1 groups and calculate
+ # ratios
split_functions = {}
for func, split_test_function in split_test_functions.items():
if split_test_function.groups > 1:
@@ -355,7 +366,8 @@ def display_stats(stats: Dict, console: Console, verbose: int = 0):
# Sort modules by group count (descending) - shows execution complexity
sorted_modules = sorted(
stats["module_stats"].items(),
- key=lambda x: (-x[1]["groups"], -x[1]["tests"]), # Secondary sort by tests
+ # Secondary sort by tests
+ key=lambda x: (-x[1]["groups"], -x[1]["tests"]),
)
# Show all modules if -vv, otherwise top 15
@@ -412,7 +424,8 @@ def display_stats(stats: Dict, console: Console, verbose: int = 0):
# Shorten function path for display
display_function = test_function
if display_function.startswith("tests/"):
- display_function = display_function[6:] # Remove "tests/" prefix
+                    # Remove "tests/" prefix
+                    display_function = display_function[6:]
split_table.add_row(
display_function,
@@ -475,7 +488,6 @@ def main(pre_alloc_folder: Path, verbose: int):
The pre_alloc file is generated when running tests with the
--generate-pre-alloc-groups and --use-pre-alloc-groups flags to optimize
test execution by grouping tests with identical pre-allocation state.
-
"""
console = Console()
diff --git a/src/cli/tests/test_evm_bytes.py b/src/cli/tests/test_evm_bytes.py
index 64bebd41828..e7e32a81e3c 100644
--- a/src/cli/tests/test_evm_bytes.py
+++ b/src/cli/tests/test_evm_bytes.py
@@ -8,7 +8,9 @@
basic_vector = [
"0x60008080808061AAAA612d5ff1600055",
- "Op.PUSH1[0x0] + Op.DUP1 + Op.DUP1 + Op.DUP1 + Op.DUP1 + Op.PUSH2[0xaaaa] + Op.PUSH2[0x2d5f] + Op.CALL + Op.PUSH1[0x0] + Op.SSTORE", # noqa: E501
+ "Op.PUSH1[0x0] + Op.DUP1 + Op.DUP1 + Op.DUP1 + Op.DUP1 + "
+ "Op.PUSH2[0xaaaa] + Op.PUSH2[0x2d5f] + Op.CALL + Op.PUSH1[0x0] + "
+ "Op.SSTORE",
]
complex_vector = [
"0x7fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebf5f527fc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedf6020527fe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff60405260786040356020355f35608a565b5f515f55602051600155604051600255005b5e56", # noqa: E501
@@ -24,8 +26,9 @@
]
rjumpv_vector = [
"0xe213b1465aef60276095472e3250cf64736f6c63430008150033a26469706673582212206eab0a7969fe",
- "Op.RJUMPV[-0x4eba, 0x5aef, 0x6027, 0x6095, 0x472e, 0x3250, -0x309c, 0x736f, 0x6c63, 0x4300,"
- + " 0x815, 0x33, -0x5d9c, 0x6970, 0x6673, 0x5822, 0x1220, 0x6eab, 0xa79, 0x69fe]",
+ "Op.RJUMPV[-0x4eba, 0x5aef, 0x6027, 0x6095, 0x472e, 0x3250, -0x309c, "
+ "0x736f, 0x6c63, 0x4300," + " 0x815, 0x33, -0x5d9c, 0x6970, 0x6673, 0x5822, 0x1220, 0x6eab, "
+ "0xa79, 0x69fe]",
]
diff --git a/src/cli/tests/test_generate_all_formats.py b/src/cli/tests/test_generate_all_formats.py
index 9f49678464d..a939f61932d 100644
--- a/src/cli/tests/test_generate_all_formats.py
+++ b/src/cli/tests/test_generate_all_formats.py
@@ -30,7 +30,9 @@ def test_generate_all_formats_creates_two_phase_execution():
def test_generate_all_formats_preserves_other_args():
- """Test that --generate-all-formats preserves other command line arguments."""
+ """
+ Test that --generate-all-formats preserves other command line arguments.
+ """
command = FillCommand()
with patch.object(command, "process_arguments", side_effect=lambda x: x):
@@ -86,7 +88,8 @@ def test_legacy_generate_pre_alloc_groups_still_works():
phase1_args = executions[0].args
assert "--generate-pre-alloc-groups" in phase1_args
- # Phase 2: Should have --use-pre-alloc-groups but NOT --generate-all-formats
+    # Phase 2: Should have --use-pre-alloc-groups but NOT
+    # --generate-all-formats
phase2_args = executions[1].args
assert "--use-pre-alloc-groups" in phase2_args
assert "--generate-all-formats" not in phase2_args
@@ -110,7 +113,9 @@ def test_single_phase_without_flags():
def test_tarball_output_auto_enables_generate_all_formats():
- """Test that tarball output automatically enables --generate-all-formats."""
+ """
+ Test that tarball output automatically enables --generate-all-formats.
+ """
command = FillCommand()
with patch.object(command, "process_arguments", side_effect=lambda x: x):
@@ -124,7 +129,8 @@ def test_tarball_output_auto_enables_generate_all_formats():
phase1_args = executions[0].args
assert "--generate-pre-alloc-groups" in phase1_args
- # Phase 2: Should have --generate-all-formats (auto-added) and --use-pre-alloc-groups
+    # Phase 2: Should have --generate-all-formats (auto-added) and
+    # --use-pre-alloc-groups
phase2_args = executions[1].args
assert "--generate-all-formats" in phase2_args
assert "--use-pre-alloc-groups" in phase2_args
@@ -132,7 +138,10 @@ def test_tarball_output_auto_enables_generate_all_formats():
def test_tarball_output_with_explicit_generate_all_formats():
- """Test that explicit --generate-all-formats with tarball output works correctly."""
+ """
+ Test that explicit --generate-all-formats with tarball output works
+ correctly.
+ """
command = FillCommand()
with patch.object(command, "process_arguments", side_effect=lambda x: x):
@@ -150,7 +159,10 @@ def test_tarball_output_with_explicit_generate_all_formats():
def test_regular_output_does_not_auto_trigger_two_phase():
- """Test that regular directory output doesn't auto-trigger two-phase execution."""
+ """
+ Test that regular directory output doesn't auto-trigger two-phase
+ execution.
+ """
command = FillCommand()
with patch.object(command, "process_arguments", side_effect=lambda x: x):
diff --git a/src/cli/tests/test_order_fixtures.py b/src/cli/tests/test_order_fixtures.py
index 27a88ceef7b..0c3f2db4f7f 100644
--- a/src/cli/tests/test_order_fixtures.py
+++ b/src/cli/tests/test_order_fixtures.py
@@ -57,7 +57,9 @@ def test_cli_invocation(input_output_dirs):
def test_input_is_file_instead_of_directory():
- """Test the CLI interface when the input path is a file, not a directory."""
+ """
+ Test the CLI interface when the input path is a file, not a directory.
+ """
runner = CliRunner()
with TemporaryDirectory() as temp_dir:
temp_file = Path(temp_dir) / "temp_file.txt"
diff --git a/src/cli/tests/test_pytest_fill_command.py b/src/cli/tests/test_pytest_fill_command.py
index 95c3a3e8f47..0386b05d4e1 100644
--- a/src/cli/tests/test_pytest_fill_command.py
+++ b/src/cli/tests/test_pytest_fill_command.py
@@ -50,11 +50,12 @@ class TestHtmlReportFlags:
@pytest.fixture
def fill_args(self, default_t8n):
"""
- Provide default arguments for the `fill` command when testing html report
- generation.
+ Provide default arguments for the `fill` command when testing html
+ report generation.
- Specifies a single existing example test case for faster fill execution,
- and to allow for tests to check for the fixture generation location.
+ Specifies a single existing example test case for faster fill
+ execution, and to allow for tests to check for the fixture generation
+ location.
"""
return [
"-k",
@@ -81,8 +82,8 @@ def monkeypatch_default_output_directory(self, monkeypatch, temp_dir):
"""
Monkeypatch default output directory for the pytest commands.
- This avoids using the local directory in user space for the output of pytest
- commands and uses the a temporary directory instead.
+ This avoids using the local directory in user space for the output of
+        pytest commands and uses a temporary directory instead.
"""
def mock_default_output_directory():
@@ -101,7 +102,10 @@ def test_fill_default_output_options(
fill_args,
default_html_report_file_path,
):
- """Test default pytest html behavior: Neither `--html` or `--output` is specified."""
+ """
+ Test default pytest html behavior: Neither `--html` or `--output` is
+ specified.
+ """
default_html_path = temp_dir / default_html_report_file_path
result = runner.invoke(fill, fill_args)
assert result.exit_code == pytest.ExitCode.OK
@@ -141,7 +145,9 @@ def test_fill_output_option(
fill_args,
default_html_report_file_path,
):
- """Tests pytest html report generation with only the `--output` flag."""
+ """
+ Tests pytest html report generation with only the `--output` flag.
+ """
output_dir = temp_dir / "non_default_output_dir"
non_default_html_path = output_dir / default_html_report_file_path
fill_args += ["--output", str(output_dir)]
@@ -156,7 +162,10 @@ def test_fill_html_and_output_options(
temp_dir,
fill_args,
):
- """Tests pytest html report generation with both `--output` and `--html` flags."""
+ """
+ Tests pytest html report generation with both `--output` and `--html`
+ flags.
+ """
output_dir = temp_dir / "non_default_output_dir_fixtures"
html_path = temp_dir / "non_default_output_dir_html" / "non_default.html"
fill_args += ["--output", str(output_dir), "--html", str(html_path)]
diff --git a/src/cli/tox_helpers.py b/src/cli/tox_helpers.py
index 7dbe0c26cf1..63b4cee668f 100644
--- a/src/cli/tox_helpers.py
+++ b/src/cli/tox_helpers.py
@@ -2,8 +2,8 @@
CLI commands used by tox.ini.
Contains wrappers to the external commands markdownlint-cli2 and pyspelling
-(requires aspell) that fail silently if the command is not available. The
-aim is to avoid disruption to external contributors.
+(requires aspell) that fail silently if the command is not available. The aim
+is to avoid disruption to external contributors.
"""
import os
@@ -23,10 +23,11 @@ def write_github_summary(title: str, tox_env: str, error_message: str, fix_comma
Write a summary to GitHub Actions when a check fails.
Args:
- title: The title of the check that failed
- tox_env: The tox environment name (e.g., "spellcheck")
- error_message: Description of what went wrong
- fix_commands: List of commands to fix the issue locally
+        title: The title of the check that failed
+        tox_env: The tox environment name
+            (e.g., "spellcheck")
+        error_message: Description of what went wrong
+        fix_commands: List of commands to fix the issue locally
"""
if not os.environ.get("GITHUB_ACTIONS"):
@@ -70,7 +71,8 @@ def markdownlint(args):
"""
markdownlint = shutil.which("markdownlint-cli2")
if not markdownlint:
- # Note: There's an additional step in test.yaml to run markdownlint-cli2 in GitHub Actions
+        # Note: There's an additional step in test.yaml to run
+        # markdownlint-cli2 in GitHub Actions
click.echo("********* Install 'markdownlint-cli2' to enable markdown linting *********")
sys.exit(0)
@@ -194,7 +196,8 @@ def codespell():
@click.command()
def validate_changelog():
"""
- Validate changelog formatting to ensure bullet points end with proper punctuation.
+ Validate changelog formatting to ensure bullet points end with proper
+ punctuation.
Checks that all bullet points (including nested ones) end with either:
- A period (.) for regular entries
diff --git a/src/config/docs.py b/src/config/docs.py
index f081e92d151..ac6c7e93cd6 100644
--- a/src/config/docs.py
+++ b/src/config/docs.py
@@ -2,7 +2,7 @@
A module for managing documentation-related configurations.
Classes:
-- DocsConfig: Holds configurations for documentation generation.
+ DocsConfig: Holds configurations for documentation generation.
"""
from pydantic import BaseModel
@@ -19,5 +19,6 @@ class DocsConfig(BaseModel):
DOCS_BASE_URL: str = "https://eest.ethereum.org"
- # Documentation URLs prefixed with `DOCS_URL__` to avoid conflicts with other URLs
+ # Documentation URLs prefixed with `DOCS_URL__` to avoid conflicts with
+ # other URLs
DOCS_URL__WRITING_TESTS: str = f"{DOCS_BASE_URL}/main/writing_tests/"
diff --git a/src/config/env.py b/src/config/env.py
index 0e2811b2055..f2bf92b2f2b 100644
--- a/src/config/env.py
+++ b/src/config/env.py
@@ -1,12 +1,14 @@
"""
A module for exposing application-wide environment variables.
-This module is responsible for loading, parsing, and validating the application's
-environment configuration from the `env.yaml` file. It uses Pydantic to ensure that
-the configuration adheres to expected formats and types.
+This module is responsible for loading, parsing, and validating the
+application's environment configuration from the `env.yaml` file. It uses
+Pydantic to ensure that the configuration adheres to expected formats and
+types.
Functions:
-- create_default_config: Creates a default configuration file if it doesn't exist.
+- create_default_config: Creates a default configuration file if it
+ doesn't exist.
Classes:
- EnvConfig: Loads the configuration and exposes it as Python objects.
@@ -32,9 +34,11 @@ class RemoteNode(BaseModel):
Represents a configuration for a remote node.
Attributes:
- - name (str): The name of the remote node.
- - node_url (HttpUrl): The URL for the remote node, validated as a proper URL.
- - rpc_headers (Dict[str, str]): A dictionary of optional RPC headers, defaults to empty dict.
+ name (str): The name of the remote node.
+ node_url (HttpUrl): The URL for the remote node, validated as a
+ proper URL.
+ rpc_headers (Dict[str, str]): A dictionary of optional RPC headers,
+ defaults to empty dict.
"""
@@ -48,7 +52,7 @@ class Config(BaseModel):
Represents the overall environment configuration.
Attributes:
- - remote_nodes (List[RemoteNode]): A list of remote node configurations.
+ remote_nodes (List[RemoteNode]): A list of remote node configurations.
"""
@@ -59,8 +63,8 @@ class EnvConfig(Config):
"""
Loads and validates environment configuration from `env.yaml`.
- This is a wrapper class for the Config model. It reads a config file
- from disk into a Config model and then exposes it.
+ This is a wrapper class for the Config model. It reads a config file from
+ disk into a Config model and then exposes it.
"""
def __init__(self):
diff --git a/src/conftest.py b/src/conftest.py
index 59fb310c5fb..82c42bb48b8 100644
--- a/src/conftest.py
+++ b/src/conftest.py
@@ -14,7 +14,8 @@
for transition_tool in TransitionTool.registered_tools
if (
transition_tool.is_installed()
- # Currently, Besu has the same `default_binary` as Geth, so we can't use `is_installed`.
+ # Currently, Besu has the same `default_binary` as Geth, so we can't
+ # use `is_installed`.
and transition_tool != BesuTransitionTool
)
]
@@ -32,7 +33,8 @@ def installed_transition_tool_instances() -> Generator[
transition_tool_instance.start_server()
instances[transition_tool_class.__name__] = transition_tool_instance
except Exception as e:
- # Record the exception in order to provide context when failing the appropriate test
+ # Record the exception in order to provide context when failing the
+ # appropriate test
instances[transition_tool_class.__name__] = e
yield instances
for instance in instances.values():
diff --git a/src/ethereum_clis/__init__.py b/src/ethereum_clis/__init__.py
index 9b2850b7242..6a21ab3c2e1 100644
--- a/src/ethereum_clis/__init__.py
+++ b/src/ethereum_clis/__init__.py
@@ -1,4 +1,7 @@
-"""Library of Python wrappers for the different implementations of transition tools."""
+"""
+Library of Python wrappers for the different implementations of transition
+tools.
+"""
from .cli_types import (
BlockExceptionWithMessage,
diff --git a/src/ethereum_clis/cli_types.py b/src/ethereum_clis/cli_types.py
index b6c9b48bad0..d8745a1034a 100644
--- a/src/ethereum_clis/cli_types.py
+++ b/src/ethereum_clis/cli_types.py
@@ -99,8 +99,8 @@ def from_file(cls, trace_file_path: Path) -> Self:
@staticmethod
def remove_gas(traces: List[TraceLine]):
"""
- Remove the GAS operation opcode result from the stack to make comparison possible
- even if the gas has been pushed to the stack.
+ Remove the GAS operation opcode result from the stack to make
+ comparison possible even if the gas has been pushed to the stack.
"""
for i in range(1, len(traces)):
trace = traces[i]
@@ -143,7 +143,9 @@ def print(self):
class Traces(EthereumTestRootModel):
- """Traces returned from the transition tool for all transactions executed."""
+ """
+ Traces returned from the transition tool for all transactions executed.
+ """
root: List[TransactionTraces]
diff --git a/src/ethereum_clis/clis/ethereumjs.py b/src/ethereum_clis/clis/ethereumjs.py
index 45e7aed7358..59214e00faf 100644
--- a/src/ethereum_clis/clis/ethereumjs.py
+++ b/src/ethereum_clis/clis/ethereumjs.py
@@ -39,13 +39,16 @@ def __init__(
def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool.
+
Currently, EthereumJS-t8n provides no way to determine supported forks.
"""
return True
class EthereumJSExceptionMapper(ExceptionMapper):
- """Translate between EEST exceptions and error strings returned by EthereumJS."""
+ """
+ Translate between EEST exceptions and error strings returned by EthereumJS.
+ """
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: (
diff --git a/src/ethereum_clis/clis/ethrex.py b/src/ethereum_clis/clis/ethrex.py
index 69cbbfde421..1d3c17a58da 100644
--- a/src/ethereum_clis/clis/ethrex.py
+++ b/src/ethereum_clis/clis/ethrex.py
@@ -48,7 +48,8 @@ class EthrexExceptionMapper(ExceptionMapper):
r"blob versioned hashes not supported|"
r"Type 3 transactions are not supported before the Cancun fork"
),
- # A type 4 Transaction without a recipient won't even reach the EVM, we can't decode it.
+ # A type 4 Transaction without a recipient won't even reach the EVM, we
+ # can't decode it.
TransactionException.TYPE_4_TX_CONTRACT_CREATION: (
r"unexpected length|Contract creation in type 4 transaction|"
r"Error decoding field 'to' of type primitive_types::H160: InvalidLength"
diff --git a/src/ethereum_clis/clis/evmone.py b/src/ethereum_clis/clis/evmone.py
index c76dfd4cf9f..e2f34f8fb06 100644
--- a/src/ethereum_clis/clis/evmone.py
+++ b/src/ethereum_clis/clis/evmone.py
@@ -37,14 +37,16 @@ def __init__(
def is_fork_supported(self, fork: Fork) -> bool:
"""
- Return True if the fork is supported by the tool.
- Currently, evmone-t8n provides no way to determine supported forks.
+ Return True if the fork is supported by the tool. Currently, evmone-t8n
+ provides no way to determine supported forks.
"""
return True
class EvmoneExceptionMapper(ExceptionMapper):
- """Translate between EEST exceptions and error strings returned by Evmone."""
+ """
+ Translate between EEST exceptions and error strings returned by Evmone.
+ """
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.SENDER_NOT_EOA: "sender not an eoa:",
@@ -82,7 +84,8 @@ class EvmoneExceptionMapper(ExceptionMapper):
),
TransactionException.NONCE_MISMATCH_TOO_LOW: "nonce too low",
TransactionException.NONCE_MISMATCH_TOO_HIGH: "nonce too high",
- # TODO EVMONE needs to differentiate when the section is missing in the header or body
+ # TODO EVMONE needs to differentiate when the section is missing in the
+ # header or body
EOFException.MISSING_STOP_OPCODE: "err: no_terminating_instruction",
EOFException.MISSING_CODE_HEADER: "err: code_section_missing",
EOFException.MISSING_TYPE_HEADER: "err: type_section_missing",
diff --git a/src/ethereum_clis/clis/execution_specs.py b/src/ethereum_clis/clis/execution_specs.py
index 31b5dd2e430..ec2ac7a3349 100644
--- a/src/ethereum_clis/clis/execution_specs.py
+++ b/src/ethereum_clis/clis/execution_specs.py
@@ -29,19 +29,22 @@
class ExecutionSpecsTransitionTool(TransitionTool):
"""
- Ethereum Specs EVM Resolver `ethereum-spec-evm-resolver` Transition Tool wrapper class.
+ Ethereum Specs EVM Resolver `ethereum-spec-evm-resolver` Transition Tool
+ wrapper class.
- `ethereum-spec-evm-resolver` is installed by default for `execution-spec-tests`:
+ `ethereum-spec-evm-resolver` is installed by default for
+ `execution-spec-tests`:
```console
- uv run fill --evm-bin=ethereum-spec-evm-resolver
+ uv run fill --evm-bin=ethereum-spec-evm-resolver
```
- To use a specific version of the `ethereum-spec-evm-resolver` tool, update it to the
- desired version in `pyproject.toml`.
+ To use a specific version of the `ethereum-spec-evm-resolver` tool, update
+ it to the desired version in `pyproject.toml`.
- The `ethereum-spec-evm-resolver` tool essentially wraps around the EELS evm daemon. It can
- handle requests for different EVM forks, even when those forks are implemented by different
- versions of EELS hosted in different places.
+ The `ethereum-spec-evm-resolver` tool essentially wraps around the EELS evm
+ daemon. It can handle requests for different EVM forks, even when those
+ forks are implemented by different versions of EELS hosted in different
+ places.
"""
default_binary = Path("ethereum-spec-evm-resolver")
@@ -57,7 +60,9 @@ def __init__(
trace: bool = False,
server_url: str | None = None,
):
- """Initialize the Ethereum Specs EVM Resolver Transition Tool interface."""
+ """
+ Initialize the Ethereum Specs EVM Resolver Transition Tool interface.
+ """
os.environ.setdefault("NO_PROXY", "*") # Disable proxy for local connections
super().__init__(
exception_mapper=ExecutionSpecsExceptionMapper(), binary=binary, trace=trace
@@ -114,7 +119,8 @@ def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool.
- If the fork is a transition fork, we want to check the fork it transitions to.
+ If the fork is a transition fork, we want to check the fork it
+ transitions to.
`ethereum-spec-evm` appends newlines to forks in the help string.
"""
@@ -135,7 +141,10 @@ def _generate_post_args(
class ExecutionSpecsExceptionMapper(ExceptionMapper):
- """Translate between EEST exceptions and error strings returned by ExecutionSpecs."""
+ """
+ Translate between EEST exceptions and error strings returned by
+ ExecutionSpecs.
+ """
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST: "EmptyAuthorizationListError",
diff --git a/src/ethereum_clis/clis/geth.py b/src/ethereum_clis/clis/geth.py
index bf805270805..769315aa4b6 100644
--- a/src/ethereum_clis/clis/geth.py
+++ b/src/ethereum_clis/clis/geth.py
@@ -200,7 +200,8 @@ def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool.
- If the fork is a transition fork, we want to check the fork it transitions to.
+ If the fork is a transition fork, we want to check the fork it
+ transitions to.
"""
return fork.transition_tool_name() in self.help_string
@@ -221,8 +222,8 @@ def consume_blockchain_test(
"""
Consume a single blockchain test.
- The `evm blocktest` command takes the `--run` argument which can be used to select a
- specific fixture from the fixture file when executing.
+ The `evm blocktest` command takes the `--run` argument which can be
+ used to select a specific fixture from the fixture file when executing.
"""
subcommand = "blocktest"
global_options = []
@@ -273,10 +274,10 @@ def consume_state_test_file(
"""
Consume an entire state test file.
- The `evm statetest` will always execute all the tests contained in a file without the
- possibility of selecting a single test, so this function is cached in order to only call
- the command once and `consume_state_test` can simply select the result that
- was requested.
+ The `evm statetest` will always execute all the tests contained in a
+ file without the possibility of selecting a single test, so this
+ function is cached in order to only call the command once and
+ `consume_state_test` can simply select the result that was requested.
"""
subcommand = "statetest"
global_options: List[str] = []
@@ -316,8 +317,8 @@ def consume_state_test(
"""
Consume a single state test.
- Uses the cached result from `consume_state_test_file` in order to not call the command
- every time an select a single result from there.
+ Uses the cached result from `consume_state_test_file` in order to not
+ call the command every time and select a single result from there.
"""
file_results = self.consume_state_test_file(
fixture_path=fixture_path,
@@ -346,7 +347,10 @@ def consume_fixture(
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
):
- """Execute the appropriate geth fixture consumer for the fixture at `fixture_path`."""
+ """
+ Execute the appropriate geth fixture consumer for the fixture at
+ `fixture_path`.
+ """
if fixture_format == BlockchainFixture:
self.consume_blockchain_test(
fixture_path=fixture_path,
diff --git a/src/ethereum_clis/clis/nethermind.py b/src/ethereum_clis/clis/nethermind.py
index 13bf2c776f2..85d6ea44847 100644
--- a/src/ethereum_clis/clis/nethermind.py
+++ b/src/ethereum_clis/clis/nethermind.py
@@ -96,8 +96,9 @@ def has_eof_support(self) -> bool:
"""
Return True if the `nethtest` binary supports the `--eofTest` flag.
- Currently, nethtest EOF support is only available in nethermind's feature/evm/eof
- branch https://github.com/NethermindEth/nethermind/tree/feature/evm/eof
+ Currently, nethtest EOF support is only available in nethermind's
+ feature/evm/eof branch
+ https://github.com/NethermindEth/nethermind/tree/feature/evm/eof
"""
return "--eofTest" in self.help()
@@ -121,7 +122,8 @@ def _build_command_with_options(
if fixture_format is BlockchainFixture:
command += ["--blockTest", "--filter", f"{re.escape(fixture_name)}"]
elif fixture_format is StateFixture:
- # TODO: consider using `--filter` here to readily access traces from the output
+ # TODO: consider using `--filter` here to readily access traces
+ # from the output
pass # no additional options needed
elif fixture_format is EOFFixture:
command += ["--eofTest"]
@@ -144,10 +146,10 @@ def consume_state_test_file(
"""
Consume an entire state test file.
- The `evm statetest` will always execute all the tests contained in a file without the
- possibility of selecting a single test, so this function is cached in order to only call
- the command once and `consume_state_test` can simply select the result that
- was requested.
+ The `evm statetest` will always execute all the tests contained in a
+ file without the possibility of selecting a single test, so this
+ function is cached in order to only call the command once and
+ `consume_state_test` can simply select the result that was requested.
"""
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
@@ -180,8 +182,8 @@ def consume_state_test(
"""
Consume a single state test.
- Uses the cached result from `consume_state_test_file` in order to not call the command
- every time an select a single result from there.
+ Uses the cached result from `consume_state_test_file` in order to not
+ call the command every time and select a single result from there.
"""
file_results, stderr = self.consume_state_test_file(
fixture_path=fixture_path,
@@ -203,9 +205,6 @@ def consume_state_test(
for test_result in file_results
if test_result["name"].removesuffix(nethtest_suffix)
== f"{fixture_name.split('/')[-1]}"
- # TODO: the following was required for nethermind's feature/evm/eof branch
- # nethtest version: 1.32.0-unstable+025871675bd2e0839f93d2b70416ebae9dbae012
- # == f"{fixture_name.split('.py::')[-1]}"
]
assert len(test_result) < 2, f"Multiple test results for {fixture_name}"
assert len(test_result) == 1, f"Test result for {fixture_name} missing"
@@ -254,7 +253,8 @@ def consume_eof_test_file(
pattern = re.compile(r"^(test_.+?)\s+(PASS|FAIL)$", re.MULTILINE)
test_results = {
- match.group(1): match.group(2) == "PASS" # Convert "PASS" to True and "FAIL" to False
+ match.group(1): match.group(2) == "PASS" # Convert "PASS" to True
+ # and "FAIL" to False
for match in pattern.finditer(result.stdout)
}
@@ -297,7 +297,10 @@ def consume_fixture(
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
):
- """Execute the appropriate geth fixture consumer for the fixture at `fixture_path`."""
+ """
+ Execute the appropriate geth fixture consumer for the fixture at
+ `fixture_path`.
+ """
command = self._build_command_with_options(
fixture_format, fixture_path, fixture_name, debug_output_path
)
diff --git a/src/ethereum_clis/clis/nimbus.py b/src/ethereum_clis/clis/nimbus.py
index 6c0960774c5..a1f167c363e 100644
--- a/src/ethereum_clis/clis/nimbus.py
+++ b/src/ethereum_clis/clis/nimbus.py
@@ -57,13 +57,16 @@ def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool.
- If the fork is a transition fork, we want to check the fork it transitions to.
+ If the fork is a transition fork, we want to check the fork it
+ transitions to.
"""
return fork.transition_tool_name() in self.help_string
class NimbusExceptionMapper(ExceptionMapper):
- """Translate between EEST exceptions and error strings returned by Nimbus."""
+ """
+ Translate between EEST exceptions and error strings returned by Nimbus.
+ """
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.TYPE_4_TX_CONTRACT_CREATION: (
diff --git a/src/ethereum_clis/ethereum_cli.py b/src/ethereum_clis/ethereum_cli.py
index 7ea060fcc7c..6bdbf646ea7 100644
--- a/src/ethereum_clis/ethereum_cli.py
+++ b/src/ethereum_clis/ethereum_cli.py
@@ -20,7 +20,9 @@ class UnknownCLIError(Exception):
class CLINotFoundInPathError(Exception):
- """Exception raised if the specified CLI binary is not found in the path."""
+ """
+ Exception raised if the specified CLI binary is not found in the path.
+ """
def __init__(self, message="The CLI binary was not found in the path", binary=None):
"""Initialize the exception."""
@@ -49,11 +51,14 @@ class EthereumCLI:
cached_version: Optional[str] = None
def __init__(self, *, binary: Optional[Path] = None):
- """Abstract initialization method that all subclasses must implement."""
+ """
+ Abstract initialization method that all subclasses must implement.
+ """
if binary is None:
binary = self.default_binary
else:
- # improve behavior of which by resolving the path: ~/relative paths don't work
+ # improve behavior of which by resolving the path: ~/relative paths
+ # don't work
resolved_path = Path(os.path.expanduser(binary)).resolve()
if resolved_path.exists():
binary = resolved_path
@@ -75,10 +80,12 @@ def set_default_tool(cls, tool_subclass: Type[Any]):
@classmethod
def from_binary_path(cls, *, binary_path: Optional[Path], **kwargs) -> Any:
"""
- Instantiate the appropriate CLI subclass derived from the CLI's `binary_path`.
+ Instantiate the appropriate CLI subclass derived from the CLI's
+ `binary_path`.
- This method will attempt to detect the CLI version and instantiate the appropriate
- subclass based on the version output by running the CLI with the version flag.
+ This method will attempt to detect the CLI version and instantiate the
+ appropriate subclass based on the version output by running the CLI
+ with the version flag.
"""
assert cls.default_tool is not None, "default CLI implementation was never set"
@@ -114,8 +121,8 @@ def from_binary_path(cls, *, binary_path: Optional[Path], **kwargs) -> Any:
logger.debug(f"Successfully located the path of the t8n binary: {binary}")
binary = Path(binary)
- # Group the tools by version flag, so we only have to call the tool once for all the
- # classes that share the same version flag
+ # Group the tools by version flag, so we only have to call the tool
+ # once for all the classes that share the same version flag
for version_flag, subclasses in groupby(
cls.registered_tools, key=lambda x: x.version_flag
):
@@ -134,7 +141,9 @@ def from_binary_path(cls, *, binary_path: Optional[Path], **kwargs) -> Any:
if result.returncode != 0:
logger.debug(f"Subprocess returncode is not 0! It is: {result.returncode}")
- continue # don't raise exception, you are supposed to keep trying different version flags # noqa: E501
+ continue
+ # don't raise exception, you are supposed to keep
+ # trying different version flags
if result.stderr:
logger.debug(f"Stderr detected: {result.stderr}") # type: ignore
@@ -143,10 +152,6 @@ def from_binary_path(cls, *, binary_path: Optional[Path], **kwargs) -> Any:
binary_output = ""
if result.stdout:
binary_output = result.stdout.decode().strip()
- # e.g. 1.31.10+f62cfede9b4abfb5cd62d6f138240668620a2b0d should be treated as 1.31.10 # noqa: E501
- # if "+" in binary_output:
- # binary_output = binary_output.split("+")[0]
-
logger.debug(f"Stripped subprocess stdout: {binary_output}")
for subclass in subclasses:
@@ -169,7 +174,10 @@ def from_binary_path(cls, *, binary_path: Optional[Path], **kwargs) -> Any:
@classmethod
def detect_binary(cls, binary_output: str) -> bool:
- """Return True if a CLI's `binary_output` matches the class's expected output."""
+ """
+ Return True if a CLI's `binary_output` matches the class's expected
+ output.
+ """
assert cls.detect_binary_pattern is not None
return cls.detect_binary_pattern.match(binary_output) is not None
@@ -187,7 +195,10 @@ def is_installed(cls, binary_path: Optional[Path] = None) -> bool:
return binary is not None
def version(self) -> str:
- """Return the name and version of the CLI as reported by the CLI's version flag."""
+ """
+ Return the name and version of the CLI as reported by the CLI's version
+ flag.
+ """
if self.cached_version is None:
result = subprocess.run(
[str(self.binary), self.version_flag],
diff --git a/src/ethereum_clis/fixture_consumer_tool.py b/src/ethereum_clis/fixture_consumer_tool.py
index 1f958e2c139..3391c49a9ce 100644
--- a/src/ethereum_clis/fixture_consumer_tool.py
+++ b/src/ethereum_clis/fixture_consumer_tool.py
@@ -9,8 +9,8 @@
class FixtureConsumerTool(FixtureConsumer, EthereumCLI):
"""
- Fixture consumer tool abstract base class which should be inherited by all fixture consumer
- tool implementations.
+ Fixture consumer tool abstract base class which should be inherited by all
+ fixture consumer tool implementations.
"""
registered_tools: List[Type["FixtureConsumerTool"]] = []
diff --git a/src/ethereum_clis/tests/test_execution_specs.py b/src/ethereum_clis/tests/test_execution_specs.py
index 964820dd8a6..a748c9a5b31 100644
--- a/src/ethereum_clis/tests/test_execution_specs.py
+++ b/src/ethereum_clis/tests/test_execution_specs.py
@@ -22,10 +22,11 @@
@pytest.fixture(autouse=True)
def monkeypatch_path_for_entry_points(monkeypatch):
"""
- Monkeypatch the PATH to add the "bin" directory where entrypoints are installed.
+ Monkeypatch the PATH to add the "bin" directory where entrypoints are
+ installed.
- This would typically be in the venv in which pytest is running these tests and fill,
- which, with uv, is `./.venv/bin`.
+ This would typically be in the venv in which pytest is running these tests
+ and fill, which, with uv, is `./.venv/bin`.
This is required in order for fill to locate the ethereum-spec-evm-resolver
"binary" (entrypoint) when being executed using pytester.
@@ -98,7 +99,8 @@ def test_evm_tool_binary_arg(evm_tool, binary_arg):
elif binary_arg == "path_type":
evm_bin = which(DEFAULT_EVM_T8N_BINARY_NAME)
if not evm_bin:
- # typing: Path can not take None; but if it is None, we may as well fail explicitly.
+ # typing: Path can not take None; but if it is None, we may as well
+ # fail explicitly.
raise Exception("Failed to find `{DEFAULT_EVM_T8N_BINARY_NAME}` in the PATH via which")
evm_tool(binary=Path(evm_bin)).version()
return
@@ -143,7 +145,9 @@ def test_evm_t8n(
env: Environment,
test_dir: str,
) -> None:
- """Test the `evaluate` method of the `ExecutionSpecsTransitionTool` class."""
+ """
+ Test the `evaluate` method of the `ExecutionSpecsTransitionTool` class.
+ """
expected_path = Path(FIXTURES_ROOT, test_dir, "exp.json")
with open(expected_path, "r") as exp:
@@ -162,8 +166,9 @@ def test_evm_t8n(
)
assert to_json(t8n_output.alloc) == expected.get("alloc")
if isinstance(default_t8n, ExecutionSpecsTransitionTool):
- # The expected output was generated with geth, instead of deleting any info from
- # this expected output, the fields not returned by eels are handled here.
+ # The expected output was generated with geth, instead of deleting
+ # any info from this expected output, the fields not returned by
+ # eels are handled here.
missing_receipt_fields = [
"root",
"status",
diff --git a/src/ethereum_clis/tests/test_transition_tools_support.py b/src/ethereum_clis/tests/test_transition_tools_support.py
index 953af15744b..8c6f94278bc 100644
--- a/src/ethereum_clis/tests/test_transition_tools_support.py
+++ b/src/ethereum_clis/tests/test_transition_tools_support.py
@@ -44,7 +44,9 @@ def test_ci_multi_t8n_support(
installed_transition_tool_instances: Dict[str, TransitionTool | Exception],
running_in_ci: bool,
):
- """Check that the instances of t8n we expect in CI environment were found."""
+ """
+ Check that the instances of t8n we expect in CI environment were found.
+ """
names = set(installed_transition_tool_instances.keys())
expected_names = {"ExecutionSpecsTransitionTool"}
if running_in_ci:
diff --git a/src/ethereum_clis/transition_tool.py b/src/ethereum_clis/transition_tool.py
index c3fa04ec278..80e5ba3a85a 100644
--- a/src/ethereum_clis/transition_tool.py
+++ b/src/ethereum_clis/transition_tool.py
@@ -38,22 +38,24 @@
model_dump_config: Mapping = {"by_alias": True, "exclude_none": True}
-# TODO: reduce NORMAL_SERVER_TIMEOUT back down to 20 once BLS timeout issue is resolved:
-# https://github.com/ethereum/execution-spec-tests/issues/1894
+# TODO: reduce NORMAL_SERVER_TIMEOUT back down to 20 once BLS timeout issue is
+# resolved: https://github.com/ethereum/execution-spec-tests/issues/1894
NORMAL_SERVER_TIMEOUT = 600
SLOW_REQUEST_TIMEOUT = 600
def get_valid_transition_tool_names() -> set[str]:
- """Get all valid transition tool names from deployed and development forks."""
+ """
+ Get all valid transition tool names from deployed and development forks.
+ """
all_available_forks = get_forks() + get_development_forks()
return {fork.transition_tool_name() for fork in all_available_forks}
class TransitionTool(EthereumCLI):
"""
- Transition tool abstract base class which should be inherited by all transition tool
- implementations.
+ Transition tool abstract base class which should be inherited by all
+ transition tool implementations.
"""
traces: List[Traces] | None = None
@@ -80,7 +82,9 @@ def __init__(
binary: Optional[Path] = None,
trace: bool = False,
):
- """Abstract initialization method that all subclasses must implement."""
+ """
+ Abstract initialization method that all subclasses must implement.
+ """
assert exception_mapper is not None
self.exception_mapper = exception_mapper
super().__init__(binary=binary)
@@ -112,7 +116,9 @@ def reset_traces(self):
self.traces = None
def append_traces(self, new_traces: Traces):
- """Append a list of traces of a state transition to the current list."""
+ """
+ Append a list of traces of a state transition to the current list.
+ """
if self.traces is None:
self.traces = []
self.traces.append(new_traces)
@@ -127,7 +133,10 @@ def collect_traces(
temp_dir: tempfile.TemporaryDirectory,
debug_output_path: str = "",
) -> Traces:
- """Collect the traces from the t8n tool output and store them in the traces list."""
+ """
+ Collect the traces from the t8n tool output and store them in the
+ traces list.
+ """
traces: Traces = Traces(root=[])
temp_dir_path = Path(temp_dir.name)
for i, r in enumerate(receipts):
@@ -194,7 +203,10 @@ def _evaluate_filesystem(
t8n_data: TransitionToolData,
debug_output_path: str = "",
) -> TransitionToolOutput:
- """Execute a transition tool using the filesystem for its inputs and outputs."""
+ """
+ Execute a transition tool using the filesystem for its inputs and
+ outputs.
+ """
temp_dir = tempfile.TemporaryDirectory()
os.mkdir(os.path.join(temp_dir.name, "input"))
os.mkdir(os.path.join(temp_dir.name, "output"))
@@ -256,14 +268,16 @@ def _evaluate_filesystem(
t8n_call = t8n_call.replace(
os.path.dirname(file_path), os.path.join(debug_output_path, "input")
)
- t8n_call = t8n_call.replace( # use a new output path for basedir and outputs
+ # use a new output path for basedir and outputs
+ t8n_call = t8n_call.replace(
temp_dir.name,
t8n_output_base_dir,
)
t8n_script = textwrap.dedent(
f"""\
#!/bin/bash
- rm -rf {debug_output_path}/t8n.sh.out # hard-coded to avoid surprises
+ # hard-coded to avoid surprises
+ rm -rf {debug_output_path}/t8n.sh.out
mkdir -p {debug_output_path}/t8n.sh.out/output
{t8n_call}
"""
@@ -355,7 +369,9 @@ def _evaluate_server(
debug_output_path: str = "",
timeout: int,
) -> TransitionToolOutput:
- """Execute the transition tool sending inputs and outputs via a server."""
+ """
+ Execute the transition tool sending inputs and outputs via a server.
+ """
request_data = t8n_data.get_request_data()
request_data_json = request_data.model_dump(mode="json", **model_dump_config)
@@ -424,7 +440,10 @@ def _evaluate_stream(
t8n_data: TransitionToolData,
debug_output_path: str = "",
) -> TransitionToolOutput:
- """Execute a transition tool using stdin and stdout for its inputs and outputs."""
+ """
+ Execute a transition tool using stdin and stdout for its inputs and
+ outputs.
+ """
temp_dir = tempfile.TemporaryDirectory()
args = self.construct_args_stream(t8n_data, temp_dir)
@@ -468,7 +487,8 @@ def safe_t8n_args(
self, fork_name: str, chain_id: int, reward: int, temp_dir=None
) -> List[str]:
"""Safely construct t8n arguments with validated inputs."""
- # Validate fork name against actual transition tool names from all available forks
+ # Validate fork name against actual transition tool names from all
+ # available forks
valid_forks = get_valid_transition_tool_names()
if fork_name not in valid_forks:
raise ValueError(f"Invalid fork name: {fork_name}")
@@ -528,7 +548,9 @@ def dump_debug_stream(
args: List[str],
result: subprocess.CompletedProcess,
):
- """Export debug files if requested when interacting with t8n via streams."""
+ """
+ Export debug files if requested when interacting with t8n via streams.
+ """
if not debug_output_path:
return
@@ -539,8 +561,11 @@ def dump_debug_stream(
t8n_script = textwrap.dedent(
f"""\
#!/bin/bash
- rm -rf {debug_output_path}/t8n.sh.out # hard-coded to avoid surprises
- mkdir {debug_output_path}/t8n.sh.out # unused if tracing is not enabled
+ # hard-coded to avoid surprises
+ rm -rf {debug_output_path}/t8n.sh.out
+
+ # unused if tracing is not enabled
+ mkdir {debug_output_path}/t8n.sh.out
{t8n_call} < {debug_output_path}/stdin.txt
"""
)
diff --git a/src/ethereum_test_base_types/base_types.py b/src/ethereum_test_base_types/base_types.py
index d50155ac38a..041df926ca7 100644
--- a/src/ethereum_test_base_types/base_types.py
+++ b/src/ethereum_test_base_types/base_types.py
@@ -24,15 +24,18 @@
class ToStringSchema:
"""
- Type converter to add a simple pydantic schema that correctly
- parses and serializes the type.
+ Type converter to add a simple pydantic schema that correctly parses and
+ serializes the type.
"""
@staticmethod
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call the class constructor without info and appends the serialization schema."""
+ """
+ Call the class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
@@ -86,7 +89,9 @@ def __new__(cls, input_number: NumberConvertible | Self):
@staticmethod
def _get_multiplier(unit: str) -> int:
- """Return the multiplier for the given unit of wei, handling synonyms."""
+ """
+ Return the multiplier for the given unit of wei, handling synonyms.
+ """
match unit:
case "wei":
return 1
@@ -117,7 +122,10 @@ def __str__(self) -> str:
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call the class constructor without info and appends the serialization schema."""
+ """
+ Call the class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
@@ -143,7 +151,10 @@ def hex(self) -> str:
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call the class constructor without info and appends the serialization schema."""
+ """
+ Call the class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
@@ -197,7 +208,10 @@ def sha256(self) -> "Hash":
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call the class constructor without info and appends the serialization schema."""
+ """
+ Call the class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
@@ -256,7 +270,10 @@ def hex(self) -> str:
def __get_pydantic_core_schema__(
cls: Type[Self], source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call the class constructor without info and appends the serialization schema."""
+ """
+ Call the class constructor without info and appends the serialization
+ schema.
+ """
pattern = f"^0x([0-9a-fA-F]{{{cls.byte_length * 2}}})*$"
return no_info_plain_validator_function(
source_type,
@@ -341,7 +358,10 @@ def __ne__(self, other: object) -> bool:
def __get_pydantic_core_schema__(
cls: Type[Self], source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call the class constructor without info and appends the serialization schema."""
+ """
+ Call the class constructor without info and appends the serialization
+ schema.
+ """
pattern = f"^0x([0-9a-fA-F]{{{cls.byte_length * 2}}})*$"
return no_info_plain_validator_function(
source_type,
@@ -351,7 +371,9 @@ def __get_pydantic_core_schema__(
class ForkHash(FixedSizeBytes[4]): # type: ignore
- """Class that helps represent the CRC config hashes and identifiers of a fork."""
+ """
+ Class that helps represent the CRC config hashes and identifiers of a fork.
+ """
pass
@@ -391,7 +413,8 @@ class StorageKey(FixedSizeBytes[32]): # type: ignore
def __new__(cls, value, **kwargs):
"""Create a new StorageKey with automatic left padding."""
- # Always apply left_padding for storage keys unless explicitly set to False
+ # Always apply left_padding for storage keys unless explicitly set to
+ # False
if "left_padding" not in kwargs:
kwargs["left_padding"] = True
return super().__new__(cls, value, **kwargs)
diff --git a/src/ethereum_test_base_types/composite_types.py b/src/ethereum_test_base_types/composite_types.py
index 035bfb2a88e..63fbe534ff6 100644
--- a/src/ethereum_test_base_types/composite_types.py
+++ b/src/ethereum_test_base_types/composite_types.py
@@ -41,7 +41,9 @@ class Storage(EthereumTestRootModel[Dict[StorageKeyValueType, StorageKeyValueTyp
@dataclass(kw_only=True)
class InvalidTypeError(Exception):
- """Invalid type used when describing test's expected storage key or value."""
+ """
+ Invalid type used when describing test's expected storage key or value.
+ """
key_or_value: Any
@@ -90,8 +92,8 @@ def __str__(self):
@dataclass(kw_only=True)
class KeyValueMismatchError(Exception):
"""
- Test expected a certain value in a storage key but value found
- was different.
+ Test expected a certain value in a storage key but value found was
+ different.
"""
address: Address
@@ -101,7 +103,10 @@ class KeyValueMismatchError(Exception):
hint: str
def __init__(self, address: Address, key: int, want: int, got: int, hint: str = "", *args):
- """Initialize the exception with the address, key, wanted and got values."""
+ """
+ Initialize the exception with the address, key, wanted and got
+ values.
+ """
super().__init__(args)
self.address = address
self.key = key
@@ -183,14 +188,17 @@ def items(self):
return self.root.items()
def set_expect_any(self, key: StorageKeyValueTypeConvertible | StorageKeyValueType):
- """Mark key to be able to have any expected value when comparing storages."""
+ """
+ Mark key to be able to have any expected value when comparing storages.
+ """
self._any_map[StorageKeyValueTypeAdapter.validate_python(key)] = True
def store_next(
self, value: StorageKeyValueTypeConvertible | StorageKeyValueType | bool, hint: str = ""
) -> StorageKeyValueType:
"""
- Store a value in the storage and returns the key where the value is stored.
+ Store a value in the storage and returns the key where the value is
+ stored.
Increments the key counter so the next time this function is called,
the next key is used.
@@ -208,10 +216,9 @@ def peek_slot(self) -> int:
def contains(self, other: "Storage") -> bool:
"""
- Return True if self contains all keys with equal value as
- contained by second storage.
- Used for comparison with test expected post state and alloc returned
- by the transition tool.
+ Return True if self contains all keys with equal value as contained by
+ second storage. Used for comparison with test expected post state and
+ alloc returned by the transition tool.
"""
for key in other.keys():
if key not in self:
@@ -222,11 +229,10 @@ def contains(self, other: "Storage") -> bool:
def must_contain(self, address: Address, other: "Storage"):
"""
- Succeeds only if self contains all keys with equal value as
- contained by second storage.
- Used for comparison with test expected post state and alloc returned
- by the transition tool.
- Raises detailed exception when a difference is found.
+ Succeeds only if self contains all keys with equal value as contained
+ by second storage. Used for comparison with test expected post state
+ and alloc returned by the transition tool. Raises detailed exception
+ when a difference is found.
"""
for key in other.keys():
if key not in self:
@@ -283,8 +289,9 @@ def must_be_equal(self, address: Address, other: "Storage | None"):
def canary(self) -> "Storage":
"""
- Return a canary storage filled with non-zero values where the current storage expects
- zero values, to guarantee that the test overwrites the storage.
+ Return a canary storage filled with non-zero values where the current
+ storage expects zero values, to guarantee that the test overwrites the
+ storage.
"""
return Storage({key: HashInt(0xBA5E) for key in self.keys() if self[key] == 0})
@@ -294,22 +301,15 @@ class Account(CamelModel):
nonce: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0)
"""
- The scalar value equal to a) the number of transactions sent by
- an Externally Owned Account, b) the amount of contracts created by a
- contract.
+ The scalar value equal to a) the number of transactions sent by an
+ Externally Owned Account, b) the amount of contracts created by a contract.
"""
balance: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0)
- """
- The amount of Wei (10-18 Eth) the account has.
- """
+ """The amount of Wei (10-18 Eth) the account has."""
code: Bytes = Bytes(b"")
- """
- Bytecode contained by the account.
- """
+ """Bytecode contained by the account."""
storage: Storage = Field(default_factory=Storage)
- """
- Storage within a contract.
- """
+ """Storage within a contract."""
NONEXISTENT: ClassVar[None] = None
"""
@@ -329,7 +329,9 @@ class NonceMismatchError(Exception):
got: int | None
def __init__(self, address: Address, want: int | None, got: int | None, *args):
- """Initialize the exception with the address, wanted and got values."""
+ """
+ Initialize the exception with the address, wanted and got values.
+ """
super().__init__(args)
self.address = address
self.want = want
@@ -348,8 +350,8 @@ def __str__(self):
@dataclass(kw_only=True)
class BalanceMismatchError(Exception):
"""
- Test expected a certain balance for an account but a different
- value was found.
+ Test expected a certain balance for an account but a different value
+ was found.
"""
address: Address
@@ -357,7 +359,9 @@ class BalanceMismatchError(Exception):
got: int | None
def __init__(self, address: Address, want: int | None, got: int | None, *args):
- """Initialize the exception with the address, wanted and got values."""
+ """
+ Initialize the exception with the address, wanted and got values.
+ """
super().__init__(args)
self.address = address
self.want = want
@@ -376,8 +380,8 @@ def __str__(self):
@dataclass(kw_only=True)
class CodeMismatchError(Exception):
"""
- Test expected a certain bytecode for an account but a different
- one was found.
+ Test expected a certain bytecode for an account but a different one was
+ found.
"""
address: Address
@@ -385,7 +389,9 @@ class CodeMismatchError(Exception):
got: bytes | None
def __init__(self, address: Address, want: bytes | None, got: bytes | None, *args):
- """Initialize the exception with the address, wanted and got values."""
+ """
+ Initialize the exception with the address, wanted and got values.
+ """
super().__init__(args)
self.address = address
self.want = want
diff --git a/src/ethereum_test_base_types/conversions.py b/src/ethereum_test_base_types/conversions.py
index e99743ba241..84fbb778959 100644
--- a/src/ethereum_test_base_types/conversions.py
+++ b/src/ethereum_test_base_types/conversions.py
@@ -60,12 +60,16 @@ def to_fixed_size_bytes(
"""
Convert multiple types into fixed-size bytes.
- :param input_bytes: The input data to convert.
- :param size: The size of the output bytes.
- :param left_padding: Whether to allow left-padding of the input data bytes using zeros. If the
- input data is an integer, padding is always performed.
- :param right_padding: Whether to allow right-padding of the input data bytes using zeros. If
- the input data is an integer, padding is always performed.
+ Args:
+ input_bytes: The input data to convert.
+ size: The size of the output bytes.
+ left_padding: Whether to allow left-padding of the input data bytes
+ using zeros. If the input data is an integer, padding is
+ always performed.
+ right_padding: Whether to allow right-padding of the input data bytes
+ using zeros. If the input data is an integer, padding
+ is always performed.
+
"""
if isinstance(input_bytes, int):
return int.to_bytes(input_bytes, length=size, byteorder="big", signed=input_bytes < 0)
diff --git a/src/ethereum_test_base_types/mixins.py b/src/ethereum_test_base_types/mixins.py
index f44eafcaa57..d5c1d4cf5fa 100644
--- a/src/ethereum_test_base_types/mixins.py
+++ b/src/ethereum_test_base_types/mixins.py
@@ -8,10 +8,10 @@
class ModelCustomizationsMixin:
"""
A mixin that customizes the behavior of pydantic models. Any pydantic
- configuration override that must apply to all models
- should be placed here.
+ configuration override that must apply to all models should be placed here.
- This mixin is applied to both `EthereumTestBaseModel` and `EthereumTestRootModel`.
+ This mixin is applied to both `EthereumTestBaseModel` and
+ `EthereumTestRootModel`.
"""
def serialize(
@@ -23,12 +23,18 @@ def serialize(
"""
Serialize the model to the specified format with the given parameters.
- :param mode: The mode of serialization.
- If mode is 'json', the output will only contain JSON serializable types.
- If mode is 'python', the output may contain non-JSON-serializable Python objects.
- :param by_alias: Whether to use aliases for field names.
- :param exclude_none: Whether to exclude fields with None values, default is True.
- :return: The serialized representation of the model.
+ Args:
+ mode: The mode of serialization. If mode is 'json', the output
+ will only contain JSON serializable types. If mode is
+ 'python', the output may contain non-JSON-serializable
+ Python objects.
+ by_alias: Whether to use aliases for field names.
+ exclude_none: Whether to exclude fields with None values,
+ default is True.
+
+ Returns:
+ dict[str, Any]: The serialized representation of the model.
+
"""
if not hasattr(self, "model_dump"):
raise NotImplementedError(
@@ -41,35 +47,44 @@ def __repr_args__(self):
"""
Generate a list of attribute-value pairs for the object representation.
- This method serializes the model, retrieves the attribute names,
- and constructs a list of tuples containing attribute names and their corresponding values.
- Only attributes with non-None values are included in the list.
+ This method serializes the model, retrieves the attribute names, and
+ constructs a list of tuples containing attribute names and their
+ corresponding values. Only attributes with non-None values are included
+ in the list.
- This method is used by the __repr__ method to generate the object representation,
- and is used by `gentest` module to generate the test cases.
+ This method is used by the __repr__ method to generate the object
+ representation, and is used by `gentest` module to generate the test
+ cases.
See:
- - https://pydantic-docs.helpmanual.io/usage/models/#custom-repr
- - https://github.com/ethereum/execution-spec-tests/pull/901#issuecomment-2443296835
+ https://pydantic-docs.helpmanual.io/usage/models/
+ #custom-repr
+
+ and
+
+ https://github.com/ethereum/execution-spec-tests/pull/
+ 901#issuecomment-2443296835
Returns:
- List[Tuple[str, Any]]: A list of tuples where each tuple contains an attribute name
- and its corresponding non-None value.
+ List[Tuple[str, Any]]: A list of tuples where each tuple
+ contains an attribute name and its
+ corresponding non-None value.
"""
attrs_names = self.serialize(mode="python", by_alias=False).keys()
attrs = ((s, getattr(self, s)) for s in attrs_names)
- # Convert field values based on their type.
- # This ensures consistency between JSON and Python object representations.
- # Should a custom `__repr__` be needed for a specific type, it can added in the
- # match statement below.
- # Otherwise, the default string representation is used.
+ # Convert field values based on their type. This ensures consistency
+ # between JSON and Python object representations. Should a custom
+ # `__repr__` be needed for a specific type, it can be added in the match
+ # statement below. Otherwise, the default string representation is
+ # used.
repr_attrs = []
for a, v in attrs:
match v:
# Note: The `None` case handles an edge case with transactions
- # see: https://github.com/ethereum/execution-spec-tests/pull/901#discussion_r1828491918 # noqa: E501
+ # see: https://github.com/ethereum/execution-spec-tests/pull/
+ # 901#discussion_r1828491918
case list() | dict() | BaseModel() | None:
repr_attrs.append((a, v))
case _:
diff --git a/src/ethereum_test_base_types/pydantic.py b/src/ethereum_test_base_types/pydantic.py
index 6471fe07fa3..6d58f1fcf95 100644
--- a/src/ethereum_test_base_types/pydantic.py
+++ b/src/ethereum_test_base_types/pydantic.py
@@ -27,7 +27,9 @@ class CopyValidateModel(EthereumTestBaseModel):
"""Model that supports copying with validation."""
def copy(self: Self, **kwargs) -> Self:
- """Create a copy of the model with the updated fields that are validated."""
+ """
+ Create a copy of the model with the updated fields that are validated.
+ """
return self.__class__(**(self.model_dump(exclude_unset=True) | kwargs))
@@ -35,8 +37,8 @@ class CamelModel(CopyValidateModel):
"""
A base model that converts field names to camel case when serializing.
- For example, the field name `current_timestamp` in a Python model will be represented
- as `currentTimestamp` when it is serialized to json.
+ For example, the field name `current_timestamp` in a Python model will be
+ represented as `currentTimestamp` when it is serialized to json.
"""
model_config = ConfigDict(
diff --git a/src/ethereum_test_base_types/reference_spec/reference_spec.py b/src/ethereum_test_base_types/reference_spec/reference_spec.py
index 96a0f493926..3bf36113522 100644
--- a/src/ethereum_test_base_types/reference_spec/reference_spec.py
+++ b/src/ethereum_test_base_types/reference_spec/reference_spec.py
@@ -36,7 +36,10 @@ def name(self) -> str:
@abstractmethod
def has_known_version(self) -> bool:
- """Return true if the reference spec object is hard-coded with a latest known version."""
+ """
+ Return true if the reference spec object is hard-coded with a latest
+ known version.
+ """
pass
@abstractmethod
@@ -46,7 +49,9 @@ def known_version(self) -> str:
@abstractmethod
def api_url(self) -> str:
- """Return the URL required to poll the version from an API, if needed."""
+ """
+ Return the URL required to poll the version from an API, if needed.
+ """
pass
@abstractmethod
@@ -64,13 +69,19 @@ def is_outdated(self) -> bool:
@abstractmethod
def write_info(self, info: Dict[str, Dict[str, Any] | str]):
- """Write info about the reference specification used into the output fixture."""
+ """
+ Write info about the reference specification used into the output
+ fixture.
+ """
pass
@staticmethod
@abstractmethod
def parseable_from_module(module_dict: Dict[str, Any]) -> bool:
- """Check whether the module's dict contains required reference spec information."""
+ """
+ Check whether the module's dict contains required reference spec
+ information.
+ """
pass
@staticmethod
diff --git a/src/ethereum_test_base_types/serialization.py b/src/ethereum_test_base_types/serialization.py
index 35aeaaf233e..f0f18f50848 100644
--- a/src/ethereum_test_base_types/serialization.py
+++ b/src/ethereum_test_base_types/serialization.py
@@ -36,26 +36,29 @@ class RLPSerializable:
def get_rlp_fields(self) -> List[str]:
"""
- Return an ordered list of field names to be included in RLP serialization.
+ Return an ordered list of field names to be included in RLP
+ serialization.
Function can be overridden to customize the logic to return the fields.
By default, rlp_fields class variable is used.
- The list can be nested list up to one extra level to represent nested fields.
+ The list can be a nested list up to one extra level to represent nested
+ fields.
"""
return self.rlp_fields
def get_rlp_signing_fields(self) -> List[str]:
"""
- Return an ordered list of field names to be included in the RLP serialization of the object
- signature.
+ Return an ordered list of field names to be included in the RLP
+ serialization of the object signature.
Function can be overridden to customize the logic to return the fields.
By default, rlp_signing_fields class variable is used.
- The list can be nested list up to one extra level to represent nested fields.
+ The list can be a nested list up to one extra level to represent nested
+ fields.
"""
return self.rlp_signing_fields
@@ -69,7 +72,8 @@ def get_rlp_prefix(self) -> bytes:
def get_rlp_signing_prefix(self) -> bytes:
"""
- Return a prefix that has to be appended to the serialized signing object.
+ Return a prefix that has to be appended to the serialized signing
+ object.
By default, an empty string is returned.
"""
@@ -117,8 +121,9 @@ def to_list(self, signing: bool = False) -> List[Any]:
field_list = self.get_rlp_signing_fields()
else:
if self.signable:
- # Automatically sign signable objects during full serialization:
- # Ensures nested objects have valid signatures in the final RLP.
+ # Automatically sign signable objects during full
+ # serialization: Ensures nested objects have valid signatures
+ # in the final RLP.
self.sign()
field_list = self.get_rlp_fields()
@@ -136,7 +141,9 @@ def rlp(self) -> Bytes:
class SignableRLPSerializable(RLPSerializable):
- """Class that adds RLP serialization to another class with signing support."""
+ """
+ Class that adds RLP serialization to another class with signing support.
+ """
signable: ClassVar[bool] = True
diff --git a/src/ethereum_test_base_types/tests/test_reference_spec.py b/src/ethereum_test_base_types/tests/test_reference_spec.py
index bd94814d07d..dbb15d89ccf 100644
--- a/src/ethereum_test_base_types/tests/test_reference_spec.py
+++ b/src/ethereum_test_base_types/tests/test_reference_spec.py
@@ -8,7 +8,8 @@
from ..reference_spec.git_reference_spec import GitReferenceSpec
from ..reference_spec.reference_spec import NoLatestKnownVersionError
-# the content field from https://api.github.com/repos/ethereum/EIPs/contents/EIPS/eip-100.md
+# the content field from
+# https://api.github.com/repos/ethereum/EIPs/contents/EIPS/eip-100.md
# as of 2023-08-29
response_content = "LS0tCmVpcDogMTAwCnRpdGxlOiBDaGFuZ2UgZGlmZmljdWx0eSBhZGp1c3Rt\
ZW50IHRvIHRhcmdldCBtZWFuIGJsb2NrIHRpbWUgaW5jbHVkaW5nIHVuY2xl\
diff --git a/src/ethereum_test_benchmark/__init__.py b/src/ethereum_test_benchmark/__init__.py
index 60f0e66a5fb..fb8b71a207f 100644
--- a/src/ethereum_test_benchmark/__init__.py
+++ b/src/ethereum_test_benchmark/__init__.py
@@ -1,4 +1,7 @@
-"""Benchmark code generator classes for creating optimized bytecode patterns."""
+"""
+Benchmark code generator classes for
+creating optimized bytecode patterns.
+"""
from .benchmark_code_generator import (
BenchmarkCodeGenerator,
diff --git a/src/ethereum_test_benchmark/benchmark_code_generator.py b/src/ethereum_test_benchmark/benchmark_code_generator.py
index 9c2c9b7814a..311b4132c3b 100644
--- a/src/ethereum_test_benchmark/benchmark_code_generator.py
+++ b/src/ethereum_test_benchmark/benchmark_code_generator.py
@@ -1,4 +1,7 @@
-"""Benchmark code generator classes for creating optimized bytecode patterns."""
+"""
+Benchmark code generator classes for creating
+optimized bytecode patterns.
+"""
from ethereum_test_forks import Fork
from ethereum_test_specs.benchmark import BenchmarkCodeGenerator
@@ -13,7 +16,8 @@ class JumpLoopGenerator(BenchmarkCodeGenerator):
def deploy_contracts(self, pre: Alloc, fork: Fork) -> None:
"""Deploy the looping contract."""
# Benchmark Test Structure:
- # setup + JUMPDEST + attack + attack + ... + attack + JUMP(setup_length)
+ # setup + JUMPDEST + attack + attack + ... +
+ # attack + JUMP(setup_length)
code = self.generate_repeated_code(self.attack_block, self.setup, fork)
self._contract_address = pre.deploy_contract(code=code)
@@ -30,13 +34,17 @@ def generate_transaction(self, pre: Alloc, gas_limit: int, fork: Fork) -> Transa
class ExtCallGenerator(BenchmarkCodeGenerator):
- """Generates bytecode that fills the contract to maximum allowed code size."""
+ """
+ Generates bytecode that fills the contract to
+ maximum allowed code size.
+ """
def deploy_contracts(self, pre: Alloc, fork: Fork) -> None:
"""Deploy both target and caller contracts."""
# Benchmark Test Structure:
# There are two contracts:
- # 1. The target contract that executes certain operation but not loop (e.g. PUSH)
+ # 1. The target contract that executes certain operation
+ # but not loop (e.g. PUSH)
# 2. The loop contract that calls the target contract in a loop
max_iterations = min(
@@ -49,8 +57,12 @@ def deploy_contracts(self, pre: Alloc, fork: Fork) -> None:
)
# Create caller contract that repeatedly calls the target contract
- # attack = POP(STATICCALL(GAS, target_contract_address, 0, 0, 0, 0))
- # setup + JUMPDEST + attack + attack + ... + attack + JUMP(setup_length)
+ # attack = POP(
+ # STATICCALL(GAS, target_contract_address, 0, 0, 0, 0)
+ # )
+ #
+ # setup + JUMPDEST + attack + attack + ... + attack +
+ # JUMP(setup_length)
code_sequence = Op.POP(Op.STATICCALL(Op.GAS, self._target_contract_address, 0, 0, 0, 0))
caller_code = self.generate_repeated_code(code_sequence, Bytecode(), fork)
diff --git a/src/ethereum_test_checklists/eip_checklist.py b/src/ethereum_test_checklists/eip_checklist.py
index 9a3b0b26cdb..1361965eb72 100644
--- a/src/ethereum_test_checklists/eip_checklist.py
+++ b/src/ethereum_test_checklists/eip_checklist.py
@@ -1,12 +1,12 @@
"""
EIP Testing Checklist Enum definitions.
-Note: This module includes a companion .pyi stub file that provides mypy type hints
-for making EIPChecklist classes callable. The stub file is auto-generated using:
- uv run generate_checklist_stubs
+Note: This module includes a companion .pyi stub file that provides mypy type
+hints for making EIPChecklist classes callable. The stub file is auto-generated
+using: uv run generate_checklist_stubs
-If you modify the EIPChecklist class structure, regenerate the stub file to maintain
-proper type checking support.
+If you modify the EIPChecklist class structure, regenerate the stub file to
+maintain proper type checking support.
"""
import re
@@ -16,9 +16,11 @@
def camel_to_snake(name: str) -> str:
"""Convert CamelCase to snake_case."""
- # Insert an underscore before any uppercase letter that follows a lowercase letter
+ # Insert an underscore before any uppercase letter that follows a lowercase
+ # letter
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
- # Insert an underscore before any uppercase letter that follows a lowercase letter or number
+ # Insert an underscore before any uppercase letter that follows a lowercase
+ # letter or number
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
@@ -73,8 +75,8 @@ def __repr__(cls) -> str:
def __call__(cls, *args, **kwargs):
"""Return a pytest mark decorator for the checklist item."""
- # If called with a function as the first argument (direct decorator usage)
- # and no other arguments, apply the decorator to the function
+ # If called with a function as the first argument (direct decorator
+ # usage) and no other arguments, apply the decorator to the function
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
func = args[0]
marker = pytest.mark.eip_checklist(cls._path)
@@ -93,20 +95,21 @@ class EIPChecklist:
"""
Main namespace for EIP testing checklist items.
- This class provides a structured way to reference checklist items for EIP testing.
- The class structure is automatically converted to callable pytest markers.
+ This class provides a structured way to reference checklist items for EIP
+ testing. The class structure is automatically converted to callable pytest
+ markers.
- Note: If you modify this class structure, regenerate the type stub file using:
- uv run generate_checklist_stubs
+ Note: If you modify this class structure, regenerate the type stub file
+ using: uv run generate_checklist_stubs
Examples:
- @EIPChecklist.Opcode.Test.GasUsage.Normal()
- def test_normal_gas():
- pass
+ @EIPChecklist.Opcode.Test.GasUsage.Normal()
+ def test_normal_gas():
+ pass
- @EIPChecklist.Opcode.Test.StackOverflow
- def test_stack_overflow():
- pass
+ @EIPChecklist.Opcode.Test.StackOverflow
+ def test_stack_overflow():
+ pass
"""
@@ -239,8 +242,9 @@ class Even(ChecklistItem):
class DataPortionVariables(ChecklistItem, override_name="data_portion_variables"):
"""
- If the opcode contains variables in its data portion, for each variable `n`
- of the opcode that accesses the nth stack item, test `n` being.
+ If the opcode contains variables in its data portion, for
+ each variable `n` of the opcode that accesses the nth stack
+ item, test `n` being.
"""
class Top(ChecklistItem):
@@ -1688,28 +1692,31 @@ class Test(ChecklistItem):
"""Test vectors for the new validity constraint."""
class ForkTransition(ChecklistItem):
- """Tests for the new transaction validity constraint on fork boundary."""
+ """
+ Tests for the new transaction validity constraint on fork
+ boundary.
+ """
class AcceptedBeforeFork(ChecklistItem):
"""
- Verify that a block before the activation fork is accepted even when the new
- constraint is not met.
+ Verify that a block before the activation fork is accepted
+ even when the new constraint is not met.
"""
pass
class AcceptedAfterFork(ChecklistItem):
"""
- Verify that a block after the activation fork is accepted when the new
- validity constraint is met.
+ Verify that a block after the activation fork is accepted
+ when the new validity constraint is met.
"""
pass
class RejectedAfterFork(ChecklistItem):
"""
- Verify that a block after the activation fork is rejected when the new
- validity constraint is not met.
+ Verify that a block after the activation fork is rejected
+ when the new validity constraint is not met.
"""
pass
@@ -1721,36 +1728,41 @@ class Test(ChecklistItem):
"""Test vectors for the modified validity constraint."""
class ForkTransition(ChecklistItem):
- """Tests for the modified transaction validity constraint on fork boundary."""
+ """
+ Tests for the modified transaction validity constraint on fork
+ boundary.
+ """
class AcceptedBeforeFork(ChecklistItem):
"""
- Verify that a block before the activation fork is accepted when the existing
- constraint is met and, ideally, the new constraint is not met.
+ Verify that a block before the activation fork is accepted
+ when the existing constraint is met and, ideally, the new
+ constraint is not met.
"""
pass
class RejectedBeforeFork(ChecklistItem):
"""
- Verify that a block before the activation fork is rejected when the existing
- constraint is not met and, ideally, the new constraint is met.
+ Verify that a block before the activation fork is rejected
+ when the existing constraint is not met and, ideally, the
+ new constraint is met.
"""
pass
class AcceptedAfterFork(ChecklistItem):
"""
- Verify that a block after the activation fork is accepted when the new
- validity constraint is met.
+ Verify that a block after the activation fork is accepted
+ when the new validity constraint is met.
"""
pass
class RejectedAfterFork(ChecklistItem):
"""
- Verify that a block after the activation fork is rejected when the new
- validity constraint is not met.
+ Verify that a block after the activation fork is rejected
+ when the new validity constraint is not met.
"""
pass
diff --git a/src/ethereum_test_checklists/eip_checklist.pyi b/src/ethereum_test_checklists/eip_checklist.pyi
index 2a0f37d22d1..6dd26539237 100644
--- a/src/ethereum_test_checklists/eip_checklist.pyi
+++ b/src/ethereum_test_checklists/eip_checklist.pyi
@@ -1,7 +1,8 @@
"""
Type stubs for EIP checklist - auto-generated.
-DO NOT EDIT MANUALLY - This file is generated by `uv run generate_checklist_stubs`
+DO NOT EDIT MANUALLY -
+This file is generated by `uv run generate_checklist_stubs`
"""
from typing import Any, Callable, TypeVar, overload
diff --git a/src/ethereum_test_checklists/tests/test_checklist_template_consistency.py b/src/ethereum_test_checklists/tests/test_checklist_template_consistency.py
index 69be5f2157a..f8a190ae578 100644
--- a/src/ethereum_test_checklists/tests/test_checklist_template_consistency.py
+++ b/src/ethereum_test_checklists/tests/test_checklist_template_consistency.py
@@ -33,7 +33,9 @@ def extract_markdown_ids(markdown_content: str) -> Set[str]:
def get_all_checklist_ids(obj) -> Set[str]:
- """Recursively extract all checklist IDs from EIPChecklist and its children."""
+ """
+ Recursively extract all checklist IDs from EIPChecklist and its children.
+ """
ids = set()
# Iterate through all attributes of the object
@@ -59,7 +61,9 @@ def get_all_checklist_ids(obj) -> Set[str]:
def test_checklist_template_consistency():
- """Test that all IDs in markdown template match EIPChecklist class exactly."""
+ """
+ Test that all IDs in markdown template match EIPChecklist class exactly.
+ """
# Read the markdown template
with open(TEMPLATE_PATH, "r", encoding="utf-8") as f:
markdown_content = f.read()
@@ -135,7 +139,10 @@ def test_id_extraction_functions():
def test_eip_checklist_decorator_usage():
- """Test EIPChecklist items work correctly as decorators both with and without parentheses."""
+ """
+ Test EIPChecklist items work correctly as decorators both with and without
+ parentheses.
+ """
# Test decorator with parentheses
@EIPChecklist.Opcode.Test.StackComplexOperations()
@@ -149,7 +156,8 @@ def test_function_with_parens():
assert len(eip_markers) == 1
assert eip_markers[0].args == ("opcode/test/stack_complex_operations",)
- # Test decorator without parentheses (direct usage - this is the key fix for issue #1)
+ # Test decorator without parentheses (direct usage - this is the key fix
+ # for issue #1)
@EIPChecklist.Opcode.Test.StackOverflow
def test_function_no_parens():
pass
@@ -192,6 +200,7 @@ def test_eip_checklist_pytest_param_usage():
with pytest.raises((TypeError, AssertionError)):
pytest.param(
"test_value",
- marks=EIPChecklist.Opcode.Test.StackOverflow, # Without () should fail
+ # Without () should fail
+ marks=EIPChecklist.Opcode.Test.StackOverflow,
id="should_fail",
)
diff --git a/src/ethereum_test_exceptions/exception_mapper.py b/src/ethereum_test_exceptions/exception_mapper.py
index 43d4d7af667..cf8e7b4e764 100644
--- a/src/ethereum_test_exceptions/exception_mapper.py
+++ b/src/ethereum_test_exceptions/exception_mapper.py
@@ -20,9 +20,11 @@ class ExceptionMapper(ABC):
mapping_substring: ClassVar[Dict[ExceptionBase, str]]
"""
- Mapping of exception to substring that should be present in the error message.
+ Mapping of exception to substring that should be present in the error
+ message.
- Items in this mapping are used for substring matching (`substring in message`).
+ Items in this mapping are used for substring matching (`substring in
+ message`).
"""
mapping_regex: ClassVar[Dict[ExceptionBase, str]]
@@ -34,13 +36,14 @@ class ExceptionMapper(ABC):
"""
reliable: ClassVar[bool] = True
"""
- Whether the exceptions returned by the tool are reliable and can be accurately
- mapped to the exceptions in this class.
+ Whether the exceptions returned by the tool are reliable and can be
+ accurately mapped to the exceptions in this class.
"""
def __init__(self) -> None:
"""Initialize the exception mapper."""
- # Ensure that the subclass has properly defined mapping_substring before accessing it
+ # Ensure that the subclass has properly defined mapping_substring
+ # before accessing it
assert self.mapping_substring is not None, "mapping_substring must be defined in subclass"
assert self.mapping_regex is not None, "mapping_regex must be defined in subclass"
self.mapper_name = self.__class__.__name__
@@ -66,8 +69,8 @@ def message_to_exception(
class ExceptionWithMessage(BaseModel, Generic[ExceptionBoundTypeVar]):
"""
- Class that contains the exception along with the verbatim message from the external
- tool/client.
+ Class that contains the exception along with the verbatim message from the
+ external tool/client.
"""
exceptions: List[ExceptionBoundTypeVar]
@@ -86,8 +89,8 @@ def __str__(self):
def mapper_validator(v: str, info: ValidationInfo) -> Dict[str, Any] | UndefinedException | None:
"""
- Use the exception mapper that must be included in the context to map the exception
- from the external tool.
+ Use the exception mapper that must be included in the context to map the
+ exception from the external tool.
"""
if v is None:
return v
@@ -110,17 +113,18 @@ def mapper_validator(v: str, info: ValidationInfo) -> Dict[str, Any] | Undefined
ExceptionMapperValidator = BeforeValidator(mapper_validator)
"""
-Validator that can be used to annotate a pydantic field in a model that is meant to be
-parsed from an external tool or client.
+Validator that can be used to annotate a pydantic field in a model that is
+meant to be parsed from an external tool or client.
-The annotated type must be an union that can include `None`, `UndefinedException` and a
-custom model as:
+The annotated type must be a union that can include `None`,
+`UndefinedException` and a custom model as:
```
class BlockExceptionWithMessage(ExceptionWithMessage[BlockException]):
pass
```
where `BlockException` can be any derivation of `ExceptionBase`.
-The `message` attribute is the verbatim message received from the external tool or client,
-and can be used to be printed for extra context information in case of failures.
+The `message` attribute is the verbatim message received from the external tool
+or client, and can be used to be printed for extra context information in case
+of failures.
"""
diff --git a/src/ethereum_test_exceptions/exceptions.py b/src/ethereum_test_exceptions/exceptions.py
index 437b5ad8f25..8de5ac505c3 100644
--- a/src/ethereum_test_exceptions/exceptions.py
+++ b/src/ethereum_test_exceptions/exceptions.py
@@ -25,7 +25,10 @@ def __init_subclass__(cls) -> None:
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call class constructor without info and appends the serialization schema."""
+ """
+ Call class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
cls.from_str,
serialization=to_string_ser_schema(),
@@ -104,7 +107,10 @@ def __new__(cls, value: str, *, mapper_name: str | None = None) -> "UndefinedExc
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call class constructor without info and appends the serialization schema."""
+ """
+ Call class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
cls,
serialization=to_string_ser_schema(),
@@ -114,195 +120,125 @@ def __get_pydantic_core_schema__(
@unique
class TransactionException(ExceptionBase):
"""
- Exception raised when a transaction is invalid, and thus cannot be executed.
+ Exception raised when a transaction is invalid, and thus cannot be
+ executed.
- If a transaction with any of these exceptions is included in a block, the block is invalid.
+ If a transaction with any of these exceptions is included in a block, the
+ block is invalid.
"""
TYPE_NOT_SUPPORTED = auto()
- """
- Transaction type is not supported on this chain configuration.
- """
+ """Transaction type is not supported on this chain configuration."""
SENDER_NOT_EOA = auto()
- """
- Transaction is coming from address that is not exist anymore.
- """
+ """Transaction is coming from address that is not exist anymore."""
ADDRESS_TOO_SHORT = auto()
- """
- Transaction `to` is not allowed to be less than 20 bytes.
- """
+ """Transaction `to` is not allowed to be less than 20 bytes."""
ADDRESS_TOO_LONG = auto()
- """
- Transaction `to` is not allowed to be more than 20 bytes.
- """
+ """Transaction `to` is not allowed to be more than 20 bytes."""
NONCE_MISMATCH_TOO_HIGH = auto()
- """
- Transaction nonce > sender.nonce.
- """
+ """Transaction nonce > sender.nonce."""
NONCE_MISMATCH_TOO_LOW = auto()
- """
- Transaction nonce < sender.nonce.
- """
+ """Transaction nonce < sender.nonce."""
NONCE_TOO_BIG = auto()
"""
- Transaction `nonce` is not allowed to be max_uint64 - 1 (this is probably TransactionTest).
+ Transaction `nonce` is not allowed to be max_uint64 - 1 (this is probably
+ TransactionTest).
"""
NONCE_IS_MAX = auto()
"""
- Transaction `nonce` is not allowed to be max_uint64 - 1 (this is StateTests).
+ Transaction `nonce` is not allowed to be max_uint64 - 1 (this is
+ StateTests).
"""
NONCE_OVERFLOW = auto()
- """
- Transaction `nonce` is not allowed to be more than uint64.
- """
+ """Transaction `nonce` is not allowed to be more than uint64."""
GASLIMIT_OVERFLOW = auto()
- """
- Transaction gaslimit exceeds 2^64-1 maximum value.
- """
+ """Transaction gaslimit exceeds 2^64-1 maximum value."""
VALUE_OVERFLOW = auto()
- """
- Transaction value exceeds 2^256-1 maximum value.
- """
+ """Transaction value exceeds 2^256-1 maximum value."""
GASPRICE_OVERFLOW = auto()
- """
- Transaction gasPrice exceeds 2^256-1 maximum value.
- """
+ """Transaction gasPrice exceeds 2^256-1 maximum value."""
GASLIMIT_PRICE_PRODUCT_OVERFLOW = auto()
- """
- Transaction gasPrice * gasLimit exceeds 2^256-1 maximum value.
- """
+ """Transaction gasPrice * gasLimit exceeds 2^256-1 maximum value."""
INVALID_SIGNATURE_VRS = auto()
- """
- Invalid transaction v, r, s values.
- """
+ """Invalid transaction v, r, s values."""
RLP_INVALID_SIGNATURE_R = auto()
- """
- Error reading transaction signature R value.
- """
+ """Error reading transaction signature R value."""
RLP_INVALID_SIGNATURE_S = auto()
- """
- Error reading transaction signature S value.
- """
+ """Error reading transaction signature S value."""
RLP_LEADING_ZEROS_GASLIMIT = auto()
- """
- Error reading transaction gaslimit field RLP.
- """
+ """Error reading transaction gaslimit field RLP."""
RLP_LEADING_ZEROS_GASPRICE = auto()
- """
- Error reading transaction gasprice field RLP.
- """
+ """Error reading transaction gasprice field RLP."""
RLP_LEADING_ZEROS_VALUE = auto()
- """
- Error reading transaction value field RLP.
- """
+ """Error reading transaction value field RLP."""
RLP_LEADING_ZEROS_NONCE = auto()
- """
- Error reading transaction nonce field RLP.
- """
+ """Error reading transaction nonce field RLP."""
RLP_LEADING_ZEROS_R = auto()
- """
- Error reading transaction signature R field RLP.
- """
+ """Error reading transaction signature R field RLP."""
RLP_LEADING_ZEROS_S = auto()
- """
- Error reading transaction signature S field RLP.
- """
+ """Error reading transaction signature S field RLP."""
RLP_LEADING_ZEROS_V = auto()
- """
- Error reading transaction signature V field RLP.
- """
+ """Error reading transaction signature V field RLP."""
RLP_LEADING_ZEROS_BASEFEE = auto()
- """
- Error reading transaction basefee field RLP.
- """
+ """Error reading transaction basefee field RLP."""
RLP_LEADING_ZEROS_PRIORITY_FEE = auto()
- """
- Error reading transaction priority fee field RLP.
- """
+ """Error reading transaction priority fee field RLP."""
RLP_LEADING_ZEROS_DATA_SIZE = auto()
"""
- Error reading transaction data field RLP, (rlp field length has leading zeros).
+ Error reading transaction data field RLP, (rlp field length has leading
+ zeros).
"""
RLP_LEADING_ZEROS_NONCE_SIZE = auto()
"""
- Error reading transaction nonce field RLP, (rlp field length has leading zeros).
+ Error reading transaction nonce field RLP, (rlp field length has leading
+ zeros).
"""
RLP_TOO_FEW_ELEMENTS = auto()
"""
- Error reading transaction RLP, structure has too few elements than expected.
+ Error reading transaction RLP, structure has fewer elements than
+ expected.
"""
RLP_TOO_MANY_ELEMENTS = auto()
"""
- Error reading transaction RLP, structure has too many elements than expected.
+ Error reading transaction RLP, structure has more elements than
+ expected.
"""
RLP_ERROR_EOF = auto()
- """
- Error reading transaction RLP, rlp stream unexpectedly finished.
- """
+ """Error reading transaction RLP, rlp stream unexpectedly finished."""
RLP_ERROR_SIZE = auto()
- """
- Error reading transaction RLP, rlp size is invalid.
- """
+ """Error reading transaction RLP, rlp size is invalid."""
RLP_ERROR_SIZE_LEADING_ZEROS = auto()
- """
- Error reading transaction RLP, field size has leading zeros.
- """
+ """Error reading transaction RLP, field size has leading zeros."""
INVALID_CHAINID = auto()
- """
- Transaction chain id encoding is incorrect.
- """
+ """Transaction chain id encoding is incorrect."""
RLP_INVALID_DATA = auto()
- """
- Transaction data field is invalid rlp.
- """
+ """Transaction data field is invalid rlp."""
RLP_INVALID_GASLIMIT = auto()
- """
- Transaction gaslimit field is invalid rlp.
- """
+ """Transaction gaslimit field is invalid rlp."""
RLP_INVALID_NONCE = auto()
- """
- Transaction nonce field is invalid rlp.
- """
+ """Transaction nonce field is invalid rlp."""
RLP_INVALID_TO = auto()
- """
- Transaction to field is invalid rlp.
- """
+ """Transaction to field is invalid rlp."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_LONG = auto()
- """
- Transaction access list address is > 20 bytes.
- """
+ """Transaction access list address is > 20 bytes."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_SHORT = auto()
- """
- Transaction access list address is < 20 bytes.
- """
+ """Transaction access list address is < 20 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_LONG = auto()
- """
- Transaction access list storage hash > 32 bytes.
- """
+ """Transaction access list storage hash > 32 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_SHORT = auto()
- """
- Transaction access list storage hash < 32 bytes.
- """
+ """Transaction access list storage hash < 32 bytes."""
RLP_INVALID_HEADER = auto()
- """
- Transaction failed to read from RLP as rlp header is invalid.
- """
+ """Transaction failed to read from RLP as rlp header is invalid."""
RLP_INVALID_VALUE = auto()
- """
- Transaction value field is invalid rlp/structure.
- """
+ """Transaction value field is invalid rlp/structure."""
EC_RECOVERY_FAIL = auto()
- """
- Transaction has correct signature, but ec recovery failed.
- """
+ """Transaction has correct signature, but ec recovery failed."""
INSUFFICIENT_ACCOUNT_FUNDS = auto()
"""
Transaction's sender does not have enough funds to pay for the transaction.
"""
INSUFFICIENT_MAX_FEE_PER_GAS = auto()
- """
- Transaction's max-fee-per-gas is lower than the block base-fee.
- """
+ """Transaction's max-fee-per-gas is lower than the block base-fee."""
PRIORITY_OVERFLOW = auto()
"""
Transaction's max-priority-fee-per-gas is exceeds 2^256-1 maximum value.
@@ -313,88 +249,59 @@ class TransactionException(ExceptionBase):
"""
PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS_2 = auto()
"""
- Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas (TransactionTests).
+ Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas
+ (TransactionTests).
"""
INSUFFICIENT_MAX_FEE_PER_BLOB_GAS = auto()
"""
- Transaction's max-fee-per-blob-gas is lower than the block's blob-gas price.
+ Transaction's max-fee-per-blob-gas is lower than the block's blob-gas
+ price.
"""
INTRINSIC_GAS_TOO_LOW = auto()
- """
- Transaction's gas limit is too low.
- """
+ """Transaction's gas limit is too low."""
INTRINSIC_GAS_BELOW_FLOOR_GAS_COST = auto()
- """
- Transaction's gas limit is below the floor gas cost.
- """
+ """Transaction's gas limit is below the floor gas cost."""
INITCODE_SIZE_EXCEEDED = auto()
"""
Transaction's initcode for a contract-creating transaction is too large.
"""
TYPE_3_TX_PRE_FORK = auto()
- """
- Transaction type 3 included before activation fork.
- """
+ """Transaction type 3 included before activation fork."""
TYPE_3_TX_ZERO_BLOBS_PRE_FORK = auto()
- """
- Transaction type 3, with zero blobs, included before activation fork.
- """
+ """Transaction type 3, with zero blobs, included before activation fork."""
TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH = auto()
- """
- Transaction contains a blob versioned hash with an invalid version.
- """
+ """Transaction contains a blob versioned hash with an invalid version."""
TYPE_3_TX_WITH_FULL_BLOBS = auto()
- """
- Transaction contains full blobs (network-version of the transaction).
- """
+ """Transaction contains full blobs (network-version of the transaction)."""
TYPE_3_TX_BLOB_COUNT_EXCEEDED = auto()
- """
- Transaction contains too many blob versioned hashes.
- """
+ """Transaction contains too many blob versioned hashes."""
TYPE_3_TX_CONTRACT_CREATION = auto()
- """
- Transaction is a type 3 transaction and has an empty `to`.
- """
+ """Transaction is a type 3 transaction and has an empty `to`."""
TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED = auto()
- """
- Transaction causes block to go over blob gas limit.
- """
+ """Transaction causes block to go over blob gas limit."""
GAS_ALLOWANCE_EXCEEDED = auto()
- """
- Transaction causes block to go over blob gas limit.
- """
+ """Transaction causes block to go over blob gas limit."""
GAS_LIMIT_EXCEEDS_MAXIMUM = auto()
"""
Transaction gas limit exceeds the maximum allowed limit of 30 million.
"""
TYPE_3_TX_ZERO_BLOBS = auto()
- """
- Transaction is type 3, but has no blobs.
- """
+ """Transaction is type 3, but has no blobs."""
TYPE_4_EMPTY_AUTHORIZATION_LIST = auto()
- """
- Transaction is type 4, but has an empty authorization list.
- """
+ """Transaction is type 4, but has an empty authorization list."""
TYPE_4_INVALID_AUTHORITY_SIGNATURE = auto()
- """
- Transaction authority signature is invalid
- """
+ """Transaction authority signature is invalid"""
TYPE_4_INVALID_AUTHORITY_SIGNATURE_S_TOO_HIGH = auto()
- """
- Transaction authority signature is invalid
- """
+ """Transaction authority signature is invalid"""
TYPE_4_TX_CONTRACT_CREATION = auto()
- """
- Transaction is a type 4 transaction and has an empty `to`.
- """
+ """Transaction is a type 4 transaction and has an empty `to`."""
TYPE_4_INVALID_AUTHORIZATION_FORMAT = auto()
"""
- Transaction is type 4, but contains an authorization that has an invalid format.
+ Transaction is type 4, but contains an authorization that has an invalid
+ format.
"""
TYPE_4_TX_PRE_FORK = auto()
- """
- Transaction type 4 included before activation fork.
- """
+ """Transaction type 4 included before activation fork."""
@unique
@@ -402,202 +309,158 @@ class BlockException(ExceptionBase):
"""
Exception raised when a block is invalid, but not due to a transaction.
- E.g. all transactions in the block are valid, and can be applied to the state, but the
- block header contains an invalid field.
+ E.g. all transactions in the block are valid, and can be applied to the
+ state, but the block header contains an invalid field.
"""
TOO_MANY_UNCLES = auto()
- """
- Block declares too many uncles over the allowed limit.
- """
+ """Block declares too many uncles over the allowed limit."""
UNCLE_IN_CHAIN = auto()
- """
- Block declares uncle header that is already imported into chain.
- """
+ """Block declares uncle header that is already imported into chain."""
UNCLE_IS_ANCESTOR = auto()
- """
- Block declares uncle header that is directly a parent of this block.
- """
+ """Block declares uncle header that is directly a parent of this block."""
UNCLE_IS_BROTHER = auto()
- """
- Block declares two similar uncle headers.
- """
+ """Block declares two similar uncle headers."""
UNCLE_PARENT_INCORRECT = auto()
- """
- Block declares uncle header that is an outdated block to be an uncle.
- """
+ """Block declares uncle header that is an outdated block to be an uncle."""
EXTRA_DATA_TOO_BIG = auto()
- """
- Block header's extra data >32 bytes.
- """
+ """Block header's extra data >32 bytes."""
EXTRA_DATA_INVALID_DAO = auto()
"""
Block header's extra data after dao fork must be a fixed pre defined hash.
"""
UNKNOWN_PARENT = auto()
"""
- Block header's parent hash does not correspond to any of existing blocks on chain.
+ Block header's parent hash does not correspond to any of existing blocks on
+ chain.
"""
UNCLE_UNKNOWN_PARENT = auto()
"""
- Uncle header's parent hash does not correspond to any of existing blocks on chain.
+ Uncle header's parent hash does not correspond to any of existing blocks on
+ chain.
"""
UNKNOWN_PARENT_ZERO = auto()
- """
- Block header's parent hash is zero hash.
- """
+ """Block header's parent hash is zero hash."""
GASLIMIT_TOO_BIG = auto()
- """
- Block header's gas limit > 0x7fffffffffffffff.
- """
+ """Block header's gas limit > 0x7fffffffffffffff."""
INVALID_BLOCK_NUMBER = auto()
- """
- Block header's number != parent header's number + 1.
- """
+ """Block header's number != parent header's number + 1."""
INVALID_BLOCK_TIMESTAMP_OLDER_THAN_PARENT = auto()
- """
- Block header's timestamp <= parent header's timestamp.
- """
+ """Block header's timestamp <= parent header's timestamp."""
INVALID_DIFFICULTY = auto()
"""
- Block header's difficulty does not match the difficulty formula calculated from previous block.
+ Block header's difficulty does not match the difficulty formula calculated
+ from previous block.
"""
INVALID_LOG_BLOOM = auto()
"""
- Block header's logs bloom hash does not match the actually computed log bloom.
+ Block header's logs bloom hash does not match the actually computed log
+ bloom.
"""
INVALID_STATE_ROOT = auto()
"""
- Block header's state root hash does not match the actually computed hash of the state.
+ Block header's state root hash does not match the actually computed hash of
+ the state.
"""
INVALID_RECEIPTS_ROOT = auto()
"""
- Block header's receipts root hash does not match the actually computed hash of receipts.
+ Block header's receipts root hash does not match the actually computed hash
+ of receipts.
"""
INVALID_TRANSACTIONS_ROOT = auto()
"""
- Block header's transactions root hash does not match the actually computed hash of tx tree.
+ Block header's transactions root hash does not match the actually computed
+ hash of tx tree.
"""
INVALID_UNCLES_HASH = auto()
"""
- Block header's uncle hash does not match the actually computed hash of block's uncles.
+ Block header's uncle hash does not match the actually computed hash of
+ block's uncles.
"""
GAS_USED_OVERFLOW = auto()
- """
- Block transactions consume more gas than block header allow.
- """
+ """Block transactions consume more gas than block header allow."""
INVALID_GASLIMIT = auto()
"""
- Block header's gas limit does not match the gas limit formula calculated from previous block.
+ Block header's gas limit does not match the gas limit formula calculated
+ from previous block.
"""
INVALID_BASEFEE_PER_GAS = auto()
- """
- Block header's base_fee_per_gas field is calculated incorrect.
- """
+ """Block header's base_fee_per_gas field is calculated incorrect."""
INVALID_GAS_USED = auto()
"""
Block header's actual gas used does not match the provided header's value
"""
INVALID_GAS_USED_ABOVE_LIMIT = auto()
- """
- Block header's gas used value is above the gas limit field's value.
- """
+ """Block header's gas used value is above the gas limit field's value."""
INVALID_WITHDRAWALS_ROOT = auto()
"""
Block header's withdrawals root does not match calculated withdrawals root.
"""
INCORRECT_BLOCK_FORMAT = auto()
"""
- Block's format is incorrect, contains invalid fields, is missing fields, or contains fields of
- a fork that is not active yet.
+ Block's format is incorrect, contains invalid fields, is missing fields, or
+ contains fields of a fork that is not active yet.
"""
BLOB_GAS_USED_ABOVE_LIMIT = auto()
- """
- Block's blob gas used in header is above the limit.
- """
+ """Block's blob gas used in header is above the limit."""
INCORRECT_BLOB_GAS_USED = auto()
- """
- Block's blob gas used in header is incorrect.
- """
+ """Block's blob gas used in header is incorrect."""
INCORRECT_EXCESS_BLOB_GAS = auto()
- """
- Block's excess blob gas in header is incorrect.
- """
+ """Block's excess blob gas in header is incorrect."""
INVALID_VERSIONED_HASHES = auto()
- """
- Incorrect number of versioned hashes in a payload.
- """
+ """Incorrect number of versioned hashes in a payload."""
RLP_STRUCTURES_ENCODING = auto()
"""
Block's rlp encoding is valid but ethereum structures in it are invalid.
"""
RLP_WITHDRAWALS_NOT_READ = auto()
- """
- Block's rlp encoding is missing withdrawals.
- """
+ """Block's rlp encoding is missing withdrawals."""
RLP_INVALID_FIELD_OVERFLOW_64 = auto()
- """
- One of block's fields rlp is overflow 2**64 value.
- """
+ """One of block's fields rlp is overflow 2**64 value."""
RLP_INVALID_ADDRESS = auto()
- """
- Block withdrawals address is rlp of invalid address != 20 bytes.
- """
+ """Block withdrawals address is rlp of invalid address != 20 bytes."""
RLP_BLOCK_LIMIT_EXCEEDED = auto()
- """
- Block's rlp encoding is larger than the allowed limit.
- """
+ """Block's rlp encoding is larger than the allowed limit."""
INVALID_REQUESTS = auto()
- """
- Block's requests are invalid.
- """
+ """Block's requests are invalid."""
IMPORT_IMPOSSIBLE_LEGACY = auto()
- """
- Legacy block import is impossible in this chain configuration.
- """
+ """Legacy block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_LEGACY_WRONG_PARENT = auto()
"""
- Legacy block import is impossible, trying to import on top of a block that is not legacy.
+ Legacy block import is impossible, trying to import on top of a block that
+ is not legacy.
"""
IMPORT_IMPOSSIBLE_LONDON_WRONG_PARENT = auto()
"""
Trying to import london (basefee) block on top of block that is not 1559.
"""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POW = auto()
- """
- Trying to import paris(merge) block with PoW enabled.
- """
+ """Trying to import paris(merge) block with PoW enabled."""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POS = auto()
"""
Trying to import paris(merge) block with PoS enabled before TTD is reached.
"""
IMPORT_IMPOSSIBLE_LONDON_OVER_PARIS = auto()
- """
- Trying to import london looking block over paris network (POS).
- """
+ """Trying to import london looking block over paris network (POS)."""
IMPORT_IMPOSSIBLE_PARIS_OVER_SHANGHAI = auto()
- """
- Trying to import paris block on top of shanghai block.
- """
+ """Trying to import paris block on top of shanghai block."""
IMPORT_IMPOSSIBLE_SHANGHAI = auto()
- """
- Shanghai block import is impossible in this chain configuration.
- """
+ """Shanghai block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_UNCLES_OVER_PARIS = auto()
"""
Trying to import a block after paris fork that has not empty uncles hash.
"""
IMPORT_IMPOSSIBLE_DIFFICULTY_OVER_PARIS = auto()
- """
- Trying to import a block after paris fork that has difficulty != 0.
- """
+ """Trying to import a block after paris fork that has difficulty != 0."""
SYSTEM_CONTRACT_EMPTY = auto()
"""
- A system contract address contains no code at the end of fork activation block.
+ A system contract address contains no code at the end of fork activation
+ block.
"""
SYSTEM_CONTRACT_CALL_FAILED = auto()
"""
- A system contract call at the end of block execution (from the system address) fails.
+ A system contract call at the end of block execution (from the system
+ address) fails.
"""
INVALID_BLOCK_HASH = auto()
"""
@@ -605,8 +468,8 @@ class BlockException(ExceptionBase):
"""
INVALID_DEPOSIT_EVENT_LAYOUT = auto()
"""
- Transaction emits a `DepositEvent` in the deposit contract (EIP-6110), but the layout
- of the event does not match the required layout.
+ Transaction emits a `DepositEvent` in the deposit contract (EIP-6110), but
+ the layout of the event does not match the required layout.
"""
@@ -615,218 +478,122 @@ class EOFException(ExceptionBase):
"""Exception raised when an EOF container is invalid."""
DEFAULT_EXCEPTION = auto()
- """
- Expect some exception, not yet known.
- """
+ """Expect some exception, not yet known."""
UNDEFINED_EXCEPTION = auto()
- """
- Indicates that exception string is not mapped to an exception enum.
- """
+ """Indicates that exception string is not mapped to an exception enum."""
UNDEFINED_INSTRUCTION = auto()
- """
- EOF container has undefined instruction in it's body code.
- """
+ """EOF container has undefined instruction in it's body code."""
UNKNOWN_VERSION = auto()
- """
- EOF container has an unknown version.
- """
+ """EOF container has an unknown version."""
INCOMPLETE_MAGIC = auto()
- """
- EOF container has not enough bytes to read magic.
- """
+ """EOF container has not enough bytes to read magic."""
INVALID_MAGIC = auto()
- """
- EOF container has not allowed magic version byte.
- """
+ """EOF container has not allowed magic version byte."""
INVALID_VERSION = auto()
- """
- EOF container version bytes mismatch.
- """
+ """EOF container version bytes mismatch."""
INVALID_NON_RETURNING_FLAG = auto()
- """
- EOF container's section has non-returning flag set incorrectly.
- """
+ """EOF container's section has non-returning flag set incorrectly."""
INVALID_RJUMP_DESTINATION = auto()
- """
- Code has RJUMP instruction with invalid parameters.
- """
+ """Code has RJUMP instruction with invalid parameters."""
MISSING_TYPE_HEADER = auto()
- """
- EOF container missing types section.
- """
+ """EOF container missing types section."""
INVALID_TYPE_SECTION_SIZE = auto()
- """
- EOF container types section has wrong size.
- """
+ """EOF container types section has wrong size."""
INVALID_TYPE_BODY = auto()
- """
- EOF container types body section bytes are wrong.
- """
+ """EOF container types body section bytes are wrong."""
MISSING_CODE_HEADER = auto()
- """
- EOF container missing code section.
- """
+ """EOF container missing code section."""
INVALID_CODE_SECTION = auto()
- """
- EOF container code section bytes are incorrect.
- """
+ """EOF container code section bytes are incorrect."""
INCOMPLETE_CODE_HEADER = auto()
- """
- EOF container code header missing bytes.
- """
+ """EOF container code header missing bytes."""
INCOMPLETE_DATA_HEADER = auto()
- """
- EOF container data header missing bytes.
- """
+ """EOF container data header missing bytes."""
ZERO_SECTION_SIZE = auto()
- """
- EOF container data header construction is wrong.
- """
+ """EOF container data header construction is wrong."""
MISSING_DATA_SECTION = auto()
- """
- EOF container missing data section
- """
+ """EOF container missing data section"""
INCOMPLETE_CONTAINER = auto()
- """
- EOF container bytes are incomplete.
- """
+ """EOF container bytes are incomplete."""
INVALID_SECTION_BODIES_SIZE = auto()
- """
- Sections bodies does not match sections headers.
- """
+ """Sections bodies does not match sections headers."""
TRAILING_BYTES = auto()
- """
- EOF container has bytes beyond data section.
- """
+ """EOF container has bytes beyond data section."""
MISSING_TERMINATOR = auto()
- """
- EOF container missing terminator bytes between header and body.
- """
+ """EOF container missing terminator bytes between header and body."""
MISSING_HEADERS_TERMINATOR = auto()
- """
- Some type of another exception about missing headers terminator.
- """
+ """Some type of another exception about missing headers terminator."""
INVALID_FIRST_SECTION_TYPE = auto()
- """
- EOF container header does not have types section first.
- """
+ """EOF container header does not have types section first."""
INCOMPLETE_SECTION_NUMBER = auto()
- """
- EOF container header has section that is missing declaration bytes.
- """
+ """EOF container header has section that is missing declaration bytes."""
INCOMPLETE_SECTION_SIZE = auto()
- """
- EOF container header has section that is defined incorrectly.
- """
+ """EOF container header has section that is defined incorrectly."""
TOO_MANY_CODE_SECTIONS = auto()
- """
- EOF container header has too many code sections.
- """
+ """EOF container header has too many code sections."""
MISSING_STOP_OPCODE = auto()
- """
- EOF container's code missing STOP bytecode at it's end.
- """
+ """EOF container's code missing STOP bytecode at it's end."""
INPUTS_OUTPUTS_NUM_ABOVE_LIMIT = auto()
- """
- EOF container code section inputs/outputs number is above the limit
- """
+ """EOF container code section inputs/outputs number is above the limit"""
UNREACHABLE_INSTRUCTIONS = auto()
- """
- EOF container's code have instructions that are unreachable.
- """
+ """EOF container's code have instructions that are unreachable."""
UNREACHABLE_CODE_SECTIONS = auto()
- """
- EOF container's body have code sections that are unreachable.
- """
+ """EOF container's body have code sections that are unreachable."""
STACK_UNDERFLOW = auto()
- """
- EOF container's code produces an stack underflow.
- """
+ """EOF container's code produces an stack underflow."""
STACK_OVERFLOW = auto()
- """
- EOF container's code produces an stack overflow.
- """
+ """EOF container's code produces an stack overflow."""
STACK_HEIGHT_MISMATCH = auto()
- """
- EOF container section stack height mismatch.
- """
+ """EOF container section stack height mismatch."""
MAX_STACK_INCREASE_ABOVE_LIMIT = auto()
- """
- EOF container's specified max stack increase is above the limit.
- """
+ """EOF container's specified max stack increase is above the limit."""
STACK_HIGHER_THAN_OUTPUTS = auto()
"""
- EOF container section stack height is higher than the outputs.
- when returning
+ EOF container section stack height is higher than the outputs when
+ returning.
"""
JUMPF_DESTINATION_INCOMPATIBLE_OUTPUTS = auto()
"""
- EOF container section JUMPF's to a destination section with incompatible outputs.
+ EOF container section JUMPF's to a destination section with incompatible
+ outputs.
"""
INVALID_MAX_STACK_INCREASE = auto()
"""
- EOF container section's specified max stack increase does not match the actual stack height.
+ EOF container section's specified max stack increase does not match the
+ actual stack height.
"""
INVALID_DATALOADN_INDEX = auto()
- """
- A DATALOADN instruction has out-of-bounds index for the data section.
- """
+ """A DATALOADN instruction has out-of-bounds index for the data section."""
TRUNCATED_INSTRUCTION = auto()
- """
- EOF container's code section has truncated instruction.
- """
+ """EOF container's code section has truncated instruction."""
TOPLEVEL_CONTAINER_TRUNCATED = auto()
- """
- Top-level EOF container has data section truncated
- """
+ """Top-level EOF container has data section truncated"""
ORPHAN_SUBCONTAINER = auto()
- """
- EOF container has an unreferenced subcontainer.
- '"""
+ """EOF container has an unreferenced subcontainer. '"""
CONTAINER_SIZE_ABOVE_LIMIT = auto()
- """
- EOF container is above size limit
- """
+ """EOF container is above size limit"""
INVALID_CONTAINER_SECTION_INDEX = auto()
- """
- Instruction references container section that does not exist.
- """
+ """Instruction references container section that does not exist."""
INCOMPATIBLE_CONTAINER_KIND = auto()
- """
- Incompatible instruction found in a container of a specific kind.
- """
+ """Incompatible instruction found in a container of a specific kind."""
AMBIGUOUS_CONTAINER_KIND = auto()
- """
- The kind of a sub-container cannot be uniquely deduced.
- """
+ """The kind of a sub-container cannot be uniquely deduced."""
TOO_MANY_CONTAINERS = auto()
- """
- EOF container header has too many sub-containers.
- """
+ """EOF container header has too many sub-containers."""
INVALID_CODE_SECTION_INDEX = auto()
- """
- CALLF Operation refers to a non-existent code section
- """
+ """CALLF Operation refers to a non-existent code section"""
UNEXPECTED_HEADER_KIND = auto()
- """
- Header parsing encountered a section kind it wasn't expecting
- """
+ """Header parsing encountered a section kind it wasn't expecting"""
CALLF_TO_NON_RETURNING = auto()
- """
- CALLF instruction targeting a non-returning code section
- """
+ """CALLF instruction targeting a non-returning code section"""
EOFCREATE_WITH_TRUNCATED_CONTAINER = auto()
- """
- EOFCREATE with truncated container
- """
+ """EOFCREATE with truncated container"""
-"""
-Pydantic Annotated Types
-"""
+"""Pydantic Annotated Types"""
ExceptionInstanceOrList = Annotated[
List[TransactionException | BlockException] | TransactionException | BlockException,
diff --git a/src/ethereum_test_exceptions/exceptions/base.py b/src/ethereum_test_exceptions/exceptions/base.py
index 96b52929f6c..ebfb0d79e99 100644
--- a/src/ethereum_test_exceptions/exceptions/base.py
+++ b/src/ethereum_test_exceptions/exceptions/base.py
@@ -25,7 +25,10 @@ def __init_subclass__(cls) -> None:
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call class constructor without info and appends the serialization schema."""
+ """
+ Call class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
cls.from_str,
serialization=to_string_ser_schema(),
@@ -82,7 +85,10 @@ def __new__(cls, value: str, *, mapper_name: str | None = None) -> "UndefinedExc
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call class constructor without info and appends the serialization schema."""
+ """
+ Call class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
cls,
serialization=to_string_ser_schema(),
diff --git a/src/ethereum_test_exceptions/exceptions/block.py b/src/ethereum_test_exceptions/exceptions/block.py
index 4053b1938e8..8e7eae0b24b 100644
--- a/src/ethereum_test_exceptions/exceptions/block.py
+++ b/src/ethereum_test_exceptions/exceptions/block.py
@@ -10,202 +10,158 @@ class BlockException(ExceptionBase):
"""
Exception raised when a block is invalid, but not due to a transaction.
- E.g. all transactions in the block are valid, and can be applied to the state, but the
- block header contains an invalid field.
+ E.g. all transactions in the block are valid, and can be applied to the
+ state, but the block header contains an invalid field.
"""
TOO_MANY_UNCLES = auto()
- """
- Block declares too many uncles over the allowed limit.
- """
+ """Block declares too many uncles over the allowed limit."""
UNCLE_IN_CHAIN = auto()
- """
- Block declares uncle header that is already imported into chain.
- """
+ """Block declares uncle header that is already imported into chain."""
UNCLE_IS_ANCESTOR = auto()
- """
- Block declares uncle header that is directly a parent of this block.
- """
+ """Block declares uncle header that is directly a parent of this block."""
UNCLE_IS_BROTHER = auto()
- """
- Block declares two similar uncle headers.
- """
+ """Block declares two similar uncle headers."""
UNCLE_PARENT_INCORRECT = auto()
- """
- Block declares uncle header that is an outdated block to be an uncle.
- """
+ """Block declares uncle header that is an outdated block to be an uncle."""
EXTRA_DATA_TOO_BIG = auto()
- """
- Block header's extra data >32 bytes.
- """
+ """Block header's extra data >32 bytes."""
EXTRA_DATA_INVALID_DAO = auto()
"""
Block header's extra data after dao fork must be a fixed pre defined hash.
"""
UNKNOWN_PARENT = auto()
"""
- Block header's parent hash does not correspond to any of existing blocks on chain.
+ Block header's parent hash does not correspond to any of existing blocks on
+ chain.
"""
UNCLE_UNKNOWN_PARENT = auto()
"""
- Uncle header's parent hash does not correspond to any of existing blocks on chain.
+ Uncle header's parent hash does not correspond to any of existing blocks on
+ chain.
"""
UNKNOWN_PARENT_ZERO = auto()
- """
- Block header's parent hash is zero hash.
- """
+ """Block header's parent hash is zero hash."""
GASLIMIT_TOO_BIG = auto()
- """
- Block header's gas limit > 0x7fffffffffffffff.
- """
+ """Block header's gas limit > 0x7fffffffffffffff."""
INVALID_BLOCK_NUMBER = auto()
- """
- Block header's number != parent header's number + 1.
- """
+ """Block header's number != parent header's number + 1."""
INVALID_BLOCK_TIMESTAMP_OLDER_THAN_PARENT = auto()
- """
- Block header's timestamp <= parent header's timestamp.
- """
+ """Block header's timestamp <= parent header's timestamp."""
INVALID_DIFFICULTY = auto()
"""
- Block header's difficulty does not match the difficulty formula calculated from previous block.
+ Block header's difficulty does not match the difficulty formula calculated
+ from previous block.
"""
INVALID_LOG_BLOOM = auto()
"""
- Block header's logs bloom hash does not match the actually computed log bloom.
+ Block header's logs bloom hash does not match the actually computed log
+ bloom.
"""
INVALID_STATE_ROOT = auto()
"""
- Block header's state root hash does not match the actually computed hash of the state.
+ Block header's state root hash does not match the actually computed hash of
+ the state.
"""
INVALID_RECEIPTS_ROOT = auto()
"""
- Block header's receipts root hash does not match the actually computed hash of receipts.
+ Block header's receipts root hash does not match the actually computed hash
+ of receipts.
"""
INVALID_TRANSACTIONS_ROOT = auto()
"""
- Block header's transactions root hash does not match the actually computed hash of tx tree.
+ Block header's transactions root hash does not match the actually computed
+ hash of tx tree.
"""
INVALID_UNCLES_HASH = auto()
"""
- Block header's uncle hash does not match the actually computed hash of block's uncles.
+ Block header's uncle hash does not match the actually computed hash of
+ block's uncles.
"""
GAS_USED_OVERFLOW = auto()
- """
- Block transactions consume more gas than block header allow.
- """
+    """Block transactions consume more gas than block header allows."""
INVALID_GASLIMIT = auto()
"""
- Block header's gas limit does not match the gas limit formula calculated from previous block.
+ Block header's gas limit does not match the gas limit formula calculated
+ from previous block.
"""
INVALID_BASEFEE_PER_GAS = auto()
- """
- Block header's base_fee_per_gas field is calculated incorrect.
- """
+    """Block header's base_fee_per_gas field is calculated incorrectly."""
INVALID_GAS_USED = auto()
"""
Block header's actual gas used does not match the provided header's value
"""
INVALID_GAS_USED_ABOVE_LIMIT = auto()
- """
- Block header's gas used value is above the gas limit field's value.
- """
+ """Block header's gas used value is above the gas limit field's value."""
INVALID_WITHDRAWALS_ROOT = auto()
"""
Block header's withdrawals root does not match calculated withdrawals root.
"""
INCORRECT_BLOCK_FORMAT = auto()
"""
- Block's format is incorrect, contains invalid fields, is missing fields, or contains fields of
- a fork that is not active yet.
+ Block's format is incorrect, contains invalid fields, is missing fields, or
+ contains fields of a fork that is not active yet.
"""
BLOB_GAS_USED_ABOVE_LIMIT = auto()
- """
- Block's blob gas used in header is above the limit.
- """
+ """Block's blob gas used in header is above the limit."""
INCORRECT_BLOB_GAS_USED = auto()
- """
- Block's blob gas used in header is incorrect.
- """
+ """Block's blob gas used in header is incorrect."""
INCORRECT_EXCESS_BLOB_GAS = auto()
- """
- Block's excess blob gas in header is incorrect.
- """
+ """Block's excess blob gas in header is incorrect."""
INVALID_VERSIONED_HASHES = auto()
- """
- Incorrect number of versioned hashes in a payload.
- """
+ """Incorrect number of versioned hashes in a payload."""
RLP_STRUCTURES_ENCODING = auto()
"""
Block's rlp encoding is valid but ethereum structures in it are invalid.
"""
RLP_WITHDRAWALS_NOT_READ = auto()
- """
- Block's rlp encoding is missing withdrawals.
- """
+ """Block's rlp encoding is missing withdrawals."""
RLP_INVALID_FIELD_OVERFLOW_64 = auto()
- """
- One of block's fields rlp is overflow 2**64 value.
- """
+ """One of block's fields rlp is overflow 2**64 value."""
RLP_INVALID_ADDRESS = auto()
- """
- Block withdrawals address is rlp of invalid address != 20 bytes.
- """
+ """Block withdrawals address is rlp of invalid address != 20 bytes."""
RLP_BLOCK_LIMIT_EXCEEDED = auto()
- """
- Block's rlp encoding is larger than the allowed limit.
- """
+ """Block's rlp encoding is larger than the allowed limit."""
INVALID_REQUESTS = auto()
- """
- Block's requests are invalid.
- """
+ """Block's requests are invalid."""
IMPORT_IMPOSSIBLE_LEGACY = auto()
- """
- Legacy block import is impossible in this chain configuration.
- """
+ """Legacy block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_LEGACY_WRONG_PARENT = auto()
"""
- Legacy block import is impossible, trying to import on top of a block that is not legacy.
+ Legacy block import is impossible, trying to import on top of a block that
+ is not legacy.
"""
IMPORT_IMPOSSIBLE_LONDON_WRONG_PARENT = auto()
"""
Trying to import london (basefee) block on top of block that is not 1559.
"""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POW = auto()
- """
- Trying to import paris(merge) block with PoW enabled.
- """
+ """Trying to import paris(merge) block with PoW enabled."""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POS = auto()
"""
Trying to import paris(merge) block with PoS enabled before TTD is reached.
"""
IMPORT_IMPOSSIBLE_LONDON_OVER_PARIS = auto()
- """
- Trying to import london looking block over paris network (POS).
- """
+ """Trying to import london looking block over paris network (POS)."""
IMPORT_IMPOSSIBLE_PARIS_OVER_SHANGHAI = auto()
- """
- Trying to import paris block on top of shanghai block.
- """
+ """Trying to import paris block on top of shanghai block."""
IMPORT_IMPOSSIBLE_SHANGHAI = auto()
- """
- Shanghai block import is impossible in this chain configuration.
- """
+ """Shanghai block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_UNCLES_OVER_PARIS = auto()
"""
Trying to import a block after paris fork that has not empty uncles hash.
"""
IMPORT_IMPOSSIBLE_DIFFICULTY_OVER_PARIS = auto()
- """
- Trying to import a block after paris fork that has difficulty != 0.
- """
+ """Trying to import a block after paris fork that has difficulty != 0."""
SYSTEM_CONTRACT_EMPTY = auto()
"""
- A system contract address contains no code at the end of fork activation block.
+ A system contract address contains no code at the end of fork activation
+ block.
"""
SYSTEM_CONTRACT_CALL_FAILED = auto()
"""
- A system contract call at the end of block execution (from the system address) fails.
+ A system contract call at the end of block execution (from the system
+ address) fails.
"""
INVALID_BLOCK_HASH = auto()
"""
@@ -213,21 +169,18 @@ class BlockException(ExceptionBase):
"""
INVALID_DEPOSIT_EVENT_LAYOUT = auto()
"""
- Transaction emits a `DepositEvent` in the deposit contract (EIP-6110), but the layout
- of the event does not match the required layout.
+ Transaction emits a `DepositEvent` in the deposit contract (EIP-6110), but
+ the layout of the event does not match the required layout.
"""
# --- Block-Level Access Lists (EIP-7928) --- #
INVALID_BLOCK_ACCESS_LIST = auto()
- """
- Block's access list is invalid.
- """
+ """Block's access list is invalid."""
INVALID_BAL_HASH = auto()
- """
- Block header's BAL hash does not match the computed BAL hash.
- """
+ """Block header's BAL hash does not match the computed BAL hash."""
INVALID_BAL_EXTRA_ACCOUNT = auto()
"""
- Block BAL contains an account change that is not present in the computed BAL.
+ Block BAL contains an account change that is not present in the computed
+ BAL.
"""
INVALID_BAL_MISSING_ACCOUNT = auto()
"""
diff --git a/src/ethereum_test_exceptions/exceptions/eof.py b/src/ethereum_test_exceptions/exceptions/eof.py
index 03df7faf649..0e9c0c5f6c6 100644
--- a/src/ethereum_test_exceptions/exceptions/eof.py
+++ b/src/ethereum_test_exceptions/exceptions/eof.py
@@ -10,210 +10,116 @@ class EOFException(ExceptionBase):
"""Exception raised when an EOF container is invalid."""
DEFAULT_EXCEPTION = auto()
- """
- Expect some exception, not yet known.
- """
+ """Expect some exception, not yet known."""
UNDEFINED_EXCEPTION = auto()
- """
- Indicates that exception string is not mapped to an exception enum.
- """
+ """Indicates that exception string is not mapped to an exception enum."""
UNDEFINED_INSTRUCTION = auto()
- """
- EOF container has undefined instruction in it's body code.
- """
+    """EOF container has undefined instruction in its body code."""
UNKNOWN_VERSION = auto()
- """
- EOF container has an unknown version.
- """
+ """EOF container has an unknown version."""
INCOMPLETE_MAGIC = auto()
- """
- EOF container has not enough bytes to read magic.
- """
+ """EOF container has not enough bytes to read magic."""
INVALID_MAGIC = auto()
- """
- EOF container has not allowed magic version byte.
- """
+ """EOF container has not allowed magic version byte."""
INVALID_VERSION = auto()
- """
- EOF container version bytes mismatch.
- """
+ """EOF container version bytes mismatch."""
INVALID_NON_RETURNING_FLAG = auto()
- """
- EOF container's section has non-returning flag set incorrectly.
- """
+ """EOF container's section has non-returning flag set incorrectly."""
INVALID_RJUMP_DESTINATION = auto()
- """
- Code has RJUMP instruction with invalid parameters.
- """
+ """Code has RJUMP instruction with invalid parameters."""
MISSING_TYPE_HEADER = auto()
- """
- EOF container missing types section.
- """
+ """EOF container missing types section."""
INVALID_TYPE_SECTION_SIZE = auto()
- """
- EOF container types section has wrong size.
- """
+ """EOF container types section has wrong size."""
INVALID_TYPE_BODY = auto()
- """
- EOF container types body section bytes are wrong.
- """
+ """EOF container types body section bytes are wrong."""
MISSING_CODE_HEADER = auto()
- """
- EOF container missing code section.
- """
+ """EOF container missing code section."""
INVALID_CODE_SECTION = auto()
- """
- EOF container code section bytes are incorrect.
- """
+ """EOF container code section bytes are incorrect."""
INCOMPLETE_CODE_HEADER = auto()
- """
- EOF container code header missing bytes.
- """
+ """EOF container code header missing bytes."""
INCOMPLETE_DATA_HEADER = auto()
- """
- EOF container data header missing bytes.
- """
+ """EOF container data header missing bytes."""
ZERO_SECTION_SIZE = auto()
- """
- EOF container data header construction is wrong.
- """
+ """EOF container data header construction is wrong."""
MISSING_DATA_SECTION = auto()
- """
- EOF container missing data section
- """
+ """EOF container missing data section"""
INCOMPLETE_CONTAINER = auto()
- """
- EOF container bytes are incomplete.
- """
+ """EOF container bytes are incomplete."""
INVALID_SECTION_BODIES_SIZE = auto()
- """
- Sections bodies does not match sections headers.
- """
+ """Sections bodies does not match sections headers."""
TRAILING_BYTES = auto()
- """
- EOF container has bytes beyond data section.
- """
+ """EOF container has bytes beyond data section."""
MISSING_TERMINATOR = auto()
- """
- EOF container missing terminator bytes between header and body.
- """
+ """EOF container missing terminator bytes between header and body."""
MISSING_HEADERS_TERMINATOR = auto()
- """
- Some type of another exception about missing headers terminator.
- """
+ """Some type of another exception about missing headers terminator."""
INVALID_FIRST_SECTION_TYPE = auto()
- """
- EOF container header does not have types section first.
- """
+ """EOF container header does not have types section first."""
INCOMPLETE_SECTION_NUMBER = auto()
- """
- EOF container header has section that is missing declaration bytes.
- """
+ """EOF container header has section that is missing declaration bytes."""
INCOMPLETE_SECTION_SIZE = auto()
- """
- EOF container header has section that is defined incorrectly.
- """
+ """EOF container header has section that is defined incorrectly."""
TOO_MANY_CODE_SECTIONS = auto()
- """
- EOF container header has too many code sections.
- """
+ """EOF container header has too many code sections."""
MISSING_STOP_OPCODE = auto()
- """
- EOF container's code missing STOP bytecode at it's end.
- """
+    """EOF container's code missing STOP bytecode at its end."""
INPUTS_OUTPUTS_NUM_ABOVE_LIMIT = auto()
- """
- EOF container code section inputs/outputs number is above the limit
- """
+ """EOF container code section inputs/outputs number is above the limit"""
UNREACHABLE_INSTRUCTIONS = auto()
- """
- EOF container's code have instructions that are unreachable.
- """
+    """EOF container's code has instructions that are unreachable."""
UNREACHABLE_CODE_SECTIONS = auto()
- """
- EOF container's body have code sections that are unreachable.
- """
+    """EOF container's body has code sections that are unreachable."""
STACK_UNDERFLOW = auto()
- """
- EOF container's code produces an stack underflow.
- """
+    """EOF container's code produces a stack underflow."""
STACK_OVERFLOW = auto()
- """
- EOF container's code produces an stack overflow.
- """
+    """EOF container's code produces a stack overflow."""
STACK_HEIGHT_MISMATCH = auto()
- """
- EOF container section stack height mismatch.
- """
+ """EOF container section stack height mismatch."""
MAX_STACK_INCREASE_ABOVE_LIMIT = auto()
- """
- EOF container's specified max stack increase is above the limit.
- """
+ """EOF container's specified max stack increase is above the limit."""
STACK_HIGHER_THAN_OUTPUTS = auto()
"""
- EOF container section stack height is higher than the outputs.
- when returning
+    EOF container section stack height is higher than the outputs when
+    returning.
"""
JUMPF_DESTINATION_INCOMPATIBLE_OUTPUTS = auto()
"""
- EOF container section JUMPF's to a destination section with incompatible outputs.
+ EOF container section JUMPF's to a destination section with incompatible
+ outputs.
"""
INVALID_MAX_STACK_INCREASE = auto()
"""
- EOF container section's specified max stack increase does not match the actual stack height.
+ EOF container section's specified max stack increase does not match the
+ actual stack height.
"""
INVALID_DATALOADN_INDEX = auto()
- """
- A DATALOADN instruction has out-of-bounds index for the data section.
- """
+ """A DATALOADN instruction has out-of-bounds index for the data section."""
TRUNCATED_INSTRUCTION = auto()
- """
- EOF container's code section has truncated instruction.
- """
+ """EOF container's code section has truncated instruction."""
TOPLEVEL_CONTAINER_TRUNCATED = auto()
- """
- Top-level EOF container has data section truncated
- """
+ """Top-level EOF container has data section truncated"""
ORPHAN_SUBCONTAINER = auto()
- """
- EOF container has an unreferenced subcontainer.
- '"""
+    """EOF container has an unreferenced subcontainer."""
CONTAINER_SIZE_ABOVE_LIMIT = auto()
- """
- EOF container is above size limit
- """
+ """EOF container is above size limit"""
INVALID_CONTAINER_SECTION_INDEX = auto()
- """
- Instruction references container section that does not exist.
- """
+ """Instruction references container section that does not exist."""
INCOMPATIBLE_CONTAINER_KIND = auto()
- """
- Incompatible instruction found in a container of a specific kind.
- """
+ """Incompatible instruction found in a container of a specific kind."""
AMBIGUOUS_CONTAINER_KIND = auto()
- """
- The kind of a sub-container cannot be uniquely deduced.
- """
+ """The kind of a sub-container cannot be uniquely deduced."""
TOO_MANY_CONTAINERS = auto()
- """
- EOF container header has too many sub-containers.
- """
+ """EOF container header has too many sub-containers."""
INVALID_CODE_SECTION_INDEX = auto()
- """
- CALLF Operation refers to a non-existent code section
- """
+ """CALLF Operation refers to a non-existent code section"""
UNEXPECTED_HEADER_KIND = auto()
- """
- Header parsing encountered a section kind it wasn't expecting
- """
+ """Header parsing encountered a section kind it wasn't expecting"""
CALLF_TO_NON_RETURNING = auto()
- """
- CALLF instruction targeting a non-returning code section
- """
+ """CALLF instruction targeting a non-returning code section"""
EOFCREATE_WITH_TRUNCATED_CONTAINER = auto()
- """
- EOFCREATE with truncated container
- """
+ """EOFCREATE with truncated container"""
diff --git a/src/ethereum_test_exceptions/exceptions/transaction.py b/src/ethereum_test_exceptions/exceptions/transaction.py
index 4d3bbc97b86..ee67b0d55ef 100644
--- a/src/ethereum_test_exceptions/exceptions/transaction.py
+++ b/src/ethereum_test_exceptions/exceptions/transaction.py
@@ -8,195 +8,125 @@
@unique
class TransactionException(ExceptionBase):
"""
- Exception raised when a transaction is invalid, and thus cannot be executed.
+ Exception raised when a transaction is invalid, and thus cannot be
+ executed.
- If a transaction with any of these exceptions is included in a block, the block is invalid.
+ If a transaction with any of these exceptions is included in a block, the
+ block is invalid.
"""
TYPE_NOT_SUPPORTED = auto()
- """
- Transaction type is not supported on this chain configuration.
- """
+ """Transaction type is not supported on this chain configuration."""
SENDER_NOT_EOA = auto()
- """
- Transaction is coming from address that is not exist anymore.
- """
+    """Transaction is coming from an address that does not exist anymore."""
ADDRESS_TOO_SHORT = auto()
- """
- Transaction `to` is not allowed to be less than 20 bytes.
- """
+ """Transaction `to` is not allowed to be less than 20 bytes."""
ADDRESS_TOO_LONG = auto()
- """
- Transaction `to` is not allowed to be more than 20 bytes.
- """
+ """Transaction `to` is not allowed to be more than 20 bytes."""
NONCE_MISMATCH_TOO_HIGH = auto()
- """
- Transaction nonce > sender.nonce.
- """
+ """Transaction nonce > sender.nonce."""
NONCE_MISMATCH_TOO_LOW = auto()
- """
- Transaction nonce < sender.nonce.
- """
+ """Transaction nonce < sender.nonce."""
NONCE_TOO_BIG = auto()
"""
- Transaction `nonce` is not allowed to be max_uint64 - 1 (this is probably TransactionTest).
+ Transaction `nonce` is not allowed to be max_uint64 - 1 (this is probably
+ TransactionTest).
"""
NONCE_IS_MAX = auto()
"""
- Transaction `nonce` is not allowed to be max_uint64 - 1 (this is StateTests).
+ Transaction `nonce` is not allowed to be max_uint64 - 1 (this is
+ StateTests).
"""
NONCE_OVERFLOW = auto()
- """
- Transaction `nonce` is not allowed to be more than uint64.
- """
+ """Transaction `nonce` is not allowed to be more than uint64."""
GASLIMIT_OVERFLOW = auto()
- """
- Transaction gaslimit exceeds 2^64-1 maximum value.
- """
+ """Transaction gaslimit exceeds 2^64-1 maximum value."""
VALUE_OVERFLOW = auto()
- """
- Transaction value exceeds 2^256-1 maximum value.
- """
+ """Transaction value exceeds 2^256-1 maximum value."""
GASPRICE_OVERFLOW = auto()
- """
- Transaction gasPrice exceeds 2^256-1 maximum value.
- """
+ """Transaction gasPrice exceeds 2^256-1 maximum value."""
GASLIMIT_PRICE_PRODUCT_OVERFLOW = auto()
- """
- Transaction gasPrice * gasLimit exceeds 2^256-1 maximum value.
- """
+ """Transaction gasPrice * gasLimit exceeds 2^256-1 maximum value."""
INVALID_SIGNATURE_VRS = auto()
- """
- Invalid transaction v, r, s values.
- """
+ """Invalid transaction v, r, s values."""
RLP_INVALID_SIGNATURE_R = auto()
- """
- Error reading transaction signature R value.
- """
+ """Error reading transaction signature R value."""
RLP_INVALID_SIGNATURE_S = auto()
- """
- Error reading transaction signature S value.
- """
+ """Error reading transaction signature S value."""
RLP_LEADING_ZEROS_GASLIMIT = auto()
- """
- Error reading transaction gaslimit field RLP.
- """
+ """Error reading transaction gaslimit field RLP."""
RLP_LEADING_ZEROS_GASPRICE = auto()
- """
- Error reading transaction gasprice field RLP.
- """
+ """Error reading transaction gasprice field RLP."""
RLP_LEADING_ZEROS_VALUE = auto()
- """
- Error reading transaction value field RLP.
- """
+ """Error reading transaction value field RLP."""
RLP_LEADING_ZEROS_NONCE = auto()
- """
- Error reading transaction nonce field RLP.
- """
+ """Error reading transaction nonce field RLP."""
RLP_LEADING_ZEROS_R = auto()
- """
- Error reading transaction signature R field RLP.
- """
+ """Error reading transaction signature R field RLP."""
RLP_LEADING_ZEROS_S = auto()
- """
- Error reading transaction signature S field RLP.
- """
+ """Error reading transaction signature S field RLP."""
RLP_LEADING_ZEROS_V = auto()
- """
- Error reading transaction signature V field RLP.
- """
+ """Error reading transaction signature V field RLP."""
RLP_LEADING_ZEROS_BASEFEE = auto()
- """
- Error reading transaction basefee field RLP.
- """
+ """Error reading transaction basefee field RLP."""
RLP_LEADING_ZEROS_PRIORITY_FEE = auto()
- """
- Error reading transaction priority fee field RLP.
- """
+ """Error reading transaction priority fee field RLP."""
RLP_LEADING_ZEROS_DATA_SIZE = auto()
"""
- Error reading transaction data field RLP, (rlp field length has leading zeros).
+ Error reading transaction data field RLP, (rlp field length has leading
+ zeros).
"""
RLP_LEADING_ZEROS_NONCE_SIZE = auto()
"""
- Error reading transaction nonce field RLP, (rlp field length has leading zeros).
+ Error reading transaction nonce field RLP, (rlp field length has leading
+ zeros).
"""
RLP_TOO_FEW_ELEMENTS = auto()
"""
- Error reading transaction RLP, structure has too few elements than expected.
+    Error reading transaction RLP, structure has fewer elements than
+    expected.
"""
RLP_TOO_MANY_ELEMENTS = auto()
"""
- Error reading transaction RLP, structure has too many elements than expected.
+    Error reading transaction RLP, structure has more elements than
+    expected.
"""
RLP_ERROR_EOF = auto()
- """
- Error reading transaction RLP, rlp stream unexpectedly finished.
- """
+ """Error reading transaction RLP, rlp stream unexpectedly finished."""
RLP_ERROR_SIZE = auto()
- """
- Error reading transaction RLP, rlp size is invalid.
- """
+ """Error reading transaction RLP, rlp size is invalid."""
RLP_ERROR_SIZE_LEADING_ZEROS = auto()
- """
- Error reading transaction RLP, field size has leading zeros.
- """
+ """Error reading transaction RLP, field size has leading zeros."""
INVALID_CHAINID = auto()
- """
- Transaction chain id encoding is incorrect.
- """
+ """Transaction chain id encoding is incorrect."""
RLP_INVALID_DATA = auto()
- """
- Transaction data field is invalid rlp.
- """
+ """Transaction data field is invalid rlp."""
RLP_INVALID_GASLIMIT = auto()
- """
- Transaction gaslimit field is invalid rlp.
- """
+ """Transaction gaslimit field is invalid rlp."""
RLP_INVALID_NONCE = auto()
- """
- Transaction nonce field is invalid rlp.
- """
+ """Transaction nonce field is invalid rlp."""
RLP_INVALID_TO = auto()
- """
- Transaction to field is invalid rlp.
- """
+ """Transaction to field is invalid rlp."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_LONG = auto()
- """
- Transaction access list address is > 20 bytes.
- """
+ """Transaction access list address is > 20 bytes."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_SHORT = auto()
- """
- Transaction access list address is < 20 bytes.
- """
+ """Transaction access list address is < 20 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_LONG = auto()
- """
- Transaction access list storage hash > 32 bytes.
- """
+ """Transaction access list storage hash > 32 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_SHORT = auto()
- """
- Transaction access list storage hash < 32 bytes.
- """
+ """Transaction access list storage hash < 32 bytes."""
RLP_INVALID_HEADER = auto()
- """
- Transaction failed to read from RLP as rlp header is invalid.
- """
+ """Transaction failed to read from RLP as rlp header is invalid."""
RLP_INVALID_VALUE = auto()
- """
- Transaction value field is invalid rlp/structure.
- """
+ """Transaction value field is invalid rlp/structure."""
EC_RECOVERY_FAIL = auto()
- """
- Transaction has correct signature, but ec recovery failed.
- """
+ """Transaction has correct signature, but ec recovery failed."""
INSUFFICIENT_ACCOUNT_FUNDS = auto()
"""
Transaction's sender does not have enough funds to pay for the transaction.
"""
INSUFFICIENT_MAX_FEE_PER_GAS = auto()
- """
- Transaction's max-fee-per-gas is lower than the block base-fee.
- """
+ """Transaction's max-fee-per-gas is lower than the block base-fee."""
PRIORITY_OVERFLOW = auto()
"""
Transaction's max-priority-fee-per-gas is exceeds 2^256-1 maximum value.
@@ -207,85 +137,56 @@ class TransactionException(ExceptionBase):
"""
PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS_2 = auto()
"""
- Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas (TransactionTests).
+ Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas
+ (TransactionTests).
"""
INSUFFICIENT_MAX_FEE_PER_BLOB_GAS = auto()
"""
- Transaction's max-fee-per-blob-gas is lower than the block's blob-gas price.
+ Transaction's max-fee-per-blob-gas is lower than the block's blob-gas
+ price.
"""
INTRINSIC_GAS_TOO_LOW = auto()
- """
- Transaction's gas limit is too low.
- """
+ """Transaction's gas limit is too low."""
INTRINSIC_GAS_BELOW_FLOOR_GAS_COST = auto()
- """
- Transaction's gas limit is below the floor gas cost.
- """
+ """Transaction's gas limit is below the floor gas cost."""
INITCODE_SIZE_EXCEEDED = auto()
"""
Transaction's initcode for a contract-creating transaction is too large.
"""
TYPE_3_TX_PRE_FORK = auto()
- """
- Transaction type 3 included before activation fork.
- """
+ """Transaction type 3 included before activation fork."""
TYPE_3_TX_ZERO_BLOBS_PRE_FORK = auto()
- """
- Transaction type 3, with zero blobs, included before activation fork.
- """
+ """Transaction type 3, with zero blobs, included before activation fork."""
TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH = auto()
- """
- Transaction contains a blob versioned hash with an invalid version.
- """
+ """Transaction contains a blob versioned hash with an invalid version."""
TYPE_3_TX_WITH_FULL_BLOBS = auto()
- """
- Transaction contains full blobs (network-version of the transaction).
- """
+ """Transaction contains full blobs (network-version of the transaction)."""
TYPE_3_TX_BLOB_COUNT_EXCEEDED = auto()
- """
- Transaction contains too many blob versioned hashes.
- """
+ """Transaction contains too many blob versioned hashes."""
TYPE_3_TX_CONTRACT_CREATION = auto()
- """
- Transaction is a type 3 transaction and has an empty `to`.
- """
+ """Transaction is a type 3 transaction and has an empty `to`."""
TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED = auto()
- """
- Transaction causes block to go over blob gas limit.
- """
+ """Transaction causes block to go over blob gas limit."""
GAS_ALLOWANCE_EXCEEDED = auto()
- """
- Transaction causes block to go over blob gas limit.
- """
+ """Transaction causes block to go over blob gas limit."""
GAS_LIMIT_EXCEEDS_MAXIMUM = auto()
"""
Transaction gas limit exceeds the maximum allowed limit of 30 million.
"""
TYPE_3_TX_ZERO_BLOBS = auto()
- """
- Transaction is type 3, but has no blobs.
- """
+ """Transaction is type 3, but has no blobs."""
TYPE_4_EMPTY_AUTHORIZATION_LIST = auto()
- """
- Transaction is type 4, but has an empty authorization list.
- """
+ """Transaction is type 4, but has an empty authorization list."""
TYPE_4_INVALID_AUTHORITY_SIGNATURE = auto()
- """
- Transaction authority signature is invalid
- """
+ """Transaction authority signature is invalid"""
TYPE_4_INVALID_AUTHORITY_SIGNATURE_S_TOO_HIGH = auto()
- """
- Transaction authority signature is invalid
- """
+ """Transaction authority signature is invalid"""
TYPE_4_TX_CONTRACT_CREATION = auto()
- """
- Transaction is a type 4 transaction and has an empty `to`.
- """
+ """Transaction is a type 4 transaction and has an empty `to`."""
TYPE_4_INVALID_AUTHORIZATION_FORMAT = auto()
"""
- Transaction is type 4, but contains an authorization that has an invalid format.
+ Transaction is type 4, but contains an authorization that has an invalid
+ format.
"""
TYPE_4_TX_PRE_FORK = auto()
- """
- Transaction type 4 included before activation fork.
- """
+ """Transaction type 4 included before activation fork."""
diff --git a/src/ethereum_test_exceptions/tests/test_exceptions.py b/src/ethereum_test_exceptions/tests/test_exceptions.py
index 925d76f9c60..a09381be22e 100644
--- a/src/ethereum_test_exceptions/tests/test_exceptions.py
+++ b/src/ethereum_test_exceptions/tests/test_exceptions.py
@@ -34,7 +34,10 @@
def test_exceptions_string_conversion(
exception: BlockException | TransactionException, expected: str
):
- """Test that the exceptions are unique and have the correct string representation."""
+ """
+ Test that the exceptions are unique and have the correct string
+ representation.
+ """
assert str(exception) == expected
diff --git a/src/ethereum_test_execution/base.py b/src/ethereum_test_execution/base.py
index 1bf1d00437c..58a3411443b 100644
--- a/src/ethereum_test_execution/base.py
+++ b/src/ethereum_test_execution/base.py
@@ -44,8 +44,8 @@ class LabeledExecuteFormat:
"""
Represents an execution format with a custom label.
- This label will be used in the test id and also will be added as a marker to the
- generated test case when executing the test.
+ This label will be used in the test id and also will be added as a marker
+ to the generated test case when executing the test.
"""
format: Type[BaseExecute]
@@ -85,8 +85,8 @@ def __eq__(self, other: Any) -> bool:
"""
Check if two labeled execute formats are equal.
- If the other object is a ExecuteFormat type, the format of the labeled execute
- format will be compared with the format of the other object.
+ If the other object is a ExecuteFormat type, the format of the labeled
+ execute format will be compared with the format of the other object.
"""
if isinstance(other, LabeledExecuteFormat):
return self.format == other.format
diff --git a/src/ethereum_test_execution/blob_transaction.py b/src/ethereum_test_execution/blob_transaction.py
index 6c7f6a90d0d..21bedbf4ddb 100644
--- a/src/ethereum_test_execution/blob_transaction.py
+++ b/src/ethereum_test_execution/blob_transaction.py
@@ -47,8 +47,9 @@ def versioned_hashes_with_blobs_and_proofs(
class BlobTransaction(BaseExecute):
"""
- Represents a test execution format to send blob transactions to the client and then
- use `engine_getBlobsV*` end points to validate the proofs generated by the execution client.
+ Represents a test execution format to send blob transactions to the client
+    and then use `engine_getBlobsV*` endpoints to validate the proofs
+ generated by the execution client.
"""
format_name: ClassVar[str] = "blob_transaction_test"
@@ -94,16 +95,18 @@ def execute(
version = fork.engine_get_blobs_version()
assert version is not None, "Engine get blobs version is not supported by the fork."
- # ensure that clients respond 'null' when they have no access to at least one blob
+ # ensure that clients respond 'null' when they have no access to at
+ # least one blob
list_versioned_hashes = list(versioned_hashes.keys())
if self.nonexisting_blob_hashes is not None:
list_versioned_hashes.extend(self.nonexisting_blob_hashes)
blob_response: GetBlobsResponse | None = engine_rpc.get_blobs(
list_versioned_hashes, version=version
- ) # noqa: E501
+ )
- # if non-existing blob hashes were request then the response must be 'null'
+    # if non-existing blob hashes were requested then the response must be
+ # 'null'
if self.nonexisting_blob_hashes is not None:
if blob_response is not None:
raise ValueError(
@@ -121,8 +124,9 @@ def execute(
assert blob_response is not None
local_blobs_and_proofs = list(versioned_hashes.values())
- assert len(blob_response) == len(local_blobs_and_proofs), "Expected "
- f"{len(local_blobs_and_proofs)} blobs and proofs, got {len(blob_response)}."
+ assert len(blob_response) == len(local_blobs_and_proofs), (
+ f"Expected {len(local_blobs_and_proofs)} blobs and proofs, got {len(blob_response)}."
+ )
for expected_blob, received_blob in zip(
local_blobs_and_proofs, blob_response.root, strict=True
diff --git a/src/ethereum_test_execution/transaction_post.py b/src/ethereum_test_execution/transaction_post.py
index fce6b2bbcb9..5c97eb045bc 100644
--- a/src/ethereum_test_execution/transaction_post.py
+++ b/src/ethereum_test_execution/transaction_post.py
@@ -14,7 +14,9 @@
class TransactionPost(BaseExecute):
- """Represents a simple transaction-send then post-check execution format."""
+ """
+ Represents a simple transaction-send then post-check execution format.
+ """
blocks: List[List[Transaction]]
post: Alloc
diff --git a/src/ethereum_test_fixtures/base.py b/src/ethereum_test_fixtures/base.py
index b4531e4580a..e0670b7b34a 100644
--- a/src/ethereum_test_fixtures/base.py
+++ b/src/ethereum_test_fixtures/base.py
@@ -64,7 +64,10 @@ class BaseFixture(CamelModel):
@classmethod
def output_base_dir_name(cls) -> str:
- """Return name of the subdirectory where this type of fixture should be dumped to."""
+ """
+ Return name of the subdirectory where this type of fixture should be
+ dumped to.
+ """
return cls.format_name.replace("test", "tests")
@classmethod
@@ -161,7 +164,10 @@ def discard_fixture_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard a fixture format from filling if the appropriate marker is used."""
+ """
+ Discard a fixture format from filling if the appropriate marker is
+ used.
+ """
return False
@@ -169,8 +175,8 @@ class LabeledFixtureFormat:
"""
Represents a fixture format with a custom label.
- This label will be used in the test id and also will be added as a marker to the
- generated test case when filling the test.
+ This label will be used in the test id and also will be added as a marker
+ to the generated test case when filling the test.
"""
format: Type[BaseFixture]
@@ -210,8 +216,8 @@ def __eq__(self, other: Any) -> bool:
"""
Check if two labeled fixture formats are equal.
- If the other object is a FixtureFormat type, the format of the labeled fixture
- format will be compared with the format of the other object.
+ If the other object is a FixtureFormat type, the format of the labeled
+ fixture format will be compared with the format of the other object.
"""
if isinstance(other, LabeledFixtureFormat):
return self.format == other.format
diff --git a/src/ethereum_test_fixtures/blockchain.py b/src/ethereum_test_fixtures/blockchain.py
index 74d60f16bb0..b5ef43cb400 100644
--- a/src/ethereum_test_fixtures/blockchain.py
+++ b/src/ethereum_test_fixtures/blockchain.py
@@ -55,10 +55,8 @@ def post_state_validator(alternate_field: str | None = None, mode: str = "after"
"""
Create a validator to ensure exactly one post-state field is provided.
- Args:
- alternate_field: Alternative field name to post_state_hash (e.g., 'post_state_diff').
- mode: Pydantic validation mode.
-
+ Args: alternate_field: Alternative field name to post_state_hash (e.g.,
+ 'post_state_diff'). mode: Pydantic validation mode.
"""
def decorator(cls):
@@ -68,10 +66,12 @@ def validate_post_state_fields(self):
if mode == "after":
# Determine which fields to check
if alternate_field:
- # For engine x fixtures: check post_state vs post_state_diff
+ # For engine x fixtures: check post_state vs
+ # post_state_diff
field1_name, field2_name = "post_state", alternate_field
else:
- # For standard fixtures: check post_state vs post_state_hash
+ # For standard fixtures: check post_state vs
+ # post_state_hash
field1_name, field2_name = "post_state", "post_state_hash"
field1_value = getattr(self, field1_name, None)
@@ -93,8 +93,8 @@ def validate_post_state_fields(self):
class HeaderForkRequirement(str):
"""
- Fork requirement class that specifies the name of the method that should be called
- to check if the field is required.
+ Fork requirement class that specifies the name of the method that should be
+ called to check if the field is required.
"""
def __new__(cls, value: str) -> "HeaderForkRequirement":
@@ -173,7 +173,10 @@ class FixtureHeader(CamelModel):
fork: Fork | None = Field(None, exclude=True)
def model_post_init(self, __context):
- """Model post init method used to check for required fields of a given fork."""
+ """
+ Model post init method used to check for required fields of a given
+ fork.
+ """
super().model_post_init(__context)
if self.fork is None:
@@ -184,8 +187,9 @@ def model_post_init(self, __context):
block_number = self.number
timestamp = self.timestamp
- # For each field, check if any of the annotations are of type HeaderForkRequirement and
- # if so, check if the field is required for the given fork.
+ # For each field, check if any of the annotations are of type
+ # HeaderForkRequirement and if so, check if the field is required for
+ # the given fork.
annotated_hints = get_type_hints(self, include_extras=True)
for field in self.__class__.model_fields:
@@ -244,7 +248,9 @@ def genesis(cls, fork: Fork, env: Environment, state_root: Hash) -> "FixtureHead
class FixtureExecutionPayload(CamelModel):
- """Representation of an Ethereum execution payload within a test Fixture."""
+ """
+ Representation of an Ethereum execution payload within a test Fixture.
+ """
parent_hash: Hash
fee_recipient: Address
@@ -282,8 +288,8 @@ def from_fixture_header(
block_access_list: Bytes | None = None,
) -> "FixtureExecutionPayload":
"""
- Return FixtureExecutionPayload from a FixtureHeader, a list
- of transactions, a list of withdrawals, and an optional block access list.
+ Return FixtureExecutionPayload from a FixtureHeader, a list of
+ transactions, a list of withdrawals, and an optional block access list.
"""
return cls(
**header.model_dump(exclude={"rlp"}, exclude_none=True),
@@ -303,8 +309,8 @@ def from_fixture_header(
]
EngineNewPayloadV5Parameters = EngineNewPayloadV4Parameters
-# Important: We check EngineNewPayloadV3Parameters first as it has more fields, and pydantic
-# has a weird behavior when the smaller tuple is checked first.
+# Important: We check EngineNewPayloadV3Parameters first as it has more fields,
+# and pydantic has a weird behavior when the smaller tuple is checked first.
EngineNewPayloadParameters = Union[
EngineNewPayloadV5Parameters,
EngineNewPayloadV4Parameters,
@@ -315,8 +321,8 @@ def from_fixture_header(
class FixtureEngineNewPayload(CamelModel):
"""
- Representation of the `engine_newPayloadVX` information to be
- sent using the block information.
+ Representation of the `engine_newPayloadVX` information to be sent using
+ the block information.
"""
params: EngineNewPayloadParameters
@@ -445,7 +451,10 @@ def parse_witness_chunks(cls, s: str) -> List["WitnessChunk"]:
class FixtureBlockBase(CamelModel):
- """Representation of an Ethereum block within a test Fixture without RLP bytes."""
+ """
+ Representation of an Ethereum block within a test Fixture without RLP
+ bytes.
+ """
header: FixtureHeader = Field(..., alias="blockHeader")
txs: List[FixtureTransaction] = Field(default_factory=list, alias="transactions")
@@ -467,7 +476,9 @@ def with_rlp(self, txs: List[Transaction]) -> "FixtureBlock":
block = [
self.header.rlp_encode_list,
[tx.serializable_list for tx in txs],
- self.ommers, # TODO: This is incorrect, and we probably need to serialize the ommers
+ # TODO: This is incorrect, and we probably
+ # need to serialize the ommers
+ self.ommers,
]
if self.withdrawals is not None:
@@ -519,15 +530,16 @@ class BlockchainFixtureCommon(BaseFixture):
pre: Alloc
post_state: Alloc | None = Field(None)
post_state_hash: Hash | None = Field(None)
- last_block_hash: Hash = Field(..., alias="lastblockhash") # FIXME: lastBlockHash
+ # FIXME: lastBlockHash
+ last_block_hash: Hash = Field(..., alias="lastblockhash")
config: FixtureConfig
@model_validator(mode="before")
@classmethod
def config_defaults_for_backwards_compatibility(cls, data: Any) -> Any:
"""
- Check if the config field is populated, otherwise use the root-level field values for
- backwards compatibility.
+ Check if the config field is populated, otherwise use the root-level
+ field values for backwards compatibility.
"""
if isinstance(data, dict):
if "config" not in data:
@@ -566,7 +578,8 @@ class BlockchainEngineFixtureCommon(BaseFixture):
fork: Fork = Field(..., alias="network")
post_state_hash: Hash | None = Field(None)
- last_block_hash: Hash = Field(..., alias="lastblockhash") # FIXME: lastBlockHash
+ # FIXME: lastBlockHash
+ last_block_hash: Hash = Field(..., alias="lastblockhash")
config: FixtureConfig
def get_fork(self) -> Fork | None:
@@ -616,7 +629,10 @@ class BlockchainEngineXFixture(BlockchainEngineFixtureCommon):
"""Hash of the pre-allocation group this test belongs to."""
post_state_diff: Alloc | None = None
- """State difference from genesis after test execution (efficiency optimization)."""
+ """
+ State difference from genesis after test execution (efficiency
+ optimization).
+ """
payloads: List[FixtureEngineNewPayload] = Field(..., alias="engineNewPayloads")
"""Engine API payloads for blockchain execution."""
diff --git a/src/ethereum_test_fixtures/collector.py b/src/ethereum_test_fixtures/collector.py
index e13e043125d..10606bfefc1 100644
--- a/src/ethereum_test_fixtures/collector.py
+++ b/src/ethereum_test_fixtures/collector.py
@@ -1,6 +1,6 @@
"""
-Fixture collector class used to collect, sort and combine the different types of generated
-fixtures.
+Fixture collector class used to collect, sort and combine the different types
+of generated fixtures.
"""
import json
@@ -23,9 +23,11 @@ class TestInfo:
"""Contains test information from the current node."""
name: str # pytest: Item.name, e.g. test_paris_one[fork_Paris-state_test]
- id: str # pytest: Item.nodeid, e.g. tests/paris/test_module_paris.py::test_paris_one[...]
+ id: str # pytest: Item.nodeid, e.g.
+ # tests/paris/test_module_paris.py::test_paris_one[...]
original_name: str # pytest: Item.originalname, e.g. test_paris_one
- module_path: Path # pytest: Item.path, e.g. .../tests/paris/test_module_paris.py
+ module_path: Path # pytest: Item.path, e.g.
+ # .../tests/paris/test_module_paris.py
test_prefix: ClassVar[str] = "test_" # Python test prefix
filler_suffix: ClassVar[str] = "Filler" # Static test suffix
@@ -41,11 +43,11 @@ def strip_test_name(cls, name: str) -> str:
def get_name_and_parameters(self) -> Tuple[str, str]:
"""
- Convert test name to a tuple containing the test name and test parameters.
-
- Example:
- test_push0_key_sstore[fork_Shanghai] -> test_push0_key_sstore, fork_Shanghai
+ Convert test name to a tuple containing the test name and test
+ parameters.
+ Example: test_push0_key_sstore[fork_Shanghai] -> test_push0_key_sstore,
+ fork_Shanghai
"""
test_name, parameters = self.name.split("[")
return test_name, re.sub(r"[\[\-]", "_", parameters).replace("]", "")
@@ -91,9 +93,8 @@ def get_module_relative_output_dir(self, filler_path: Path) -> Path:
base ./tests directory) that can be used for output (within the
configured fixtures output path or the base_dump_dir directory).
- Example:
- tests/shanghai/eip3855_push0/test_push0.py -> shanghai/eip3855_push0/test_push0
-
+ Example: tests/shanghai/eip3855_push0/test_push0.py ->
+ shanghai/eip3855_push0/test_push0
"""
basename = self.module_path.with_suffix("").absolute()
basename_relative = basename.relative_to(
@@ -122,8 +123,9 @@ def get_fixture_basename(self, info: TestInfo) -> Path:
"""Return basename of the fixture file for a given test case."""
module_relative_output_dir = info.get_module_relative_output_dir(self.filler_path)
- # Each legacy test filler has only 1 test per file if it's a !state test!
- # So no need to create directory Add11/add11.json it can be plain add11.json
+ # Each legacy test filler has only 1 test per file if it's a !state
+ # test! So no need to create directory Add11/add11.json it can be plain
+ # add11.json
if self.fill_static_tests:
return module_relative_output_dir.parent / info.original_name
@@ -140,7 +142,8 @@ def add_fixture(self, info: TestInfo, fixture: BaseFixture) -> Path:
/ fixture.output_base_dir_name()
/ fixture_basename.with_suffix(fixture.output_file_extension)
)
- if fixture_path not in self.all_fixtures.keys(): # relevant when we group by test function
+ # relevant when we group by test function
+ if fixture_path not in self.all_fixtures.keys():
self.all_fixtures[fixture_path] = Fixtures(root={})
self.json_path_to_test_item[fixture_path] = info
diff --git a/src/ethereum_test_fixtures/consume.py b/src/ethereum_test_fixtures/consume.py
index dcec641b3ab..b55f35efd47 100644
--- a/src/ethereum_test_fixtures/consume.py
+++ b/src/ethereum_test_fixtures/consume.py
@@ -34,7 +34,10 @@ def consume_fixture(
fixture_name: str | None = None,
debug_output_path: Path | None = None,
):
- """Test the client with the specified fixture using its direct consumer interface."""
+ """
+ Test the client with the specified fixture using its direct consumer
+ interface.
+ """
raise NotImplementedError(
"The `consume_fixture()` function is not supported by this tool."
)
@@ -59,7 +62,9 @@ class TestCaseStream(TestCaseBase):
class TestCaseIndexFile(TestCaseBase):
- """The test case model used to save/load test cases to/from an index file."""
+ """
+ The test case model used to save/load test cases to/from an index file.
+ """
json_path: Path
__test__ = False # stop pytest from collecting this class as a test
diff --git a/src/ethereum_test_fixtures/file.py b/src/ethereum_test_fixtures/file.py
index 2f74e06b37c..8679cfabcf5 100644
--- a/src/ethereum_test_fixtures/file.py
+++ b/src/ethereum_test_fixtures/file.py
@@ -15,13 +15,14 @@
class Fixtures(EthereumTestRootModel):
"""
A base class for defining top-level models that encapsulate multiple test
- fixtures. Each fixture is stored in a dictionary, where each key is a string
- (typically the fixture name) and its corresponding value is a fixture object.
- This is the structure used for blockchain and state JSON fixture files.
-
- This class implements dunder methods and other common functionality to allow
- interaction with the model's fixtures as if they were being accessed directly
- from a dictionary.
+ fixtures. Each fixture is stored in a dictionary, where each key is a
+ string (typically the fixture name) and its corresponding value is a
+ fixture object. This is the structure used for blockchain and state JSON
+ fixture files.
+
+ This class implements dunder methods and other common functionality to
+ allow interaction with the model's fixtures as if they were being accessed
+ directly from a dictionary.
"""
root: Dict[str, SerializeAsAny[BaseFixture]]
@@ -54,8 +55,8 @@ def collect_into_file(self, file_path: Path):
"""
For all formats, we join the fixtures as json into a single file.
- Note: We don't use pydantic model_dump_json() on the Fixtures object as we
- add the hash to the info field on per-fixture basis.
+ Note: We don't use pydantic model_dump_json() on the Fixtures object as
+ we add the hash to the info field on per-fixture basis.
"""
json_fixtures: Dict[str, Dict[str, Any]] = {}
lock_file_path = file_path.with_suffix(".lock")
diff --git a/src/ethereum_test_fixtures/pre_alloc_groups.py b/src/ethereum_test_fixtures/pre_alloc_groups.py
index c358b757093..25173760d08 100644
--- a/src/ethereum_test_fixtures/pre_alloc_groups.py
+++ b/src/ethereum_test_fixtures/pre_alloc_groups.py
@@ -22,7 +22,8 @@ class PreAllocGroup(CamelModel):
pre-allocation group optimization.
"""
- model_config = {"populate_by_name": True} # Allow both field names and aliases
+ # Allow both field names and aliases
+ model_config = {"populate_by_name": True}
test_ids: List[str] = Field(default_factory=list)
environment: Environment = Field(..., description="Grouping environment for this test group")
@@ -65,9 +66,10 @@ def to_file(self, file: Path) -> None:
else:
new_account = self.pre[account]
if new_account != existing_account:
- # This procedure fails during xdist worker's pytest_sessionfinish
- # and is not reported to the master thread.
- # We signal here that the groups created contain a collision.
+ # This procedure fails during xdist worker's
+ # pytest_sessionfinish and is not reported to the
+ # master thread. We signal here that the groups
+ # created contain a collision.
collision_file_path = file.with_suffix(".fail")
collision_exception = Alloc.CollisionError(
address=account,
@@ -87,7 +89,8 @@ class PreAllocGroups(EthereumTestRootModel):
"""
Root model mapping pre-allocation group hashes to test groups.
- If lazy_load is True, the groups are not loaded from the folder until they are accessed.
+ If lazy_load is True, the groups are not loaded from the folder until they
+ are accessed.
Iterating will fail if lazy_load is True.
"""
diff --git a/src/ethereum_test_fixtures/state.py b/src/ethereum_test_fixtures/state.py
index 8d389db57f3..3dfb6ba29c4 100644
--- a/src/ethereum_test_fixtures/state.py
+++ b/src/ethereum_test_fixtures/state.py
@@ -64,7 +64,9 @@ def from_transaction(cls, tx: Transaction) -> "FixtureTransaction":
class FixtureForkPostIndexes(BaseModel):
- """Type used to describe the indexes of a single post state of a single Fork."""
+ """
+ Type used to describe the indexes of a single post state of a single Fork.
+ """
data: int = 0
gas: int = 0
diff --git a/src/ethereum_test_fixtures/tests/test_blockchain.py b/src/ethereum_test_fixtures/tests/test_blockchain.py
index c2917a2ccf8..944f2ece29d 100644
--- a/src/ethereum_test_fixtures/tests/test_blockchain.py
+++ b/src/ethereum_test_fixtures/tests/test_blockchain.py
@@ -531,8 +531,9 @@
id="invalid_fixture_block_2",
),
pytest.param(
- False, # Can not be deserialized: A single expect_exception str will not be
- # deserialized as a list and therefore will not match the model_instance definition.
+ False, # Can not be deserialized: A single expect_exception str
+ # will not be deserialized as a list and therefore will not
+ # match the model_instance definition.
InvalidFixtureBlock(
rlp="0x00",
expect_exception=[TransactionException.INTRINSIC_GAS_TOO_LOW],
diff --git a/src/ethereum_test_fixtures/tests/test_state.py b/src/ethereum_test_fixtures/tests/test_state.py
index e4b3ab50cef..b1881065b20 100644
--- a/src/ethereum_test_fixtures/tests/test_state.py
+++ b/src/ethereum_test_fixtures/tests/test_state.py
@@ -50,8 +50,9 @@
id="state_fixture_fork_post_exception",
),
pytest.param(
- False, # Can not be deserialized: A single expect_exception str will not be
- # deserialized as a list and therefore will not match the model_instance definition.
+ False, # Can not be deserialized: A single expect_exception str
+ # will not be deserialized as a list and therefore will not
+ # match the model_instance definition.
FixtureForkPost(
state_root=0,
logs_hash=1,
diff --git a/src/ethereum_test_forks/base_fork.py b/src/ethereum_test_forks/base_fork.py
index 74840c55e12..69af59800d5 100644
--- a/src/ethereum_test_forks/base_fork.py
+++ b/src/ethereum_test_forks/base_fork.py
@@ -26,15 +26,22 @@
class ForkAttribute(Protocol):
- """A protocol to get the attribute of a fork at a given block number and timestamp."""
+ """
+ A protocol to get the attribute of a fork at a given block number and
+ timestamp.
+ """
def __call__(self, block_number: int = 0, timestamp: int = 0) -> Any:
- """Return value of the attribute at the given block number and timestamp."""
+ """
+ Return value of the attribute at the given block number and timestamp.
+ """
pass
class MemoryExpansionGasCalculator(Protocol):
- """A protocol to calculate the gas cost of memory expansion at a given fork."""
+ """
+ A protocol to calculate the gas cost of memory expansion at a given fork.
+ """
def __call__(self, *, new_bytes: int, previous_bytes: int = 0) -> int:
"""Return gas cost of expanding the memory by the given length."""
@@ -42,7 +49,10 @@ def __call__(self, *, new_bytes: int, previous_bytes: int = 0) -> int:
class CalldataGasCalculator(Protocol):
- """A protocol to calculate the transaction gas cost of calldata at a given fork."""
+ """
+ A protocol to calculate the transaction gas cost of calldata at a given
+ fork.
+ """
def __call__(self, *, data: BytesConvertible, floor: bool = False) -> int:
"""Return the transaction gas cost of calldata given its contents."""
@@ -50,7 +60,9 @@ def __call__(self, *, data: BytesConvertible, floor: bool = False) -> int:
class TransactionDataFloorCostCalculator(Protocol):
- """Calculate the transaction floor cost due to its calldata for a given fork."""
+ """
+ Calculate the transaction floor cost due to its calldata for a given fork.
+ """
def __call__(self, *, data: BytesConvertible) -> int:
"""Return transaction gas cost of calldata given its contents."""
@@ -68,7 +80,10 @@ def __call__(
class BaseFeeChangeCalculator(Protocol):
- """A protocol to calculate the gas that needs to be used to change the base fee."""
+ """
+ A protocol to calculate the gas that needs to be used to change the base
+ fee.
+ """
def __call__(
self,
@@ -82,7 +97,10 @@ def __call__(
class TransactionIntrinsicCostCalculator(Protocol):
- """A protocol to calculate the intrinsic gas cost of a transaction at a given fork."""
+ """
+ A protocol to calculate the intrinsic gas cost of a transaction at a given
+ fork.
+ """
def __call__(
self,
@@ -97,25 +115,32 @@ def __call__(
Return the intrinsic gas cost of a transaction given its properties.
Args:
- calldata: The data of the transaction.
- contract_creation: Whether the transaction creates a contract.
- access_list: The list of access lists for the transaction.
- authorization_list_or_count: The list of authorizations or the count of authorizations
- for the transaction.
- return_cost_deducted_prior_execution: If set to False, the returned value is equal to
- the minimum gas required for the transaction to be valid. If set to True, the
- returned value is equal to the cost that is deducted from the gas limit before
- the transaction starts execution.
-
- Returns:
- Gas cost of a transaction
+ calldata: The data of the transaction.
+ contract_creation: Whether the transaction creates a contract.
+ access_list: The list of access lists for the transaction.
+ authorization_list_or_count: The list of authorizations or the count
+ of authorizations for the transaction.
+ return_cost_deducted_prior_execution: If set to False, the returned
+ value is equal to the minimum
+ gas required for the
+ transaction to be valid. If
+ set to True, the returned
+ value is equal to the cost
+ that is deducted from the gas
+ limit before the transaction
+ starts execution.
+
+ Returns: Gas cost of a transaction
"""
pass
class BlobGasPriceCalculator(Protocol):
- """A protocol to calculate the blob gas price given the excess blob gas at a given fork."""
+ """
+ A protocol to calculate the blob gas price given the excess blob gas at a
+ given fork.
+ """
def __call__(self, *, excess_blob_gas: int) -> int:
"""Return the blob gas price given the excess blob gas."""
@@ -123,7 +148,9 @@ def __call__(self, *, excess_blob_gas: int) -> int:
class ExcessBlobGasCalculator(Protocol):
- """A protocol to calculate the excess blob gas for a block at a given fork."""
+ """
+ A protocol to calculate the excess blob gas for a block at a given fork.
+ """
def __call__(
self,
@@ -134,7 +161,10 @@ def __call__(
parent_blob_count: int | None = None,
parent_base_fee_per_gas: int,
) -> int:
- """Return the excess blob gas given the parent's excess blob gas and blob gas used."""
+ """
+ Return the excess blob gas given the parent's excess blob gas and blob
+ gas used.
+ """
pass
@@ -143,7 +173,10 @@ class BaseForkMeta(ABCMeta):
@abstractmethod
def name(cls) -> str:
- """Return the name of the fork (e.g., Berlin), must be implemented by subclasses."""
+ """
+ Return the name of the fork (e.g., Berlin), must be implemented by
+ subclasses.
+ """
pass
def __repr__(cls) -> str:
@@ -152,12 +185,18 @@ def __repr__(cls) -> str:
@staticmethod
def _maybe_transitioned(fork_cls: "BaseForkMeta") -> "BaseForkMeta":
- """Return the transitioned fork, if a transition fork, otherwise return `fork_cls`."""
+ """
+ Return the transitioned fork, if a transition fork, otherwise return
+ `fork_cls`.
+ """
return fork_cls.transitions_to() if hasattr(fork_cls, "transitions_to") else fork_cls
@staticmethod
def _is_subclass_of(a: "BaseForkMeta", b: "BaseForkMeta") -> bool:
- """Check if `a` is a subclass of `b`, taking fork transitions into account."""
+ """
+ Check if `a` is a subclass of `b`, taking fork transitions into
+ account.
+ """
a = BaseForkMeta._maybe_transitioned(a)
b = BaseForkMeta._maybe_transitioned(b)
return issubclass(a, b)
@@ -167,7 +206,10 @@ def __gt__(cls, other: "BaseForkMeta") -> bool:
return cls is not other and BaseForkMeta._is_subclass_of(cls, other)
def __ge__(cls, other: "BaseForkMeta") -> bool:
- """Compare if a fork is newer than or equal to some other fork (cls >= other)."""
+ """
+ Compare if a fork is newer than or equal to some other fork (cls >=
+ other).
+ """
return cls is other or BaseForkMeta._is_subclass_of(cls, other)
def __lt__(cls, other: "BaseForkMeta") -> bool:
@@ -176,7 +218,10 @@ def __lt__(cls, other: "BaseForkMeta") -> bool:
return cls is not other and BaseForkMeta._is_subclass_of(other, cls)
def __le__(cls, other: "BaseForkMeta") -> bool:
- """Compare if a fork is older than or equal to some other fork (cls <= other)."""
+ """
+ Compare if a fork is older than or equal to some other fork (cls <=
+ other).
+ """
return cls is other or BaseForkMeta._is_subclass_of(other, cls)
@@ -209,7 +254,10 @@ def __init_subclass__(
ignore: bool = False,
bpo_fork: bool = False,
) -> None:
- """Initialize new fork with values that don't carry over to subclass forks."""
+ """
+ Initialize new fork with values that don't carry over to subclass
+ forks.
+ """
cls._transition_tool_name = transition_tool_name
cls._solc_name = solc_name
cls._ignore = ignore
@@ -288,7 +336,10 @@ def gas_costs(cls, block_number: int = 0, timestamp: int = 0) -> GasCosts:
def memory_expansion_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> MemoryExpansionGasCalculator:
- """Return a callable that calculates the gas cost of memory expansion for the fork."""
+ """
+ Return a callable that calculates the gas cost of memory expansion for
+ the fork.
+ """
pass
@classmethod
@@ -297,8 +348,8 @@ def calldata_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> CalldataGasCalculator:
"""
- Return callable that calculates the transaction gas cost for its calldata
- depending on its contents.
+ Return callable that calculates the transaction gas cost for its
+ calldata depending on its contents.
"""
pass
@@ -307,7 +358,9 @@ def calldata_gas_calculator(
def base_fee_per_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BaseFeePerGasCalculator:
- """Return a callable that calculates the base fee per gas at a given fork."""
+ """
+ Return a callable that calculates the base fee per gas at a given fork.
+ """
pass
@classmethod
@@ -316,8 +369,8 @@ def base_fee_change_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BaseFeeChangeCalculator:
"""
- Return a callable that calculates the gas that needs to be used to change the
- base fee.
+ Return a callable that calculates the gas that needs to be used to
+ change the base fee.
"""
pass
@@ -344,7 +397,10 @@ def max_refund_quotient(cls) -> int:
def transaction_data_floor_cost_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> TransactionDataFloorCostCalculator:
- """Return a callable that calculates the transaction floor cost due to its calldata."""
+ """
+ Return a callable that calculates the transaction floor cost due to its
+ calldata.
+ """
pass
@classmethod
@@ -352,7 +408,10 @@ def transaction_data_floor_cost_calculator(
def transaction_intrinsic_cost_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> TransactionIntrinsicCostCalculator:
- """Return callable that calculates the intrinsic gas cost of a transaction for the fork."""
+ """
+ Return callable that calculates the intrinsic gas cost of a transaction
+ for the fork.
+ """
pass
@classmethod
@@ -360,7 +419,9 @@ def transaction_intrinsic_cost_calculator(
def blob_gas_price_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BlobGasPriceCalculator:
- """Return a callable that calculates the blob gas price at a given fork."""
+ """
+ Return a callable that calculates the blob gas price at a given fork.
+ """
pass
@classmethod
@@ -368,7 +429,10 @@ def blob_gas_price_calculator(
def excess_blob_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> ExcessBlobGasCalculator:
- """Return a callable that calculates the excess blob gas for a block at a given fork."""
+ """
+ Return a callable that calculates the excess blob gas for a block at a
+ given fork.
+ """
pass
@classmethod
@@ -416,7 +480,10 @@ def max_blobs_per_block(cls, block_number: int = 0, timestamp: int = 0) -> int:
@classmethod
@abstractmethod
def blob_reserve_price_active(cls, block_number: int = 0, timestamp: int = 0) -> bool:
- """Return whether the fork uses a reserve price mechanism for blobs or not."""
+ """
+ Return whether the fork uses a reserve price mechanism for blobs or
+ not.
+ """
pass
@classmethod
@@ -428,7 +495,10 @@ def blob_base_cost(cls, block_number: int = 0, timestamp: int = 0) -> int:
@classmethod
@abstractmethod
def full_blob_tx_wrapper_version(cls, block_number: int = 0, timestamp: int = 0) -> int | None:
- """Return the version of the full blob transaction wrapper at a given fork."""
+ """
+ Return the version of the full blob transaction wrapper at a given
+ fork.
+ """
pass
@classmethod
@@ -455,19 +525,27 @@ def tx_types(cls, block_number: int = 0, timestamp: int = 0) -> List[int]:
@classmethod
@abstractmethod
def contract_creating_tx_types(cls, block_number: int = 0, timestamp: int = 0) -> List[int]:
- """Return list of the transaction types supported by the fork that can create contracts."""
+ """
+ Return list of the transaction types supported by the fork that can
+ create contracts.
+ """
pass
@classmethod
@abstractmethod
def transaction_gas_limit_cap(cls, block_number: int = 0, timestamp: int = 0) -> int | None:
- """Return the transaction gas limit cap, or None if no limit is imposed."""
+ """
+ Return the transaction gas limit cap, or None if no limit is imposed.
+ """
pass
@classmethod
@abstractmethod
def block_rlp_size_limit(cls, block_number: int = 0, timestamp: int = 0) -> int | None:
- """Return the maximum RLP size of a block in bytes, or None if no limit is imposed."""
+ """
+ Return the maximum RLP size of a block in bytes, or None if no limit is
+ imposed.
+ """
pass
@classmethod
@@ -489,8 +567,9 @@ def pre_allocation(cls) -> Mapping:
"""
Return required pre-allocation of accounts for any kind of test.
- This method must always call the `fork_to` method when transitioning, because the
- allocation can only be set at genesis, and thus cannot be changed at transition time.
+ This method must always call the `fork_to` method when transitioning,
+ because the allocation can only be set at genesis, and thus cannot be
+ changed at transition time.
"""
pass
@@ -501,8 +580,9 @@ def pre_allocation_blockchain(cls) -> Mapping:
"""
Return required pre-allocation of accounts for any blockchain tests.
- This method must always call the `fork_to` method when transitioning, because the
- allocation can only be set at genesis, and thus cannot be changed at transition time.
+ This method must always call the `fork_to` method when transitioning,
+ because the allocation can only be set at genesis, and thus cannot be
+ changed at transition time.
"""
pass
@@ -513,8 +593,8 @@ def engine_new_payload_version(
cls, block_number: int = 0, timestamp: int = 0
) -> Optional[int]:
"""
- Return `None` if this fork's payloads cannot be sent over the engine API,
- or the payload version if it can.
+ Return `None` if this fork's payloads cannot be sent over the engine
+ API, or the payload version if it can.
"""
pass
@@ -522,8 +602,8 @@ def engine_new_payload_version(
@abstractmethod
def engine_new_payload_blob_hashes(cls, block_number: int = 0, timestamp: int = 0) -> bool:
"""
- Return true if the engine api version requires new payload calls to include
- blob hashes.
+ Return true if the engine api version requires new payload calls to
+ include blob hashes.
"""
pass
@@ -531,15 +611,18 @@ def engine_new_payload_blob_hashes(cls, block_number: int = 0, timestamp: int =
@abstractmethod
def engine_new_payload_beacon_root(cls, block_number: int = 0, timestamp: int = 0) -> bool:
"""
- Return true if the engine api version requires new payload calls to include a parent
- beacon block root.
+ Return true if the engine api version requires new payload calls to
+ include a parent beacon block root.
"""
pass
@classmethod
@abstractmethod
def engine_new_payload_requests(cls, block_number: int = 0, timestamp: int = 0) -> bool:
- """Return true if the engine api version requires new payload calls to include requests."""
+ """
+ Return true if the engine api version requires new payload calls to
+ include requests.
+ """
pass
@classmethod
@@ -548,8 +631,8 @@ def engine_new_payload_target_blobs_per_block(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
"""
- Return true if the engine api version requires new payload calls to include
- target blobs per block.
+ Return true if the engine api version requires new payload calls to
+ include target blobs per block.
"""
pass
@@ -559,8 +642,8 @@ def engine_execution_payload_block_access_list(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
"""
- Return `True` if the engine api version requires execution payload to include a
- `block_access_list`.
+ Return `True` if the engine api version requires execution payload to
+ include a `block_access_list`.
"""
pass
@@ -569,7 +652,10 @@ def engine_execution_payload_block_access_list(
def engine_payload_attribute_target_blobs_per_block(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
- """Return true if the payload attributes include the target blobs per block."""
+ """
+ Return true if the payload attributes include the target blobs per
+ block.
+ """
pass
@classmethod
@@ -577,7 +663,9 @@ def engine_payload_attribute_target_blobs_per_block(
def engine_payload_attribute_max_blobs_per_block(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
- """Return true if the payload attributes include the max blobs per block."""
+ """
+ Return true if the payload attributes include the max blobs per block.
+ """
pass
@classmethod
@@ -585,7 +673,10 @@ def engine_payload_attribute_max_blobs_per_block(
def engine_forkchoice_updated_version(
cls, block_number: int = 0, timestamp: int = 0
) -> Optional[int]:
- """Return `None` if the forks canonical chain cannot be set using the forkchoice method."""
+ """
+ Return `None` if the forks canonical chain cannot be set using the
+ forkchoice method.
+ """
pass
@classmethod
@@ -594,15 +685,18 @@ def engine_get_payload_version(
cls, block_number: int = 0, timestamp: int = 0
) -> Optional[int]:
"""
- Return `None` if the forks canonical chain cannot build a payload using the engine
- API.
+ Return `None` if the forks canonical chain cannot build a payload using
+ the engine API.
"""
pass
@classmethod
@abstractmethod
def engine_get_blobs_version(cls, block_number: int = 0, timestamp: int = 0) -> Optional[int]:
- """Return `None` if the fork does not support the engine get blobs version."""
+ """
+ Return `None` if the fork does not support the engine get blobs
+ version.
+ """
pass
# EVM information abstract methods
@@ -615,7 +709,10 @@ def evm_code_types(cls, block_number: int = 0, timestamp: int = 0) -> List[EVMCo
@classmethod
@abstractmethod
def max_code_size(cls) -> int:
- """Return the maximum code size allowed to be deployed in a contract creation."""
+ """
+ Return the maximum code size allowed to be deployed in a contract
+ creation.
+ """
pass
@classmethod
@@ -627,7 +724,10 @@ def max_stack_height(cls) -> int:
@classmethod
@abstractmethod
def max_initcode_size(cls) -> int:
- """Return the maximum initcode size allowed to be used in a contract creation."""
+ """
+ Return the maximum initcode size allowed to be used in a contract
+ creation.
+ """
pass
@classmethod
@@ -635,7 +735,10 @@ def max_initcode_size(cls) -> int:
def call_opcodes(
cls, block_number: int = 0, timestamp: int = 0
) -> List[Tuple[Opcodes, EVMCodeType]]:
- """Return list of tuples with the call opcodes and its corresponding EVM code type."""
+ """
+ Return list of tuples with the call opcodes and their corresponding
+ EVM code types.
+ """
pass
@classmethod
@@ -651,7 +754,10 @@ def valid_opcodes(
def create_opcodes(
cls, block_number: int = 0, timestamp: int = 0
) -> List[Tuple[Opcodes, EVMCodeType]]:
- """Return list of tuples with the create opcodes and its corresponding EVM code type."""
+ """
+ Return list of tuples with the create opcodes and their corresponding
+ EVM code types.
+ """
pass
@classmethod
@@ -669,15 +775,18 @@ def name(cls) -> str:
@classmethod
def fork_at(cls, block_number: int = 0, timestamp: int = 0) -> Type["BaseFork"]:
"""
- Return fork at the given block number and timestamp.
- Useful only for transition forks, and it's a no-op for normal forks.
+ Return fork at the given block number and timestamp. Useful only for
+ transition forks, and it's a no-op for normal forks.
"""
return cls
@classmethod
@abstractmethod
def transition_tool_name(cls, block_number: int = 0, timestamp: int = 0) -> str:
- """Return fork name as it's meant to be passed to the transition tool for execution."""
+ """
+ Return fork name as it's meant to be passed to the transition tool for
+ execution.
+ """
pass
@classmethod
diff --git a/src/ethereum_test_forks/forks/forks.py b/src/ethereum_test_forks/forks/forks.py
index c40484a34dd..3287a1ec1bd 100644
--- a/src/ethereum_test_forks/forks/forks.py
+++ b/src/ethereum_test_forks/forks/forks.py
@@ -34,7 +34,10 @@ class Frontier(BaseFork, solc_name="homestead"):
@classmethod
def transition_tool_name(cls, block_number: int = 0, timestamp: int = 0) -> str:
- """Return fork name as it's meant to be passed to the transition tool for execution."""
+ """
+ Return fork name as it's meant to be passed to the transition tool for
+ execution.
+ """
if cls._transition_tool_name is not None:
return cls._transition_tool_name
return cls.name()
@@ -78,7 +81,9 @@ def header_blob_gas_used_required(cls, block_number: int = 0, timestamp: int = 0
@classmethod
def gas_costs(cls, block_number: int = 0, timestamp: int = 0) -> GasCosts:
- """Return dataclass with the defined gas costs constants for genesis."""
+ """
+ Return dataclass with the defined gas costs constants for genesis.
+ """
return GasCosts(
G_JUMPDEST=1,
G_BASE=2,
@@ -126,7 +131,10 @@ def gas_costs(cls, block_number: int = 0, timestamp: int = 0) -> GasCosts:
def memory_expansion_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> MemoryExpansionGasCalculator:
- """Return callable that calculates the gas cost of memory expansion for the fork."""
+ """
+ Return callable that calculates the gas cost of memory expansion for
+ the fork.
+ """
gas_costs = cls.gas_costs(block_number, timestamp)
def fn(*, new_bytes: int, previous_bytes: int = 0) -> int:
@@ -147,8 +155,8 @@ def calldata_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> CalldataGasCalculator:
"""
- Return callable that calculates the transaction gas cost for its calldata
- depending on its contents.
+ Return callable that calculates the transaction gas cost for its
+ calldata depending on its contents.
"""
gas_costs = cls.gas_costs(block_number, timestamp)
@@ -169,7 +177,9 @@ def fn(*, data: BytesConvertible, floor: bool = False) -> int:
def base_fee_per_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BaseFeePerGasCalculator:
- """Return a callable that calculates the base fee per gas at a given fork."""
+ """
+ Return a callable that calculates the base fee per gas at a given fork.
+ """
raise NotImplementedError(f"Base fee per gas calculator is not supported in {cls.name()}")
@classmethod
@@ -177,8 +187,8 @@ def base_fee_change_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BaseFeeChangeCalculator:
"""
- Return a callable that calculates the gas that needs to be used to change the
- base fee.
+ Return a callable that calculates the gas that needs to be used to
+ change the base fee.
"""
raise NotImplementedError(f"Base fee change calculator is not supported in {cls.name()}")
@@ -212,7 +222,10 @@ def fn(*, data: BytesConvertible) -> int:
def transaction_intrinsic_cost_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> TransactionIntrinsicCostCalculator:
- """Return callable that calculates the intrinsic gas cost of a transaction for the fork."""
+ """
+ Return callable that calculates the intrinsic gas cost of a transaction
+ for the fork.
+ """
gas_costs = cls.gas_costs(block_number, timestamp)
calldata_gas_calculator = cls.calldata_gas_calculator(block_number, timestamp)
@@ -246,14 +259,19 @@ def fn(
def blob_gas_price_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BlobGasPriceCalculator:
- """Return a callable that calculates the blob gas price at a given fork."""
+ """
+ Return a callable that calculates the blob gas price at a given fork.
+ """
raise NotImplementedError(f"Blob gas price calculator is not supported in {cls.name()}")
@classmethod
def excess_blob_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> ExcessBlobGasCalculator:
- """Return a callable that calculates the excess blob gas for a block at a given fork."""
+ """
+ Return a callable that calculates the excess blob gas for a block at a
+ given fork.
+ """
raise NotImplementedError(f"Excess blob gas calculator is not supported in {cls.name()}")
@classmethod
@@ -290,7 +308,10 @@ def max_blobs_per_block(cls, block_number: int = 0, timestamp: int = 0) -> int:
@classmethod
def blob_reserve_price_active(cls, block_number: int = 0, timestamp: int = 0) -> bool:
- """Return whether the fork uses a reserve price mechanism for blobs or not."""
+ """
+ Return whether the fork uses a reserve price mechanism for blobs or
+ not.
+ """
raise NotImplementedError(f"Blob reserve price is not supported in {cls.name()}")
@classmethod
@@ -372,21 +393,28 @@ def engine_new_payload_target_blobs_per_block(
def engine_payload_attribute_target_blobs_per_block(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
- """At genesis, payload attributes do not include the target blobs per block."""
+ """
+ At genesis, payload attributes do not include the target blobs per
+ block.
+ """
return False
@classmethod
def engine_payload_attribute_max_blobs_per_block(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
- """At genesis, payload attributes do not include the max blobs per block."""
+ """
+ At genesis, payload attributes do not include the max blobs per block.
+ """
return False
@classmethod
def engine_forkchoice_updated_version(
cls, block_number: int = 0, timestamp: int = 0
) -> Optional[int]:
- """At genesis, forkchoice updates cannot be sent through the engine API."""
+ """
+ At genesis, forkchoice updates cannot be sent through the engine API.
+ """
return cls.engine_new_payload_version(block_number, timestamp)
@classmethod
@@ -446,8 +474,12 @@ def evm_code_types(cls, block_number: int = 0, timestamp: int = 0) -> List[EVMCo
@classmethod
def max_code_size(cls) -> int:
- """At genesis, there is no upper bound for code size (bounded by block gas limit)."""
- """However, the default is set to the limit of EIP-170 (Spurious Dragon)"""
+ """
+ At genesis, there is no upper bound for code size (bounded by block gas
+ limit).
+
+ However, the default is set to the limit of EIP-170 (Spurious Dragon)
+ """
return 0x6000
@classmethod
@@ -738,17 +770,17 @@ class Byzantium(Homestead):
@classmethod
def get_reward(cls, block_number: int = 0, timestamp: int = 0) -> int:
"""
- At Byzantium, the block reward is reduced to
- 3_000_000_000_000_000_000 wei.
+ At Byzantium, the block reward is reduced to 3_000_000_000_000_000_000
+ wei.
"""
return 3_000_000_000_000_000_000
@classmethod
def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[Address]:
"""
- At Byzantium, pre-compiles for bigint modular exponentiation, addition and scalar
- multiplication on elliptic curve alt_bn128, and optimal ate pairing check on
- elliptic curve alt_bn128 are introduced.
+ At Byzantium, pre-compiles for bigint modular exponentiation, addition
+ and scalar multiplication on elliptic curve alt_bn128, and optimal ate
+ pairing check on elliptic curve alt_bn128 are introduced.
"""
return [
Address(5, label="MODEXP"),
@@ -759,8 +791,12 @@ def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[Address]
@classmethod
def max_code_size(cls) -> int:
- # NOTE: Move this to Spurious Dragon once this fork is introduced. See EIP-170.
- """At Spurious Dragon, an upper bound was introduced for max contract code size."""
+ # NOTE: Move this to Spurious Dragon once this fork is introduced. See
+ # EIP-170.
+ """
+ At Spurious Dragon, an upper bound was introduced for max contract code
+ size.
+ """
return 0x6000
@classmethod
@@ -845,8 +881,8 @@ def valid_opcodes(
@classmethod
def gas_costs(cls, block_number: int = 0, timestamp: int = 0) -> GasCosts:
"""
- On Istanbul, the non-zero transaction data byte cost is reduced to 16 due to
- EIP-2028.
+ On Istanbul, the non-zero transaction data byte cost is reduced to 16
+ due to EIP-2028.
"""
return replace(
super(Istanbul, cls).gas_costs(block_number, timestamp),
@@ -878,7 +914,10 @@ def contract_creating_tx_types(cls, block_number: int = 0, timestamp: int = 0) -
def transaction_intrinsic_cost_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> TransactionIntrinsicCostCalculator:
- """At Berlin, the transaction intrinsic cost needs to take the access list into account."""
+ """
+ At Berlin, the transaction intrinsic cost needs to take the access list
+ into account.
+ """
super_fn = super(Berlin, cls).transaction_intrinsic_cost_calculator(
block_number, timestamp
)
@@ -964,19 +1003,20 @@ def base_fee_per_gas_calculator(
expected_base_fee_per_gas = parent_base_fee_per_gas
elif parent_gas_used > parent_gas_target:
gas_used_delta = parent_gas_used - parent_gas_target
- base_fee_per_gas_delta = max(
- parent_base_fee_per_gas * gas_used_delta // parent_gas_target \
- // BASE_FEE_MAX_CHANGE_DENOMINATOR,
- 1,
- )
- expected_base_fee_per_gas = parent_base_fee_per_gas + base_fee_per_gas_delta
+ base_fee_per_gas_delta = max(
+ parent_base_fee_per_gas * gas_used_delta
+ // parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR, 1)
+ expected_base_fee_per_gas = (
+ parent_base_fee_per_gas + base_fee_per_gas_delta)
else:
gas_used_delta = parent_gas_target - parent_gas_used
base_fee_per_gas_delta = (
- parent_base_fee_per_gas * gas_used_delta // \
- parent_gas_target // BASE_FEE_MAX_CHANGE_DENOMINATOR
- )
- expected_base_fee_per_gas = parent_base_fee_per_gas - base_fee_per_gas_delta
+ parent_base_fee_per_gas * gas_used_delta //
+ parent_gas_target //
+ BASE_FEE_MAX_CHANGE_DENOMINATOR
+ )
+ expected_base_fee_per_gas = (
+ parent_base_fee_per_gas - base_fee_per_gas_delta)
"""
base_fee_max_change_denominator = cls.base_fee_max_change_denominator(
block_number, timestamp
@@ -1016,8 +1056,8 @@ def base_fee_change_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> BaseFeeChangeCalculator:
"""
- Return a callable that calculates the gas that needs to be used to change the
- base fee.
+ Return a callable that calculates the gas that needs to be used to
+ change the base fee.
"""
base_fee_max_change_denominator = cls.base_fee_max_change_denominator(
block_number, timestamp
@@ -1150,8 +1190,12 @@ class Cancun(Shanghai):
"FIELD_ELEMENTS_PER_BLOB": 4096,
"BYTES_PER_FIELD_ELEMENT": 32,
"CELL_LENGTH": 2048,
- "BLS_MODULUS": 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001, # EIP-2537: Main subgroup order = q, due to this BLS_MODULUS every blob byte (uint256) must be smaller than 116 # noqa: E501
- # https://github.com/ethereum/consensus-specs/blob/cc6996c22692d70e41b7a453d925172ee4b719ad/specs/deneb/polynomial-commitments.md?plain=1#L78
+ # EIP-2537: Main subgroup order = q, due to this BLS_MODULUS
+ # every blob byte (uint256) must be smaller than 116
+ "BLS_MODULUS": 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001,
+ # https://github.com/ethereum/consensus-specs/blob/
+ # cc6996c22692d70e41b7a453d925172ee4b719ad/specs/deneb/
+ # polynomial-commitments.md?plain=1#L78
"BYTES_PER_PROOF": 48,
"BYTES_PER_COMMITMENT": 48,
"KZG_ENDIANNESS": "big",
@@ -1203,7 +1247,10 @@ def fn(*, excess_blob_gas) -> int:
def excess_blob_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> ExcessBlobGasCalculator:
- """Return a callable that calculates the excess blob gas for a block at Cancun."""
+ """
+ Return a callable that calculates the excess blob gas for a block at
+ Cancun.
+ """
target_blobs_per_block = cls.target_blobs_per_block(block_number, timestamp)
blob_gas_per_blob = cls.blob_gas_per_blob(block_number, timestamp)
target_blob_gas_per_block = target_blobs_per_block * blob_gas_per_blob
@@ -1214,7 +1261,8 @@ def fn(
parent_excess_blobs: int | None = None,
parent_blob_gas_used: int | None = None,
parent_blob_count: int | None = None,
- parent_base_fee_per_gas: int, # Required for Osaka as using this as base
+ # Required for Osaka as using this as base
+ parent_base_fee_per_gas: int,
) -> int:
del parent_base_fee_per_gas
@@ -1253,12 +1301,18 @@ def supports_blobs(cls, block_number: int = 0, timestamp: int = 0) -> bool:
@classmethod
def target_blobs_per_block(cls, block_number: int = 0, timestamp: int = 0) -> int:
- """Blobs are enabled starting from Cancun, with a static target of 3 blobs per block."""
+ """
+ Blobs are enabled starting from Cancun, with a static target of 3 blobs
+ per block.
+ """
return 3
@classmethod
def max_blobs_per_block(cls, block_number: int = 0, timestamp: int = 0) -> int:
- """Blobs are enabled starting from Cancun, with a static max of 6 blobs per block."""
+ """
+ Blobs are enabled starting from Cancun, with a static max of 6 blobs
+ per block.
+ """
return 6
@classmethod
@@ -1268,12 +1322,18 @@ def blob_reserve_price_active(cls, block_number: int = 0, timestamp: int = 0) ->
@classmethod
def full_blob_tx_wrapper_version(cls, block_number: int = 0, timestamp: int = 0) -> int | None:
- """Pre-Osaka forks don't use tx wrapper versions for full blob transactions."""
+ """
+ Pre-Osaka forks don't use tx wrapper versions for full blob
+ transactions.
+ """
return None
@classmethod
def max_blobs_per_tx(cls, block_number: int = 0, timestamp: int = 0) -> int:
- """Blobs are enabled starting from Cancun, with a static max equal to the max per block."""
+ """
+ Blobs are enabled starting from Cancun, with a static max equal to the
+ max per block.
+ """
return cls.max_blobs_per_block(block_number, timestamp)
@classmethod
@@ -1313,8 +1373,8 @@ def system_contracts(cls, block_number: int = 0, timestamp: int = 0) -> List[Add
@classmethod
def pre_allocation_blockchain(cls) -> Mapping:
"""
- Cancun requires pre-allocation of the beacon root contract for EIP-4788 on blockchain
- type tests.
+ Cancun requires pre-allocation of the beacon root contract for EIP-4788
+ on blockchain type tests.
"""
new_allocation = {
0x000F3DF6D732807EF1319FB7B8BB8522D0BEAC02: {
@@ -1406,8 +1466,8 @@ def tx_types(cls, block_number: int = 0, timestamp: int = 0) -> List[int]:
@classmethod
def gas_costs(cls, block_number: int = 0, timestamp: int = 0) -> GasCosts:
"""
- On Prague, the standard token cost and the floor token costs are introduced due to
- EIP-7623.
+ On Prague, the standard token cost and the floor token costs are
+ introduced due to EIP-7623.
"""
return replace(
super(Prague, cls).gas_costs(block_number, timestamp),
@@ -1419,7 +1479,10 @@ def gas_costs(cls, block_number: int = 0, timestamp: int = 0) -> GasCosts:
@classmethod
def system_contracts(cls, block_number: int = 0, timestamp: int = 0) -> List[Address]:
- """Prague introduces the system contracts for EIP-6110, EIP-7002, EIP-7251 and EIP-2935."""
+ """
+ Prague introduces the system contracts for EIP-6110, EIP-7002, EIP-7251
+ and EIP-2935.
+ """
return [
Address(
0x00000000219AB540356CBB839CBE05303D7705FA,
@@ -1441,7 +1504,10 @@ def system_contracts(cls, block_number: int = 0, timestamp: int = 0) -> List[Add
@classmethod
def max_request_type(cls, block_number: int = 0, timestamp: int = 0) -> int:
- """At Prague, three request types are introduced, hence the max request type is 2."""
+ """
+ At Prague, three request types are introduced, hence the max request
+ type is 2.
+ """
return 2
@classmethod
@@ -1449,8 +1515,8 @@ def calldata_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> CalldataGasCalculator:
"""
- Return a callable that calculates the transaction gas cost for its calldata
- depending on its contents.
+ Return a callable that calculates the transaction gas cost for its
+ calldata depending on its contents.
"""
gas_costs = cls.gas_costs(block_number, timestamp)
@@ -1471,7 +1537,10 @@ def fn(*, data: BytesConvertible, floor: bool = False) -> int:
def transaction_data_floor_cost_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> TransactionDataFloorCostCalculator:
- """On Prague, due to EIP-7623, the transaction data floor cost is introduced."""
+ """
+ On Prague, due to EIP-7623, the transaction data floor cost is
+ introduced.
+ """
calldata_gas_calculator = cls.calldata_gas_calculator(block_number, timestamp)
gas_costs = cls.gas_costs(block_number, timestamp)
@@ -1541,8 +1610,9 @@ def max_blobs_per_block(cls, block_number: int = 0, timestamp: int = 0) -> int:
@classmethod
def pre_allocation_blockchain(cls) -> Mapping:
"""
- Prague requires pre-allocation of the beacon chain deposit contract for EIP-6110,
- the exits contract for EIP-7002, and the history storage contract for EIP-2935.
+ Prague requires pre-allocation of the beacon chain deposit contract for
+ EIP-6110, the exits contract for EIP-7002, and the history storage
+ contract for EIP-2935.
"""
new_allocation = {}
@@ -1610,7 +1680,9 @@ def header_requests_required(cls, block_number: int = 0, timestamp: int = 0) ->
@classmethod
def engine_new_payload_requests(cls, block_number: int = 0, timestamp: int = 0) -> bool:
- """From Prague, new payloads include the requests hash as a parameter."""
+ """
+ From Prague, new payloads include the requests hash as a parameter.
+ """
return True
@classmethod
@@ -1624,7 +1696,9 @@ def engine_new_payload_version(
def engine_forkchoice_updated_version(
cls, block_number: int = 0, timestamp: int = 0
) -> Optional[int]:
- """At Prague, version number of NewPayload and ForkchoiceUpdated diverge."""
+ """
+ At Prague, version number of NewPayload and ForkchoiceUpdated diverge.
+ """
return 3
@@ -1698,7 +1772,9 @@ def precompiles(cls, block_number: int = 0, timestamp: int = 0) -> List[Address]
def excess_blob_gas_calculator(
cls, block_number: int = 0, timestamp: int = 0
) -> ExcessBlobGasCalculator:
- """Return a callable that calculates the excess blob gas for a block."""
+ """
+ Return a callable that calculates the excess blob gas for a block.
+ """
target_blobs_per_block = cls.target_blobs_per_block(block_number, timestamp)
blob_gas_per_blob = cls.blob_gas_per_blob(block_number, timestamp)
target_blob_gas_per_block = target_blobs_per_block * blob_gas_per_blob
@@ -1722,7 +1798,8 @@ def fn(
if parent_excess_blob_gas + parent_blob_gas_used < target_blob_gas_per_block:
return 0
- # EIP-7918: Apply reserve price when execution costs dominate blob costs
+ # EIP-7918: Apply reserve price when execution costs dominate blob
+ # costs
current_blob_base_fee = cls.blob_gas_price_calculator()(
excess_blob_gas=parent_excess_blob_gas
)
@@ -1745,7 +1822,10 @@ def fn(
@classmethod
def max_blobs_per_tx(cls, block_number: int = 0, timestamp: int = 0) -> int:
- """Blobs in Osaka, have a static max of 6 blobs per tx. Differs from the max per block."""
+ """
+ Blobs in Osaka have a static max of 6 blobs per tx. Differs from the
+ max per block.
+ """
return 6
@classmethod
@@ -1836,7 +1916,10 @@ def max_blobs_per_block(cls, block_number: int = 0, timestamp: int = 0) -> int:
class BPO5(BPO4, bpo_fork=True):
- """BPO5 fork - Blob Parameter Only fork 5 (Required to parse Fusaka devnet genesis files)."""
+ """
+ BPO5 fork - Blob Parameter Only fork 5 (Required to parse Fusaka devnet
+ genesis files).
+ """
pass
@@ -1846,7 +1929,9 @@ class Amsterdam(Osaka):
@classmethod
def header_bal_hash_required(cls, block_number: int = 0, timestamp: int = 0) -> bool:
- """From Amsterdam, header must contain block access list hash (EIP-7928)."""
+ """
+ From Amsterdam, header must contain block access list hash (EIP-7928).
+ """
return True
@classmethod
@@ -1866,8 +1951,8 @@ def engine_execution_payload_block_access_list(
cls, block_number: int = 0, timestamp: int = 0
) -> bool:
"""
- From Amsterdam, engine execution payload includes `block_access_list` as
- a parameter.
+ From Amsterdam, engine execution payload includes `block_access_list`
+ as a parameter.
"""
return True
diff --git a/src/ethereum_test_forks/helpers.py b/src/ethereum_test_forks/helpers.py
index 62796f3f530..3b138342800 100644
--- a/src/ethereum_test_forks/helpers.py
+++ b/src/ethereum_test_forks/helpers.py
@@ -20,7 +20,10 @@
class InvalidForkError(Exception):
- """Invalid fork error raised when the fork specified is not found or incompatible."""
+ """
+ Invalid fork error raised when the fork specified is not found or
+ incompatible.
+ """
def __init__(self, message):
"""Initialize the InvalidForkError exception."""
@@ -54,8 +57,8 @@ def __init__(self, message):
def get_forks() -> List[Type[BaseFork]]:
"""
- Return list of all the fork classes implemented by
- `ethereum_test_forks` ordered chronologically by deployment.
+ Return list of all the fork classes implemented by `ethereum_test_forks`
+ ordered chronologically by deployment.
"""
return all_forks[:]
@@ -177,8 +180,8 @@ def get_selected_fork_set(
transition_forks: bool = True,
) -> Set[Type[BaseFork]]:
"""
- Process sets derived from `--fork`, `--until` and `--from` to return an unified fork
- set.
+ Process sets derived from `--fork`, `--until` and `--from` to return a
+ unified fork set.
"""
selected_fork_set = set()
if single_fork:
@@ -200,8 +203,7 @@ def transition_fork_from_to(
fork_from: Type[BaseFork], fork_to: Type[BaseFork]
) -> Type[BaseFork] | None:
"""
- Return transition fork that transitions to and from the specified
- forks.
+ Return transition fork that transitions to and from the specified forks.
"""
for transition_fork in get_transition_forks():
if not issubclass(transition_fork, TransitionBaseClass):
@@ -231,8 +233,8 @@ def forks_from_until(
fork_from: Type[BaseFork], fork_until: Type[BaseFork]
) -> List[Type[BaseFork]]:
"""
- Return specified fork and all forks after it until and including the
- second specified fork.
+ Return specified fork and all forks after it until and including the second
+ specified fork.
"""
prev_fork = fork_until
@@ -266,13 +268,13 @@ def get_relative_fork_markers(
"""
Return a list of marker names for a given fork.
- For a base fork (e.g. `Shanghai`), return [ `Shanghai` ].
- For a transition fork (e.g. `ShanghaiToCancunAtTime15k` which transitions to `Cancun`),
+ For a base fork (e.g. `Shanghai`), return [ `Shanghai` ]. For a transition
+ fork (e.g. `ShanghaiToCancunAtTime15k` which transitions to `Cancun`),
return [ `ShanghaiToCancunAtTime15k`, `Cancun` ].
- If `strict_mode` is set to `True`, raise an `InvalidForkError` if the fork is not found,
- otherwise, simply return the provided (str) `fork_identifier` (this is required to run
- `consume` with forks that are unknown to EEST).
+ If `strict_mode` is set to `True`, raise an `InvalidForkError` if the fork
+ is not found, otherwise, simply return the provided (str) `fork_identifier`
+ (this is required to run `consume` with forks that are unknown to EEST).
"""
all_forks = set(get_forks()) | set(get_transition_forks())
if isinstance(fork_identifier, str):
@@ -302,7 +304,10 @@ def get_fork_by_name(fork_name: str) -> Type[BaseFork] | None:
class ForkRangeDescriptor(BaseModel):
- """Fork descriptor parsed from string normally contained in ethereum/tests fillers."""
+ """
+ Fork descriptor parsed from string normally contained in ethereum/tests
+ fillers.
+ """
greater_equal: Type[BaseFork] | None = None
less_than: Type[BaseFork] | None = None
@@ -323,8 +328,10 @@ def validate_fork_range_descriptor(cls, v: Any, handler: ValidatorFunctionWrapHa
Validate the fork range descriptor from a string.
Examples:
- - ">=Osaka" validates to {greater_equal=Osaka, less_than=None}
- - ">=Prague=Osaka" validates to {greater_equal=Osaka, less_than=None}
+
+ - ">=PragueB, when filling, and given the from/until markers,
- we expect the following logic:
+ E.g. given transition fork A->B, when filling, and given the from/until
+ markers, we expect the following logic:
Marker Comparison A->B Included
--------- ------------ ---------------
@@ -223,7 +224,8 @@ def test_transition_fork_comparison():
assert BerlinToLondonAt5 >= London
assert BerlinToLondonAt5 <= London
- # Comparisons between transition forks is done against the `transitions_to` fork
+ # Comparisons between transition forks is done against the `transitions_to`
+ # fork
assert BerlinToLondonAt5 < ParisToShanghaiAtTime15k
assert ParisToShanghaiAtTime15k > BerlinToLondonAt5
assert BerlinToLondonAt5 == BerlinToLondonAt5
@@ -353,8 +355,9 @@ class FutureFork(Osaka):
"""
Dummy fork used for testing.
- Contains no changes to the blob parameters from the parent fork in order to confirm that
- it's added to the blob schedule even if it doesn't have any changes.
+ Contains no changes to the blob parameters from the parent fork in order to
+ confirm that it's added to the blob schedule even if it doesn't have any
+ changes.
"""
pass
diff --git a/src/ethereum_test_forks/transition_base_fork.py b/src/ethereum_test_forks/transition_base_fork.py
index 81c3e3f6576..60b43b63a9b 100644
--- a/src/ethereum_test_forks/transition_base_fork.py
+++ b/src/ethereum_test_forks/transition_base_fork.py
@@ -24,7 +24,9 @@ def transitions_from(cls) -> Type[BaseFork]:
def base_fork_abstract_methods() -> List[str]:
- """Return list of all abstract methods that must be implemented by a fork."""
+ """
+ Return list of all abstract methods that must be implemented by a fork.
+ """
return list(BaseFork.__abstractmethods__)
diff --git a/src/ethereum_test_rpc/__init__.py b/src/ethereum_test_rpc/__init__.py
index 85e533d89bd..888cdf3c633 100644
--- a/src/ethereum_test_rpc/__init__.py
+++ b/src/ethereum_test_rpc/__init__.py
@@ -1,4 +1,6 @@
-"""JSON-RPC methods and helper functions for EEST consume based hive simulators."""
+"""
+JSON-RPC methods and helper functions for EEST consume based hive simulators.
+"""
from .rpc import (
AdminRPC,
diff --git a/src/ethereum_test_rpc/rpc.py b/src/ethereum_test_rpc/rpc.py
index 1f289b628ff..a09055ba123 100644
--- a/src/ethereum_test_rpc/rpc.py
+++ b/src/ethereum_test_rpc/rpc.py
@@ -1,4 +1,6 @@
-"""JSON-RPC methods and helper functions for EEST consume based hive simulators."""
+"""
+JSON-RPC methods and helper functions for EEST consume based hive simulators.
+"""
import time
from itertools import count
@@ -30,13 +32,18 @@
class SendTransactionExceptionError(Exception):
- """Represent an exception that is raised when a transaction fails to be sent."""
+ """
+ Represent an exception that is raised when a transaction fails to be sent.
+ """
tx: Transaction | None = None
tx_rlp: Bytes | None = None
def __init__(self, *args, tx: Transaction | None = None, tx_rlp: Bytes | None = None):
- """Initialize SendTransactionExceptionError class with the given transaction."""
+ """
+ Initialize SendTransactionExceptionError class with the given
+ transaction.
+ """
super().__init__(*args)
self.tx = tx
self.tx_rlp = tx_rlp
@@ -51,7 +58,10 @@ def __str__(self):
class BaseRPC:
- """Represents a base RPC class for every RPC call used within EEST based hive simulators."""
+ """
+ Represents a base RPC class for every RPC call used within EEST based hive
+ simulators.
+ """
namespace: ClassVar[str]
response_validation_context: Any | None
@@ -68,7 +78,9 @@ def __init__(
self.response_validation_context = response_validation_context
def __init_subclass__(cls, namespace: str | None = None) -> None:
- """Set namespace of the RPC class to the lowercase of the class name."""
+ """
+ Set namespace of the RPC class to the lowercase of the class name.
+ """
if namespace is None:
namespace = cls.__name__
if namespace.endswith("RPC"):
@@ -85,7 +97,10 @@ def post_request(
request_id: int | str | None = None,
timeout: int | None = None,
) -> Any:
- """Send JSON-RPC POST request to the client RPC server at port defined in the url."""
+ """
+ Send JSON-RPC POST request to the client RPC server at port defined in
+ the url.
+ """
if extra_headers is None:
extra_headers = {}
if params is None:
@@ -123,8 +138,8 @@ def post_request(
class EthRPC(BaseRPC):
"""
- Represents an `eth_X` RPC class for every default ethereum RPC method used within EEST based
- hive simulators.
+ Represents an `eth_X` RPC class for every default ethereum RPC method used
+ within EEST based hive simulators.
"""
transaction_wait_timeout: int = 60
@@ -137,12 +152,18 @@ def __init__(
transaction_wait_timeout: int = 60,
**kwargs,
):
- """Initialize EthRPC class with the given url and transaction wait timeout."""
+ """
+ Initialize EthRPC class with the given url and transaction wait
+ timeout.
+ """
super().__init__(*args, **kwargs)
self.transaction_wait_timeout = transaction_wait_timeout
def config(self, timeout: int | None = None):
- """`eth_config`: Returns information about a fork configuration of the client."""
+ """
+ `eth_config`: Returns information about a fork configuration of the
+ client.
+ """
try:
response = self.post_request(method="config", timeout=timeout)
if response is None:
@@ -165,7 +186,10 @@ def chain_id(self) -> int:
return int(response, 16)
def get_block_by_number(self, block_number: BlockNumberType = "latest", full_txs: bool = True):
- """`eth_getBlockByNumber`: Returns information about a block by block number."""
+ """
+ `eth_getBlockByNumber`: Returns information about a block by block
+ number.
+ """
block = hex(block_number) if isinstance(block_number, int) else block_number
params = [block, full_txs]
response = self.post_request(method="getBlockByNumber", params=params)
@@ -180,7 +204,9 @@ def get_block_by_hash(self, block_hash: Hash, full_txs: bool = True):
return response
def get_balance(self, address: Address, block_number: BlockNumberType = "latest") -> int:
- """`eth_getBalance`: Returns the balance of the account of given address."""
+ """
+ `eth_getBalance`: Returns the balance of the account of given address.
+ """
block = hex(block_number) if isinstance(block_number, int) else block_number
params = [f"{address}", block]
@@ -200,7 +226,10 @@ def get_code(self, address: Address, block_number: BlockNumberType = "latest") -
def get_transaction_count(
self, address: Address, block_number: BlockNumberType = "latest"
) -> int:
- """`eth_getTransactionCount`: Returns the number of transactions sent from an address."""
+ """
+ `eth_getTransactionCount`: Returns the number of transactions sent from
+ an address.
+ """
block = hex(block_number) if isinstance(block_number, int) else block_number
params = [f"{address}", block]
@@ -226,7 +255,10 @@ def get_transaction_by_hash(self, transaction_hash: Hash) -> TransactionByHashRe
def get_storage_at(
self, address: Address, position: Hash, block_number: BlockNumberType = "latest"
) -> Hash:
- """`eth_getStorageAt`: Returns the value from a storage position at a given address."""
+ """
+ `eth_getStorageAt`: Returns the value from a storage position at a
+ given address.
+ """
block = hex(block_number) if isinstance(block_number, int) else block_number
params = [f"{address}", f"{position}", block]
@@ -234,7 +266,10 @@ def get_storage_at(
return Hash(response)
def gas_price(self) -> int:
- """`eth_gasPrice`: Returns the number of transactions sent from an address."""
+ """
+ `eth_gasPrice`: Returns the number of transactions sent from an
+ address.
+ """
response = self.post_request(method="gasPrice")
return int(response, 16)
@@ -247,7 +282,7 @@ def send_raw_transaction(
response = self.post_request(
method="sendRawTransaction",
params=[transaction_rlp.hex()],
- request_id=request_id, # noqa: E501
+ request_id=request_id,
)
result_hash = Hash(response)
@@ -263,7 +298,7 @@ def send_transaction(self, transaction: Transaction) -> Hash:
response = self.post_request(
method="sendRawTransaction",
params=[transaction.rlp().hex()],
- request_id=transaction.metadata_string(), # noqa: E501
+ request_id=transaction.metadata_string(),
)
result_hash = Hash(response)
@@ -274,15 +309,18 @@ def send_transaction(self, transaction: Transaction) -> Hash:
raise SendTransactionExceptionError(str(e), tx=transaction) from e
def send_transactions(self, transactions: List[Transaction]) -> List[Hash]:
- """Use `eth_sendRawTransaction` to send a list of transactions to the client."""
+ """
+ Use `eth_sendRawTransaction` to send a list of transactions to the
+ client.
+ """
return [self.send_transaction(tx) for tx in transactions]
def storage_at_keys(
self, account: Address, keys: List[Hash], block_number: BlockNumberType = "latest"
) -> Dict[Hash, Hash]:
"""
- Retrieve the storage values for the specified keys at a given address and block
- number.
+ Retrieve the storage values for the specified keys at a given address
+ and block number.
"""
results: Dict[Hash, Hash] = {}
for key in keys:
@@ -291,7 +329,10 @@ def storage_at_keys(
return results
def wait_for_transaction(self, transaction: Transaction) -> TransactionByHashResponse:
- """Use `eth_getTransactionByHash` to wait until a transaction is included in a block."""
+ """
+ Use `eth_getTransactionByHash` to wait until a transaction is included
+ in a block.
+ """
tx_hash = transaction.hash
start_time = time.time()
while True:
@@ -310,8 +351,8 @@ def wait_for_transactions(
self, transactions: List[Transaction]
) -> List[TransactionByHashResponse]:
"""
- Use `eth_getTransactionByHash` to wait until all transactions in list are included in a
- block.
+ Use `eth_getTransactionByHash` to wait until all transactions in list
+ are included in a block.
"""
tx_hashes = [tx.hash for tx in transactions]
responses: List[TransactionByHashResponse] = []
@@ -345,15 +386,18 @@ def send_wait_transaction(self, transaction: Transaction):
return self.wait_for_transaction(transaction)
def send_wait_transactions(self, transactions: List[Transaction]):
- """Send list of transactions and waits until all of them are included in a block."""
+ """
+ Send list of transactions and waits until all of them are included in a
+ block.
+ """
self.send_transactions(transactions)
return self.wait_for_transactions(transactions)
class DebugRPC(EthRPC):
"""
- Represents an `debug_X` RPC class for every default ethereum RPC method used within EEST based
- hive simulators.
+ Represents an `debug_X` RPC class for every default ethereum RPC method
+ used within EEST based hive simulators.
"""
def trace_call(self, tr: dict[str, str], block_number: str):
@@ -364,8 +408,8 @@ def trace_call(self, tr: dict[str, str], block_number: str):
class EngineRPC(BaseRPC):
"""
- Represents an Engine API RPC class for every Engine API method used within EEST based hive
- simulators.
+ Represents an Engine API RPC class for every Engine API method used within
+ EEST based hive simulators.
"""
jwt_secret: bytes
@@ -389,7 +433,10 @@ def post_request(
request_id: int | str | None = None,
timeout: int | None = None,
) -> Any:
- """Send JSON-RPC POST request to the client RPC server at port defined in the url."""
+ """
+ Send JSON-RPC POST request to the client RPC server at port defined in
+ the url.
+ """
if extra_headers is None:
extra_headers = {}
jwt_token = encode(
@@ -410,7 +457,10 @@ def post_request(
)
def new_payload(self, *params: Any, version: int) -> PayloadStatus:
- """`engine_newPayloadVX`: Attempts to execute the given payload on an execution client."""
+ """
+ `engine_newPayloadVX`: Attempts to execute the given payload on an
+ execution client.
+ """
method = f"newPayloadV{version}"
params_list = [to_json(param) for param in params]
@@ -426,7 +476,10 @@ def forkchoice_updated(
*,
version: int,
) -> ForkchoiceUpdateResponse:
- """`engine_forkchoiceUpdatedVX`: Updates the forkchoice state of the execution client."""
+ """
+ `engine_forkchoiceUpdatedVX`: Updates the forkchoice state of the
+ execution client.
+ """
method = f"forkchoiceUpdatedV{version}"
if payload_attributes is None:
@@ -468,7 +521,9 @@ def get_blobs(
*,
version: int,
) -> GetBlobsResponse | None:
- """`engine_getBlobsVX`: Retrieves blobs from an execution layers tx pool."""
+ """
+ `engine_getBlobsVX`: Retrieves blobs from an execution layers tx pool.
+ """
method = f"getBlobsV{version}"
params = [f"{h}" for h in versioned_hashes]
diff --git a/src/ethereum_test_rpc/rpc_types.py b/src/ethereum_test_rpc/rpc_types.py
index 8d874d07fb6..d904d441cc6 100644
--- a/src/ethereum_test_rpc/rpc_types.py
+++ b/src/ethereum_test_rpc/rpc_types.py
@@ -55,7 +55,8 @@ class TransactionByHashResponse(Transaction):
transaction_hash: Hash = Field(..., alias="hash")
sender: EOA | None = Field(None, alias="from")
- # The to field can have different names in different clients, so we use AliasChoices.
+ # The to field can have different names in different clients, so we use
+ # AliasChoices.
to: Address | None = Field(..., validation_alias=AliasChoices("to_address", "to", "toAddress"))
v: HexNumber = Field(0, validation_alias=AliasChoices("v", "yParity")) # type: ignore
@@ -64,8 +65,8 @@ class TransactionByHashResponse(Transaction):
@classmethod
def adapt_clients_response(cls, data: Any) -> Any:
"""
- Perform modifications necessary to adapt the response returned by clients
- so it can be parsed by our model.
+ Perform modifications necessary to adapt the response returned by
+ clients so it can be parsed by our model.
"""
if isinstance(data, dict):
if "gasPrice" in data and "maxFeePerGas" in data:
@@ -75,8 +76,8 @@ def adapt_clients_response(cls, data: Any) -> Any:
def model_post_init(self, __context):
"""
- Check that the transaction hash returned by the client matches the one calculated by
- us.
+ Check that the transaction hash returned by the client matches the one
+ calculated by us.
"""
Transaction.model_post_init(self, __context)
assert self.transaction_hash == self.hash
diff --git a/src/ethereum_test_rpc/tests/test_types.py b/src/ethereum_test_rpc/tests/test_types.py
index c0a6892e9af..0a42a078b35 100644
--- a/src/ethereum_test_rpc/tests/test_types.py
+++ b/src/ethereum_test_rpc/tests/test_types.py
@@ -99,7 +99,9 @@
@pytest.fixture
def eth_config_response() -> EthConfigResponse:
- """Get the `eth_config` response from the client to be verified by all tests."""
+ """
+ Get the `eth_config` response from the client to be verified by all tests.
+ """
return EthConfigResponse.model_validate(eth_config_dict)
diff --git a/src/ethereum_test_specs/base.py b/src/ethereum_test_specs/base.py
index a82c0d94568..3fee6ba4bc1 100644
--- a/src/ethereum_test_specs/base.py
+++ b/src/ethereum_test_specs/base.py
@@ -1,4 +1,6 @@
-"""Base test class and helper functions for Ethereum state and blockchain tests."""
+"""
+Base test class and helper functions for Ethereum state and blockchain tests.
+"""
import hashlib
from abc import abstractmethod
@@ -43,8 +45,8 @@ def __str__(self):
def verify_result(result: Result, env: Environment):
"""
- Verify that values in the t8n result match the expected values.
- Raises exception on unexpected values.
+ Verify that values in the t8n result match the expected values. Raises
+ exception on unexpected values.
"""
if env.withdrawals is not None:
assert result.withdrawals_root == to_hex(Withdrawal.list_root(env.withdrawals))
@@ -61,7 +63,9 @@ class OpMode(StrEnum):
class BaseTest(BaseModel):
- """Represents a base Ethereum test which must return a single test fixture."""
+ """
+ Represents a base Ethereum test which must return a single test fixture.
+ """
model_config = ConfigDict(extra="forbid")
@@ -93,7 +97,10 @@ def discard_fixture_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard a fixture format from filling if the appropriate marker is used."""
+ """
+ Discard a fixture format from filling if the appropriate marker is
+ used.
+ """
return False
@classmethod
@@ -132,7 +139,10 @@ def discard_execute_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard an execute format from executing if the appropriate marker is used."""
+ """
+ Discard an execute format from executing if the appropriate marker is
+ used.
+ """
return False
@abstractmethod
@@ -189,10 +199,11 @@ def is_tx_gas_heavy_test(self) -> bool:
def is_exception_test(self) -> bool | None:
"""
- Check if the test is an exception test (invalid block, invalid transaction).
+ Check if the test is an exception test (invalid block, invalid
+ transaction).
- `None` is returned if it's not possible to determine if the test is negative or not.
- This is the case when the test is not run in pytest.
+ `None` is returned if it's not possible to determine if the test is
+ negative or not. This is the case when the test is not run in pytest.
"""
if self._request is not None and hasattr(self._request, "node"):
return self._request.node.get_closest_marker("exception_test") is not None
@@ -231,7 +242,8 @@ def get_genesis_environment(self, fork: Fork) -> Environment:
"""
Get the genesis environment for pre-allocation groups.
- Must be implemented by subclasses to provide the appropriate environment.
+ Must be implemented by subclasses to provide the appropriate
+ environment.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement genesis environment access for use with "
@@ -241,7 +253,10 @@ def get_genesis_environment(self, fork: Fork) -> Environment:
def update_pre_alloc_groups(
self, pre_alloc_groups: PreAllocGroups, fork: Fork, test_id: str
) -> PreAllocGroups:
- """Create or update the pre-allocation group with the pre from the current spec."""
+ """
+ Create or update the pre-allocation group with the pre from the current
+ spec.
+ """
if not hasattr(self, "pre"):
raise AttributeError(
f"{self.__class__.__name__} does not have a 'pre' field. Pre-allocation groups "
@@ -261,7 +276,8 @@ def update_pre_alloc_groups(
group.test_ids.append(str(test_id))
pre_alloc_groups[pre_alloc_hash] = group
else:
- # Create new group - use Environment instead of expensive genesis generation
+ # Create new group - use Environment instead of expensive genesis
+ # generation
genesis_env = self.get_genesis_environment(fork)
pre_alloc = Alloc.merge(
Alloc.model_validate(fork.pre_allocation_blockchain()),
diff --git a/src/ethereum_test_specs/base_static.py b/src/ethereum_test_specs/base_static.py
index cdd60d6164a..3dd61bbebc6 100644
--- a/src/ethereum_test_specs/base_static.py
+++ b/src/ethereum_test_specs/base_static.py
@@ -1,4 +1,6 @@
-"""Base class to parse test cases written in static formats."""
+"""
+Base class to parse test cases written in static formats.
+"""
import re
from abc import abstractmethod
@@ -25,8 +27,8 @@ class BaseStaticTest(BaseModel):
@classmethod
def __pydantic_init_subclass__(cls, **kwargs):
"""
- Register all subclasses of BaseStaticTest with a static test format name set
- as possible static test format.
+ Register all subclasses of BaseStaticTest with a static test format
+ name set as possible static test format.
"""
if cls.format_name:
# Register the new fixture format
@@ -55,8 +57,9 @@ def fill_function(self) -> Callable:
This method should be implemented by the subclasses.
- The function returned can be optionally decorated with the `@pytest.mark.parametrize`
- decorator to parametrize the test with the number of sub test cases.
+ The function returned can be optionally decorated with the
+ `@pytest.mark.parametrize` decorator to parametrize the test with the
+ number of sub test cases.
Example:
```
@@ -68,7 +71,7 @@ def test_state_filler(
fork: Fork,
pre: Alloc,
n: int,
- m: int,
+ m: int
):
\"\"\"Generate a test from a static state filler.\"\"\"
assert n == 1
@@ -85,35 +88,46 @@ def test_state_filler(
sender=sender,
)
state_test(env=env, pre=pre, post={}, tx=tx)
-
- return test_state_filler
```
- To aid the generation of the test, the function can be defined and then the decorator be
- applied after defining the function:
+ To aid the generation of the test, the function can be defined and then
+ the decorator be applied after defining the function:
```
def test_state_filler(
- state_test: StateTestFiller,
+ state_test: StateTestFiller,
fork: Fork,
pre: Alloc,
n: int,
m: int,
):
- ...
- test_state_filler = pytest.mark.parametrize("n", [1])(test_state_filler)
- test_state_filler = pytest.mark.parametrize("m", [1, 2])(test_state_filler)
+
+ ...
+
+    test_state_filler = pytest.mark.parametrize(
+        "n", [1]
+    )(test_state_filler)
+    test_state_filler = pytest.mark.parametrize(
+        "m", [1, 2]
+    )(test_state_filler)
+
if self.valid_from:
- test_state_filler = pytest.mark.valid_from(self.valid_from)(test_state_filler)
+ test_state_filler = pytest.mark.valid_from(
+ self.valid_from
+ )(test_state_filler)
+
if self.valid_until:
- test_state_filler = pytest.mark.valid_until(self.valid_until)(test_state_filler)
+ test_state_filler = pytest.mark.valid_until(
+ self.valid_until
+ )(test_state_filler)
+
return test_state_filler
```
- The function can contain the following parameters on top of the spec type parameter
- (`state_test` in the example above):
- - `fork`: The fork for which the test is currently being filled.
- - `pre`: The pre-state of the test.
+    The function can contain the following parameters on top of the spec
+    type parameter (`state_test` in the example above):
+    - `fork`: The fork for which the test is currently being filled.
+    - `pre`: The pre-state of the test.
"""
raise NotImplementedError
@@ -143,8 +157,8 @@ def remove_comments_from_model(cls, data: Any) -> Any:
def remove_comments(v: str) -> str:
"""
- Split by line and then remove the comments (starting with #) at the end of each line if
- any.
+ Split by line and then remove the comments (starting with #) at the end of
+ each line if any.
"""
return "\n".join([line.split("#")[0].strip() for line in v.splitlines()])
diff --git a/src/ethereum_test_specs/benchmark.py b/src/ethereum_test_specs/benchmark.py
index 440faa8b844..c6fdd1b644b 100644
--- a/src/ethereum_test_specs/benchmark.py
+++ b/src/ethereum_test_specs/benchmark.py
@@ -54,7 +54,10 @@ def generate_transaction(self, pre: Alloc, gas_limit: int, fork: Fork) -> Transa
def generate_repeated_code(
self, repeated_code: Bytecode, setup: Bytecode, fork: Fork
) -> Bytecode:
- """Calculate the maximum number of iterations that can fit in the code size limit."""
+ """
+ Calculate the maximum number of iterations that
+ can fit in the code size limit.
+ """
assert len(repeated_code) > 0, "repeated_code cannot be empty"
max_code_size = fork.max_code_size()
@@ -114,7 +117,10 @@ class BenchmarkTest(BaseTest):
@classmethod
def pytest_parameter_name(cls) -> str:
- """Return the parameter name used in pytest to select this spec type."""
+ """
+ Return the parameter name used in pytest
+ to select this spec type.
+ """
return "benchmark_test"
@classmethod
@@ -124,7 +130,10 @@ def discard_fixture_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard a fixture format from filling if the appropriate marker is used."""
+ """
+ Discard a fixture format from filling if the
+ appropriate marker is used.
+ """
if "blockchain_test_only" in [m.name for m in markers]:
return fixture_format != BlockchainFixture
if "blockchain_test_engine_only" in [m.name for m in markers]:
@@ -136,7 +145,10 @@ def get_genesis_environment(self, fork: Fork) -> Environment:
return self.env
def split_transaction(self, tx: Transaction, gas_limit_cap: int | None) -> List[Transaction]:
- """Split a transaction that exceeds the gas limit cap into multiple transactions."""
+ """
+ Split a transaction that exceeds the gas
+ limit cap into multiple transactions.
+ """
if gas_limit_cap is None:
tx.gas_limit = HexNumber(self.gas_benchmark_value)
return [tx]
diff --git a/src/ethereum_test_specs/blockchain.py b/src/ethereum_test_specs/blockchain.py
index 796a34ce4df..6669ab2f274 100644
--- a/src/ethereum_test_specs/blockchain.py
+++ b/src/ethereum_test_specs/blockchain.py
@@ -130,27 +130,22 @@ class Header(CamelModel):
"""
EMPTY_FIELD: ClassVar[Removable] = Removable()
"""
- Sentinel object used to specify that a header field must be empty during verification.
+ Sentinel object used to specify that a header field must be empty during
+ verification.
- This can be used in a test to explicitly skip a field in a block's RLP encoding.
- included in the (json) output when the model is serialized. For example:
- ```
- header_modifier = Header(
- excess_blob_gas=Header.REMOVE_FIELD,
- )
- block = Block(
- timestamp=TIMESTAMP,
- rlp_modifier=header_modifier,
- exception=BlockException.INCORRECT_BLOCK_FORMAT,
- engine_api_error_code=EngineAPIError.InvalidParams,
- )
- ```
+    This can be used in a test to explicitly skip a field in a block's RLP
+    encoding so that it is not included in the (json) output when the model
+    is serialized. For example, pass a ``Header`` with
+    ``excess_blob_gas=Header.REMOVE_FIELD`` as a block's ``rlp_modifier``,
+    with ``exception=BlockException.INCORRECT_BLOCK_FORMAT`` and
+    ``engine_api_error_code=EngineAPIError.InvalidParams``.
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
- # explicitly set Removable items to None so they are not included in the serialization
- # (in combination with exclude_None=True in model.dump()).
+ # explicitly set Removable items to None so they are not included in
+ # the serialization (in combination with exclude_None=True in
+ # model.dump()).
json_encoders={
Removable: lambda x: None,
},
@@ -165,7 +160,9 @@ def validate_withdrawals_root(cls, value):
return value
def apply(self, target: FixtureHeader) -> FixtureHeader:
- """Produce a fixture header copy with the set values from the modifier."""
+ """
+ Produce a fixture header copy with the set values from the modifier.
+ """
return target.copy(
**{
k: (v if v is not Header.REMOVE_FIELD else None)
@@ -200,9 +197,7 @@ class Block(Header):
"""Block type used to describe block properties in test specs."""
header_verify: Header | None = None
- """
- If set, the block header will be verified against the specified values.
- """
+ # If set, the block header will be verified against the specified values.
rlp_modifier: Header | None = None
"""
An RLP modifying header which values would be used to override the ones
@@ -210,46 +205,34 @@ class Block(Header):
"""
expected_block_access_list: BlockAccessListExpectation | None = None
"""
- If set, the block access list will be verified and potentially corrupted for invalid tests.
+ If set, the block access list will be verified and potentially corrupted
+ for invalid tests.
"""
exception: BLOCK_EXCEPTION_TYPE = None
- """
- If set, the block is expected to be rejected by the client.
- """
+ # If set, the block is expected to be rejected by the client.
skip_exception_verification: bool = False
"""
- Skip verifying that the exception is returned by the transition tool.
- This could be because the exception is inserted in the block after the transition tool
- evaluates it.
+ Skip verifying that the exception is returned by the transition tool. This
+ could be because the exception is inserted in the block after the
+ transition tool evaluates it.
"""
engine_api_error_code: EngineAPIError | None = None
"""
- If set, the block is expected to produce an error response from the Engine API.
+ If set, the block is expected to produce an error response from the Engine
+ API.
"""
txs: List[Transaction] = Field(default_factory=list)
- """
- List of transactions included in the block.
- """
+ """List of transactions included in the block."""
ommers: List[Header] | None = None
- """
- List of ommer headers included in the block.
- """
+ """List of ommer headers included in the block."""
withdrawals: List[Withdrawal] | None = None
- """
- List of withdrawals to perform for this block.
- """
+ """List of withdrawals to perform for this block."""
requests: List[Bytes] | None = None
- """
- Custom list of requests to embed in this block.
- """
+ """Custom list of requests to embed in this block."""
expected_post_state: Alloc | None = None
- """
- Post state for verification after block execution in BlockchainTest
- """
+ """Post state for verification after block execution in BlockchainTest"""
block_access_list: Bytes | None = Field(None)
- """
- EIP-7928: Block-level access lists (serialized).
- """
+ """EIP-7928: Block-level access lists (serialized)."""
def set_environment(self, env: Environment) -> Environment:
"""
@@ -259,8 +242,8 @@ def set_environment(self, env: Environment) -> Environment:
new_env_values: Dict[str, Any] = {}
"""
- Values that need to be set in the environment and are `None` for
- this block need to be set to their defaults.
+ Values that need to be set in the environment and are `None` for this
+ block need to be set to their defaults.
"""
new_env_values["difficulty"] = self.difficulty
new_env_values["prev_randao"] = self.prev_randao
@@ -310,10 +293,7 @@ def set_environment(self, env: Environment) -> Environment:
class BuiltBlock(CamelModel):
- """
- Model that contains all properties to build a full block or
- payload.
- """
+ """Model that contains all properties to build a full block or payload."""
header: FixtureHeader
env: Environment
@@ -414,7 +394,8 @@ def verify_block_exception(self, transition_tool_exceptions_reliable: bool):
"prev_randao": 0,
}
"""
-Default values for the genesis environment that are used to create all genesis headers.
+Default values for the genesis environment that are used to create all genesis
+headers.
"""
@@ -428,8 +409,8 @@ class BlockchainTest(BaseTest):
chain_id: int = 1
exclude_full_post_state_in_output: bool = False
"""
- Exclude the post state from the fixture output.
- In this case, the state verification is only performed based on the state root.
+ Exclude the post state from the fixture output. In this case, the state
+ verification is only performed based on the state root.
"""
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
@@ -458,7 +439,10 @@ def discard_fixture_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard a fixture format from filling if the appropriate marker is used."""
+ """
+ Discard a fixture format from filling if the appropriate marker is
+ used.
+ """
marker_names = [m.name for m in markers]
if fixture_format != BlockchainFixture and "blockchain_test_only" in marker_names:
return True
@@ -516,7 +500,9 @@ def generate_block_data(
previous_alloc: Alloc,
last_block: bool,
) -> BuiltBlock:
- """Generate common block data for both make_fixture and make_hive_fixture."""
+ """
+ Generate common block data for both make_fixture and make_hive_fixture.
+ """
env = block.set_environment(previous_env)
env = env.set_fork_requirements(fork)
txs = [tx.with_signature_and_sender() for tx in block.txs]
@@ -546,10 +532,11 @@ def generate_block_data(
slow_request=self.is_tx_gas_heavy_test(),
)
- # One special case of the invalid transactions is the blob gas used, since this value
- # is not included in the transition tool result, but it is included in the block header,
- # and some clients check it before executing the block by simply counting the type-3 txs,
- # we need to set the correct value by default.
+ # One special case of the invalid transactions is the blob gas used,
+ # since this value is not included in the transition tool result, but
+ # it is included in the block header, and some clients check it before
+ # executing the block by simply counting the type-3 txs, we need to set
+ # the correct value by default.
blob_gas_used: int | None = None
if (blob_gas_per_blob := fork.blob_gas_per_blob(env.number, env.timestamp)) > 0:
blob_gas_used = blob_gas_per_blob * count_blobs(txs)
@@ -628,7 +615,8 @@ def generate_block_data(
header = block.rlp_modifier.apply(header)
header.fork = fork # Deleted during `apply` because `exclude=True`
- # Process block access list - apply transformer if present for invalid tests
+ # Process block access list - apply transformer if present for invalid
+ # tests
t8n_bal = transition_tool_output.result.block_access_list
bal = t8n_bal
if block.expected_block_access_list is not None and t8n_bal is not None:
@@ -668,15 +656,14 @@ def generate_block_data(
and block.expected_block_access_list._modifier is not None
)
):
- # Only verify block level exception if:
- # - No transaction exception was raised, because these are not
- # reported as block exceptions.
- # - No RLP modifier was specified, because the modifier is what
- # normally produces the block exception.
- # - No requests were specified, because modified requests are also
- # what normally produces the block exception.
- # - No BAL modifier was specified, because modified BAL also
- # produces block exceptions.
+        # Only verify block level exception if:
+        # - No transaction exception was raised, because these are not
+        #   reported as block exceptions.
+        # - No RLP modifier was specified, because the modifier is what
+        #   normally produces the block exception.
+        # - No requests were specified, because modified requests also
+        #   produce the block exception.
+        # - No BAL modifier was specified (modified BAL does the same).
built_block.verify_block_exception(
transition_tool_exceptions_reliable=t8n.exception_mapper.reliable,
)
@@ -739,7 +726,8 @@ def make_fixture(
)
fixture_blocks.append(built_block.get_fixture_block())
- # BAL verification already done in to_fixture_bal() if expected_block_access_list set
+ # BAL verification already done in to_fixture_bal() if
+ # expected_block_access_list set
if block.exception is None:
# Update env, alloc and last block hash for the next block.
@@ -838,8 +826,8 @@ def make_hive_fixture(
# Add format-specific fields
if fixture_format == BlockchainEngineXFixture:
- # For Engine X format, exclude pre (will be provided via shared state)
- # and prepare for state diff optimization
+ # For Engine X format, exclude pre (will be provided via shared
+ # state) and prepare for state diff optimization
fixture_data.update(
{
"post_state": alloc if not self.exclude_full_post_state_in_output else None,
@@ -852,9 +840,9 @@ def make_hive_fixture(
assert genesis.header.block_hash != head_hash, (
"Invalid payload tests negative test via sync is not supported yet."
)
- # Most clients require the header to start the sync process, so we create an empty
- # block on top of the last block of the test to send it as new payload and trigger the
- # sync process.
+ # Most clients require the header to start the sync process, so we
+ # create an empty block on top of the last block of the test to
+ # send it as new payload and trigger the sync process.
sync_built_block = self.generate_block_data(
t8n=t8n,
fork=fork,
diff --git a/src/ethereum_test_specs/eof.py b/src/ethereum_test_specs/eof.py
index 35f58984897..d64a98cde1d 100644
--- a/src/ethereum_test_specs/eof.py
+++ b/src/ethereum_test_specs/eof.py
@@ -53,7 +53,9 @@ def __init__(self, message):
@staticmethod
def format_code(code: Bytes, max_length=60) -> str:
- """Avoid printing long bytecode strings in the terminal upon test failure."""
+ """
+ Avoid printing long bytecode strings in the terminal upon test failure.
+ """
if len(code) > max_length:
half_length = max_length // 2 - 5 # Floor; adjust for ellipsis
return f"{code[:half_length].hex()}...{code[-half_length:].hex()}"
@@ -84,7 +86,10 @@ class ExpectedEOFExceptionError(EOFBaseExceptionError):
"""
def __init__(self, *, code: Bytes, expected: str):
- """Initialize the exception with the code and the expected exception message."""
+ """
+ Initialize the exception with the code and the expected exception
+ message.
+ """
message = (
"Expected EOF code to be invalid, but no exception was raised:\n"
f" Code: {self.format_code(code)}\n"
@@ -95,10 +100,15 @@ def __init__(self, *, code: Bytes, expected: str):
class EOFExceptionMismatchError(EOFBaseExceptionError):
- """Exception used when the actual EOF exception differs from the expected one."""
+ """
+ Exception used when the actual EOF exception differs from the expected one.
+ """
def __init__(self, code: Bytes, expected: str, got: str):
- """Initialize the exception with the code, the expected/actual exception message."""
+ """
+ Initialize the exception with the code, the expected/actual exception
+ message.
+ """
message = (
"EOF code raised a different exception than expected:\n"
f" Code: {self.format_code(code)}\n"
@@ -166,88 +176,92 @@ class EOFTest(BaseTest):
"""
Filler type that generates a test for EOF container validation.
- A state test is also automatically generated where the container is wrapped in a
- contract-creating transaction to test deployment/validation on the instantiated blockchain.
+ A state test is also automatically generated where the container is wrapped
+ in a contract-creating transaction to test deployment/validation on the
+ instantiated blockchain.
"""
container: Container
"""
EOF container that will be tested for validity.
- The only supported type at the moment is `ethereum_test_types.eof.v1.Container`.
+ The only supported type at the moment is
+ `ethereum_test_types.eof.v1.Container`.
- If an invalid container needs to be tested, and it cannot be generated using the
- Container class features, the `raw_bytes` field can be used to provide the raw
- container bytes.
+ If an invalid container needs to be tested, and it cannot be generated
+ using the Container class features, the `raw_bytes` field can be used to
+ provide the raw container bytes.
"""
expect_exception: EOFExceptionInstanceOrList | None = None
"""
- Expected exception that the container should raise when parsed by an EOF parser.
+ Expected exception that the container should raise when parsed by an EOF
+ parser.
- Can be a single exception or a list of exceptions that the container is expected to raise,
- in which case the test will pass if any of the exceptions are raised.
+ Can be a single exception or a list of exceptions that the container is
+ expected to raise, in which case the test will pass if any of the
+ exceptions are raised.
- The list of supported exceptions can be found in the `ethereum_test_exceptions.EOFException`
- class.
+ The list of supported exceptions can be found in the
+ `ethereum_test_exceptions.EOFException` class.
"""
container_kind: ContainerKind = ContainerKind.RUNTIME
"""
Container kind type that the container should be treated as.
- The container kind can be one of the following:
- - `ContainerKind.INITCODE`: The container is an initcode container.
- - `ContainerKind.RUNTIME`: The container is a runtime container.
+ The container kind can be one of the following:
+ - `ContainerKind.INITCODE`: The container is an initcode container.
+ - `ContainerKind.RUNTIME`: The container is a runtime container.
The default value is `ContainerKind.RUNTIME`.
"""
deployed_container: Container | None = None
"""
- To be used when the container is an initcode container and the expected deployed container is
- known.
-
- The value is only used when a State Test is generated from this EOF test to set the expected
- deployed container that should be found in the post state.
-
- If this field is not set, and the container is valid:
- - If the container kind is `ContainerKind.RUNTIME`, the deployed container is assumed to be
- the container itself, and an initcode container that wraps the container is generated
- automatically.
- - If the container kind is `ContainerKind.INITCODE`, `model_post_init` will attempt to infer
- the deployed container from the sections of the init-container, and the first
- container-type section will be used. An error will be raised if the deployed container
- cannot be inferred.
-
- If the value is set to `None`, it is assumed that the container is invalid and the test will
- expect that no contract is created.
-
- It is considered an error if:
- - The `deployed_container` field is set and the `container_kind` field is not set to
- `ContainerKind.INITCODE`.
- - The `deployed_container` field is set and the `expect_exception` is not `None`.
-
- The deployed container is **not** executed at any point during the EOF validation test nor
- the generated State Test. For container runtime testing use the `EOFStateTest` class.
+ To be used when the container is an initcode container and the expected
+ deployed container is known.
+
+ The value is only used when a State Test is generated from this EOF test to
+ set the expected deployed container that should be found in the post state.
+
+ If this field is not set, and the container is valid:
+ - If the container kind is `ContainerKind.RUNTIME`, the deployed
+   container is assumed to be the container itself, and an initcode
+   container that wraps the container is generated automatically.
+ - If the container kind is `ContainerKind.INITCODE`, `model_post_init`
+   will attempt to infer the deployed container from the sections of the
+   init-container, and the first container-type section will be used. An
+   error will be raised if the deployed container cannot be inferred.
+
+ If the value is set to `None`, it is assumed that the container is invalid
+ and the test will expect that no contract is created.
+
+ It is considered an error if:
+ - The `deployed_container` field is set and `container_kind` is not `ContainerKind.INITCODE`.
+ - The `deployed_container` field is set and `expect_exception` is not `None`.
+
+ The deployed container is **not** executed at any point during the EOF
+ validation test nor the generated State Test. For container runtime testing
+ use the `EOFStateTest` class.
"""
pre: Alloc | None = None
"""
Pre alloc object that is used during State Test generation.
- This field is automatically set by the test filler when generating a State Test from this EOF
- test and should otherwise be left unset.
+ This field is automatically set by the test filler when generating a State
+ Test from this EOF test and should otherwise be left unset.
"""
post: Alloc | None = None
"""
Post alloc object that is used during State Test generation.
- This field is automatically set by the test filler when generating a State Test from this EOF
- test and is normally not set by the user.
+ This field is automatically set by the test filler when generating a State
+ Test from this EOF test and is normally not set by the user.
"""
sender: EOA | None = None
"""
Sender EOA object that is used during State Test generation.
- This field is automatically set by the `model_post_init` method and should otherwise be left
- unset.
+ This field is automatically set by the `model_post_init` method and should
+ otherwise be left unset.
"""
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
@@ -281,7 +295,10 @@ def discard_fixture_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard a fixture format from filling if the appropriate marker is used."""
+ """
+ Discard a fixture format from filling if the appropriate marker is
+ used.
+ """
if "eof_test_only" in [m.name for m in markers]:
return fixture_format != EOFFixture
return False
@@ -369,7 +386,9 @@ def make_eof_test_fixture(
return fixture
def verify_result(self, result: CompletedProcess, expected_result: Result, code: Bytes):
- """Check that the reported exception string matches the expected error."""
+ """
+ Check that the reported exception string matches the expected error.
+ """
evmone_exception_mapper = EvmoneExceptionMapper()
actual_exception_str = result.stdout.strip()
actual_exception: EOFExceptionWithMessage | UndefinedException | None = None
@@ -409,12 +428,13 @@ def generate_eof_contract_create_transaction(self) -> Transaction:
if self.container_kind == ContainerKind.INITCODE:
initcode = self.container
if "deployed_container" in self.model_fields_set:
- # In the case of an initcontainer where we know the deployed container,
- # we can use the initcontainer as-is.
+ # In the case of an initcontainer where we know the deployed
+ # container, we can use the initcontainer as-is.
deployed_container = self.deployed_container
elif self.expect_exception is None:
- # We have a valid init-container, but we don't know the deployed container.
- # Try to infer the deployed container from the sections of the init-container.
+ # We have a valid init-container, but we don't know the
+ # deployed container. Try to infer the deployed container from
+ # the sections of the init-container.
assert self.container.raw_bytes is None, (
"deployed_container must be set for initcode containers with raw_bytes."
)
@@ -505,34 +525,28 @@ def execute(
class EOFStateTest(EOFTest, Transaction):
"""
- Filler type that generates an EOF test for container validation, and also tests the container
- during runtime using a state test (and blockchain test).
+ Filler type that generates an EOF test for container validation, and also
+ tests the container during runtime using a state test (and blockchain
+ test).
- In the state or blockchain test, the container is first deployed to the pre-allocation and
- then a transaction is sent to the deployed container.
+ In the state or blockchain test, the container is first deployed to the
+ pre-allocation and then a transaction is sent to the deployed container.
- Container deployment/validation is **not** tested like in the `EOFTest` unless the container
- under test is an initcode container.
+ Container deployment/validation is **not** tested like in the `EOFTest`
+ unless the container under test is an initcode container.
- All fields from `ethereum_test_types.Transaction` are available for use in the test.
+ All fields from `ethereum_test_types.Transaction` are available for use in
+ the test.
"""
gas_limit: HexNumber = Field(HexNumber(10_000_000), serialization_alias="gas")
- """
- Gas limit for the transaction that deploys the container.
- """
+ """Gas limit for the transaction that deploys the container."""
tx_sender_funding_amount: int = 1_000_000_000_000_000_000_000
- """
- Amount of funds to send to the sender EOA before the transaction.
- """
+ """Amount of funds to send to the sender EOA before the transaction."""
env: Environment = Field(default_factory=Environment)
- """
- Environment object that is used during State Test generation.
- """
+ """Environment object that is used during State Test generation."""
container_post: Account = Field(default_factory=Account)
- """
- Account object used to verify the container post state.
- """
+ """Account object used to verify the container post state."""
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
EOFFixture
@@ -580,7 +594,8 @@ def model_post_init(self, __context):
# Run transaction model validation
Transaction.model_post_init(self, __context)
- self.post[compute_eofcreate_address(self.to, 0)] = None # Expect failure.
+ # Expect failure.
+ self.post[compute_eofcreate_address(self.to, 0)] = None
elif self.expect_exception is not None and self.container_kind == ContainerKind.INITCODE:
# Invalid EOF initcode
self.to = self.pre.deploy_contract(
@@ -591,7 +606,8 @@ def model_post_init(self, __context):
# Run transaction model validation
Transaction.model_post_init(self, __context)
- self.post[compute_eofcreate_address(self.to, 0)] = None # Expect failure.
+ # Expect failure.
+ self.post[compute_eofcreate_address(self.to, 0)] = None
elif self.container_kind == ContainerKind.INITCODE:
self.to = self.pre.deploy_contract(
Op.TXCREATE(tx_initcode_hash=self.container.hash) + Op.STOP
@@ -634,8 +650,8 @@ def generate(
"""Generate the BlockchainTest fixture."""
if fixture_format == EOFFixture:
if Bytes(self.container) in existing_tests:
- # Gracefully skip duplicate tests because one EOFStateTest can generate multiple
- # state fixtures with the same data.
+ # Gracefully skip duplicate tests because one EOFStateTest can
+ # generate multiple state fixtures with the same data.
pytest.skip(f"Duplicate EOF container on EOFStateTest: {self.node_id()}")
return self.make_eof_test_fixture(fork=fork)
elif fixture_format in StateTest.supported_fixture_formats:
diff --git a/src/ethereum_test_specs/helpers.py b/src/ethereum_test_specs/helpers.py
index 1f877f21d30..27b0979310d 100644
--- a/src/ethereum_test_specs/helpers.py
+++ b/src/ethereum_test_specs/helpers.py
@@ -23,7 +23,9 @@ class ExecutionContext(StrEnum):
class UnexpectedExecutionSuccessError(Exception):
- """Exception used when the transaction expected to fail succeeded instead."""
+ """
+ Exception used when the transaction expected to fail succeeded instead.
+ """
def __init__(self, execution_context: ExecutionContext, **kwargs):
"""Initialize the unexpected success exception."""
@@ -35,7 +37,9 @@ def __init__(self, execution_context: ExecutionContext, **kwargs):
class UnexpectedExecutionFailError(Exception):
- """Exception used when a transaction/block expected to succeed failed instead."""
+ """
+ Exception used when a transaction/block expected to succeed failed instead.
+ """
def __init__(
self,
@@ -54,7 +58,10 @@ def __init__(
class UndefinedExecutionExceptionError(Exception):
- """Exception used when a client's exception message isn't present in its `ExceptionMapper`."""
+ """
+ Exception used when a client's exception message isn't present in its
+ `ExceptionMapper`.
+ """
def __init__(
self,
@@ -100,7 +107,10 @@ def __init__(
class TransactionReceiptMismatchError(Exception):
- """Exception used when the actual transaction receipt differs from the expected one."""
+ """
+ Exception used when the actual transaction receipt differs from the
+ expected one.
+ """
def __init__(
self,
@@ -259,9 +269,9 @@ def verify_transactions(
transition_tool_exceptions_reliable: bool,
) -> List[int]:
"""
- Verify accepted and rejected (if any) transactions against the expected outcome.
- Raises exception on unexpected rejections, unexpected successful txs, or successful txs with
- unexpected receipt values.
+ Verify accepted and rejected (if any) transactions against the expected
+ outcome. Raises exception on unexpected rejections, unexpected successful
+ txs, or successful txs with unexpected receipt values.
"""
rejected_txs: Dict[int, ExceptionWithMessage | UndefinedException] = {
rejected_tx.index: rejected_tx.error for rejected_tx in result.rejected_transactions
diff --git a/src/ethereum_test_specs/state.py b/src/ethereum_test_specs/state.py
index b368b41298c..24b0cb08169 100644
--- a/src/ethereum_test_specs/state.py
+++ b/src/ethereum_test_specs/state.py
@@ -46,7 +46,9 @@
class StateTest(BaseTest):
- """Filler type that tests transactions over the period of a single block."""
+ """
+ Filler type that tests transactions over the period of a single block.
+ """
env: Environment = Field(default_factory=Environment)
pre: Alloc
@@ -70,7 +72,8 @@ class StateTest(BaseTest):
f"A {fixture_format.format_name} generated from a state_test",
)
for fixture_format in BlockchainTest.supported_fixture_formats
- # Exclude sync fixtures from state tests - they don't make sense for state tests
+ # Exclude sync fixtures from state tests - they don't make sense for
+ # state tests
if not (
(hasattr(fixture_format, "__name__") and "Sync" in fixture_format.__name__)
or (hasattr(fixture_format, "format") and "Sync" in fixture_format.format.__name__)
@@ -173,26 +176,33 @@ def discard_fixture_format_by_marks(
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
- """Discard a fixture format from filling if the appropriate marker is used."""
+ """
+ Discard a fixture format from filling if the appropriate marker is
+ used.
+ """
if "state_test_only" in [m.name for m in markers]:
return fixture_format != StateFixture
return False
def _generate_blockchain_genesis_environment(self, *, fork: Fork) -> Environment:
- """Generate the genesis environment for the BlockchainTest formatted test."""
+ """
+ Generate the genesis environment for the BlockchainTest formatted test.
+ """
assert self.env.number >= 1, (
"genesis block number cannot be negative, set state test env.number to at least 1"
)
assert self.env.timestamp >= 1, (
"genesis timestamp cannot be negative, set state test env.timestamp to at least 1"
)
- # There's only a handful of values that we need to set in the genesis for the
- # environment values at block 1 to make sense:
+ # There's only a handful of values that we need to set in the genesis
+ # for the environment values at block 1 to make sense:
# - Number: Needs to be N minus 1
- # - Timestamp: Needs to be zero, because the subsequent block can come at any time.
+ # - Timestamp: Needs to be zero, because the subsequent
+ # block can come at any time.
# - Gas Limit: Changes from parent to child, needs to be set in genesis
# - Base Fee Per Gas: Block's base fee depends on the parent's value
- # - Excess Blob Gas: Block's excess blob gas value depends on the parent's value
+ # - Excess Blob Gas: Block's excess blob gas value depends on
+ # the parent's value
kwargs: Dict[str, Any] = {
"number": self.env.number - 1,
"timestamp": 0,
@@ -208,11 +218,13 @@ def _generate_blockchain_genesis_environment(self, *, fork: Fork) -> Environment
)
if self.env.excess_blob_gas:
- # The excess blob gas environment value means the value of the context (block header)
- # where the transaction is executed. In a blockchain test, we need to indirectly
- # set the excess blob gas by setting the excess blob gas of the genesis block
- # to the expected value plus the TARGET_BLOB_GAS_PER_BLOCK, which is the value
- # that will be subtracted from the excess blob gas when the first block is mined.
+ # The excess blob gas environment value means the value of the
+ # context (block header) where the transaction is executed. In a
+ # blockchain test, we need to indirectly set the excess blob gas by
+ # setting the excess blob gas of the genesis block to the expected
+ # value plus the TARGET_BLOB_GAS_PER_BLOCK, which is the value that
+ # will be subtracted from the excess blob gas when the first block
+ # is mined.
kwargs["excess_blob_gas"] = self.env.excess_blob_gas + (
fork.target_blobs_per_block() * fork.blob_gas_per_blob()
)
@@ -220,7 +232,10 @@ def _generate_blockchain_genesis_environment(self, *, fork: Fork) -> Environment
return Environment(**kwargs)
def _generate_blockchain_blocks(self, *, fork: Fork) -> List[Block]:
- """Generate the single block that represents this state test in a BlockchainTest format."""
+ """
+ Generate the single block that represents this state test in a
+ BlockchainTest format.
+ """
kwargs = {
"number": self.env.number,
"timestamp": self.env.timestamp,
@@ -261,7 +276,8 @@ def make_state_test_fixture(
) -> StateFixture:
"""Create a fixture from the state test definition."""
# We can't generate a state test fixture that names a transition fork,
- # so we get the fork at the block number and timestamp of the state test
+ # so we get the fork at the block number and timestamp of the state
+ # test
fork = fork.fork_at(self.env.number, self.env.timestamp)
env = self.env.set_fork_requirements(fork)
@@ -315,8 +331,9 @@ def make_state_test_fixture(
assert base_tool_output.result.traces is not None, "Traces not found."
- # First try reducing the gas limit only by one, if the validation fails, it means
- # that the traces change even with the slightest modification to the gas.
+ # First try reducing the gas limit only by one, if the validation
+ # fails, it means that the traces change even with the slightest
+ # modification to the gas.
if self.verify_modified_gas_limit(
t8n=t8n,
base_tool_output=base_tool_output,
diff --git a/src/ethereum_test_specs/static_state/account.py b/src/ethereum_test_specs/static_state/account.py
index 81f16cdee1f..978312f9b21 100644
--- a/src/ethereum_test_specs/static_state/account.py
+++ b/src/ethereum_test_specs/static_state/account.py
@@ -169,7 +169,8 @@ def setup(self, pre: Alloc, all_dependencies: Dict[str, Tag]) -> TagDict:
# Step 3: Get topological order
resolution_order = self._topological_sort(dep_graph)
- # Step 4: Pre-deploy all contract tags and pre-fund EOAs to get addresses
+ # Step 4: Pre-deploy all contract tags and pre-fund EOAs to get
+ # addresses
for tag_name in resolution_order:
if tag_name in tag_to_address:
tag = tag_to_address[tag_name]
@@ -181,7 +182,8 @@ def setup(self, pre: Alloc, all_dependencies: Dict[str, Tag]) -> TagDict:
)
resolved_accounts[tag_name] = deployed_address
elif isinstance(tag, SenderTag):
- # Create EOA to get address - use amount=1 to ensure account is created
+ # Create EOA to get address - use amount=1 to ensure
+ # account is created
eoa = pre.fund_eoa(amount=1, label=tag_name)
# Store the EOA object for SenderKeyTag resolution
resolved_accounts[tag_name] = eoa
@@ -247,7 +249,8 @@ def setup(self, pre: Alloc, all_dependencies: Dict[str, Tag]) -> TagDict:
if all_dependencies[extra_dependency].type != "eoa":
raise ValueError(f"Contract dependency {extra_dependency} not found in pre")
- # Create new EOA - this will have a dynamically generated key and address
+ # Create new EOA - this will have a dynamically generated key
+ # and address
eoa = pre.fund_eoa(amount=0, label=extra_dependency)
resolved_accounts[extra_dependency] = eoa
diff --git a/src/ethereum_test_specs/static_state/common/common.py b/src/ethereum_test_specs/static_state/common/common.py
index 340f08971d8..d10e6f254ed 100644
--- a/src/ethereum_test_specs/static_state/common/common.py
+++ b/src/ethereum_test_specs/static_state/common/common.py
@@ -69,7 +69,8 @@ def validate_from_string(cls, code: Any) -> Any:
"""Validate from string, separating label from code source."""
if isinstance(code, str):
label_marker = ":label"
- # Only look for label at the beginning of the string (possibly after whitespace)
+ # Only look for label at the beginning of the string (possibly
+ # after whitespace)
stripped_code = code.lstrip()
# Parse :label into code options
@@ -123,7 +124,8 @@ def replace_tags(raw_code, keep_prefix: bool) -> str:
substitution_address = f"{tag.resolve(tags)}"
if not keep_prefix and substitution_address.startswith("0x"):
substitution_address = substitution_address[2:]
- # Use the original string if available, otherwise construct a pattern
+ # Use the original string if available, otherwise construct a
+ # pattern
if hasattr(tag, "original_string") and tag.original_string:
raw_code = raw_code.replace(tag.original_string, substitution_address)
else:
@@ -192,9 +194,11 @@ def replace_tags(raw_code, keep_prefix: bool) -> str:
[parameter_str],
[
[
- int(t.lower(), 0) & ((1 << 256) - 1) # treat big ints as 256bits
+ # treat big ints as 256bits
+ int(t.lower(), 0) & ((1 << 256) - 1)
if parameter_types[t_index] == "uint"
- else int(t.lower(), 0) > 0 # treat positive values as True
+ # treat positive values as True
+ else int(t.lower(), 0) > 0
if parameter_types[t_index] == "bool"
else False and ValueError("unhandled parameter_types")
for t_index, t in enumerate(tokens[1:])
@@ -217,15 +221,18 @@ def replace_tags(raw_code, keep_prefix: bool) -> str:
# - using lllc
result = subprocess.run(["lllc", tmp_path], capture_output=True, text=True)
- # - using docker:
- # If the running machine does not have lllc installed, we can use docker to
- # run lllc, but we need to start a container first, and the process is generally
- # slower.
+ # - using docker: If the running machine does not have lllc
+ # installed, we can use docker to run lllc, but we need to
+ # start a container first, and the process is generally slower.
+ #
# from .docker import get_lllc_container_id
- # result = subprocess.run(
- # ["docker", "exec", get_lllc_container_id(), "lllc", tmp_path[5:]],
+ # result = subprocess.run( ["docker",
+ # "exec",
+ # get_lllc_container_id(),
+ # "lllc",
+ # tmp_path[5:]],
# capture_output=True,
- # text=True,
+ # text=True,
# )
compiled_code = "".join(result.stdout.splitlines())
@@ -245,15 +252,16 @@ def tag_dependencies(self) -> Mapping[str, Tag]:
class AddressTag:
"""
Represents an address tag like:
- - .
- - .
- - .
+ - .
+ - .
+ - .
"""
def __init__(self, tag_type: str, tag_name: str, original_string: str):
"""Initialize address tag."""
self.tag_type = tag_type # "eoa", "contract", or "coinbase"
- self.tag_name = tag_name # e.g., "sender", "target", or address for 2-part tags
+ # e.g., "sender", "target", or address for 2-part tags
+ self.tag_name = tag_name
self.original_string = original_string
def __str__(self) -> str:
@@ -317,8 +325,8 @@ def parse_address_or_tag(value: Any) -> Union[Address, AddressTag]:
def parse_address_or_tag_for_access_list(value: Any) -> Union[Address, str]:
"""
- Parse either a regular address or an address tag, keeping tags as strings for later
- resolution.
+ Parse either a regular address or an address tag, keeping tags as strings
+ for later resolution.
"""
if not isinstance(value, str):
# Non-string values should be converted to Address normally
@@ -344,7 +352,9 @@ def parse_address_or_tag_for_access_list(value: Any) -> Union[Address, str]:
class AccessListInFiller(CamelModel, TagDependentData):
- """Access List for transactions in fillers that can contain address tags."""
+ """
+ Access List for transactions in fillers that can contain address tags.
+ """
address: AddressOrTagInFiller
storage_keys: List[Hash] = Field(default_factory=list)
diff --git a/src/ethereum_test_specs/static_state/common/compile_yul.py b/src/ethereum_test_specs/static_state/common/compile_yul.py
index 6e548d46b26..632d0cd31ad 100644
--- a/src/ethereum_test_specs/static_state/common/compile_yul.py
+++ b/src/ethereum_test_specs/static_state/common/compile_yul.py
@@ -51,19 +51,21 @@ def safe_solc_command(
def compile_yul(source_file: str, evm_version: str | None = None, optimize: str | None = None):
"""
- Compiles a Yul source file using solc and returns the binary representation.
+ Compiles a Yul source file using solc and returns the binary
+ representation.
- Parameters_:
- source_file (str): Path to the Yul source file.
- evm_version (str, optional): The EVM version to use (e.g., 'istanbul'). Defaults to None.
- optimize (any, optional): If provided (non-None), optimization flags are not added.
- If None, additional optimization flags will be included.
+ Arguments:
+ source_file (str): Path to the Yul source file.
+ evm_version (str, optional): The EVM version to use (e.g., 'istanbul').
+ Defaults to None.
+ optimize (any, optional): If provided (non-None), optimization flags
+ are not added. If None, additional
+ optimization flags will be included.
- Returns_:
- str: The binary representation prefixed with "0x".
+ Returns: str: The binary representation prefixed with "0x".
+
+ Raises: Exception: If the solc output contains an error message.
- Raises_:
- Exception: If the solc output contains an error message.
"""
cmd = safe_solc_command(source_file, evm_version, optimize)
@@ -77,7 +79,8 @@ def compile_yul(source_file: str, evm_version: str | None = None, optimize: str
if "Error" in out:
raise Exception(f"Yul compilation error:\n{out}")
- # Search for the "Binary representation:" line and get the following line as the binary
+ # Search for the "Binary representation:" line and get the following line
+ # as the binary
lines = out.splitlines()
binary_line = ""
for i, line in enumerate(lines):
diff --git a/src/ethereum_test_specs/static_state/common/tags.py b/src/ethereum_test_specs/static_state/common/tags.py
index d1c75dc8885..100ae06107c 100644
--- a/src/ethereum_test_specs/static_state/common/tags.py
+++ b/src/ethereum_test_specs/static_state/common/tags.py
@@ -20,7 +20,8 @@ class Tag(BaseModel, Generic[T]):
name: str
type: ClassVar[str] = ""
regex_pattern: ClassVar[re.Pattern] = re.compile(r"<\w+:(\w+)(:[^>]+)?")
- original_string: str | None = None # Store the original tag string for replacement
+ # Store the original tag string for replacement
+ original_string: str | None = None
def __hash__(self) -> int:
"""Hash based on original string for use as dict key."""
@@ -64,12 +65,18 @@ class ContractTag(AddressTag):
type: ClassVar[str] = "contract"
regex_pattern: ClassVar[re.Pattern] = re.compile(r"]+)(?::(0x[a-fA-F0-9]+))?>")
- debug_address: Address | None = None # Optional hard-coded address for debugging
+ # Optional hard-coded address for debugging
+ debug_address: Address | None = None
@model_validator(mode="before")
@classmethod
def validate_from_string(cls, data: Any) -> Any:
- """Validate the contract tag from string: or ."""
+ """
+ Validate the contract tag from string:
+
+ or
+ .
+ """
if isinstance(data, str):
if m := cls.regex_pattern.match(data):
name_or_addr = m.group(1)
@@ -77,8 +84,9 @@ def validate_from_string(cls, data: Any) -> Any:
# Check if it's a 2-part format with an address
if name_or_addr.startswith("0x") and len(name_or_addr) == 42:
- # For 2-part format, use the full address as the name
- # This ensures all references to the same address get the same tag name
+ # For 2-part format, use the full address as the name. This
+ # ensures all references to the same address get the same tag
+ # name.
return {
"name": name_or_addr,
"debug_address": Address(name_or_addr),
@@ -146,7 +154,8 @@ class SenderTag(AddressTag):
type: ClassVar[str] = "eoa"
regex_pattern: ClassVar[re.Pattern] = re.compile(r"")
- debug_address: Address | None = None # Optional hard-coded address for debugging
+ # Optional hard-coded address for debugging
+ debug_address: Address | None = None
@model_validator(mode="before")
@classmethod
diff --git a/src/ethereum_test_specs/static_state/expect_section.py b/src/ethereum_test_specs/static_state/expect_section.py
index 226b3311bec..ccd7f7aa45d 100644
--- a/src/ethereum_test_specs/static_state/expect_section.py
+++ b/src/ethereum_test_specs/static_state/expect_section.py
@@ -106,7 +106,10 @@ class AccountInExpectSection(BaseModel, TagDependentData):
@model_validator(mode="wrap")
@classmethod
def validate_should_not_exist(cls, v: Any, handler: ValidatorFunctionWrapHandler):
- """Validate the "shouldnotexist" field, which makes this validator return `None`."""
+ """
+ Validate the "shouldnotexist" field, which makes this validator return
+ `None`.
+ """
if isinstance(v, dict):
if "shouldnotexist" in v:
return None
@@ -251,8 +254,8 @@ class ResultInFiller(EthereumTestRootModel, TagDependentData):
"""
Post section in state test filler.
- A value of `None` for an address means that the account should not be in the state trie
- at the end of the test.
+ A value of `None` for an address means that the account should not be in
+ the state trie at the end of the test.
"""
root: Dict[AddressOrCreateTagInFiller, AccountInExpectSection | None]
diff --git a/src/ethereum_test_specs/static_state/general_transaction.py b/src/ethereum_test_specs/static_state/general_transaction.py
index e3e44be62ea..cafec7778c3 100644
--- a/src/ethereum_test_specs/static_state/general_transaction.py
+++ b/src/ethereum_test_specs/static_state/general_transaction.py
@@ -86,7 +86,9 @@ def __getitem__(self, label_or_index: int | str):
raise KeyError(f"Label/index {label_or_index} not found in data indexes")
def __contains__(self, label_or_index: int | str):
- """Return True if the LabeledDataList contains the given label/index."""
+ """
+ Return True if the LabeledDataList contains the given label/index.
+ """
if isinstance(label_or_index, int):
return label_or_index < len(self.root)
if isinstance(label_or_index, str):
diff --git a/src/ethereum_test_specs/static_state/state_static.py b/src/ethereum_test_specs/static_state/state_static.py
index 7801d404b91..322691b4cf6 100644
--- a/src/ethereum_test_specs/static_state/state_static.py
+++ b/src/ethereum_test_specs/static_state/state_static.py
@@ -69,7 +69,9 @@ def parse_indexes(
indexes: Union[int, str, list[Union[int, str]], list[str], list[int]],
do_hint: bool = False,
) -> List[int] | int:
- """Parse indexes and replace all ranges and labels into tx indexes."""
+ """
+ Parse indexes and replace all ranges and labels into tx indexes.
+ """
result: List[int] | int = []
if do_hint:
@@ -129,8 +131,9 @@ def fill_function(self) -> Callable:
for expect in self.expect:
if expect.has_index(d.index, g, v) and expect.expect_exception is not None:
exception_test = True
- # TODO: This does not take into account exceptions that only happen on
- # specific forks, but this requires a covariant parametrize
+ # TODO: This does not take into account exceptions that
+ # only happen on specific forks, but this requires a
+ # covariant parametrize
marks = [pytest.mark.exception_test] if exception_test else []
id_label = ""
if len(self.transaction.data) > 1 or d.label is not None:
diff --git a/src/ethereum_test_specs/tests/test_benchmark.py b/src/ethereum_test_specs/tests/test_benchmark.py
index bd4a699720b..6511e427fc5 100644
--- a/src/ethereum_test_specs/tests/test_benchmark.py
+++ b/src/ethereum_test_specs/tests/test_benchmark.py
@@ -1,4 +1,7 @@
-"""Tests for the BenchmarkTest class and its transaction splitting functionality."""
+"""
+ Tests for the BenchmarkTest class and its transaction splitting
+ functionality.
+"""
import pytest
@@ -20,7 +23,10 @@
],
)
def test_split_transaction(gas_benchmark_value_millions: int, expected_splits: int):
- """Test that transaction splitting works correctly for Osaka fork gas cap."""
+ """
+ Test that transaction splitting works correctly for the Osaka fork
+ gas cap.
+ """
gas_benchmark_value = gas_benchmark_value_millions * 1_000_000
gas_limit_cap = 16_000_000 # Osaka's transaction gas limit cap
@@ -100,6 +106,7 @@ def test_split_transaction_edge_cases(gas_benchmark_value: int, gas_limit_cap: i
# When no cap, gas_limit should be benchmark value
assert split_txs[0].gas_limit == gas_benchmark_value
else:
- # When cap > benchmark, gas_limit should be min of tx.gas_limit and benchmark
+ # When cap > benchmark, gas_limit should be
+ # min of tx.gas_limit and benchmark
assert benchmark_test.tx is not None, "Transaction should not be None"
assert split_txs[0].gas_limit == min(benchmark_test.tx.gas_limit, gas_benchmark_value)
diff --git a/src/ethereum_test_specs/tests/test_expect.py b/src/ethereum_test_specs/tests/test_expect.py
index 9f2b00acafc..0009064fc12 100644
--- a/src/ethereum_test_specs/tests/test_expect.py
+++ b/src/ethereum_test_specs/tests/test_expect.py
@@ -118,7 +118,10 @@ def state_test( # noqa: D103
indirect=["pre", "post"],
)
def test_post_storage_value_mismatch(expected_exception, state_test, default_t8n, fork):
- """Test post state `Account.storage` exceptions during state test fixture generation."""
+ """
+ Test post state `Account.storage` exceptions during state test fixture
+ generation.
+ """
with pytest.raises(Storage.KeyValueMismatchError) as e_info:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
assert e_info.value == expected_exception
@@ -136,8 +139,8 @@ def test_post_storage_value_mismatch(expected_exception, state_test, default_t8n
)
def test_post_nonce_value_mismatch(pre: Alloc, post: Alloc, state_test, default_t8n, fork):
"""
- Test post state `Account.nonce` verification and exceptions during state test
- fixture generation.
+ Test post state `Account.nonce` verification and exceptions during state
+ test fixture generation.
"""
pre_account = pre[ADDRESS_UNDER_TEST]
post_account = post[ADDRESS_UNDER_TEST]
@@ -167,8 +170,8 @@ def test_post_nonce_value_mismatch(pre: Alloc, post: Alloc, state_test, default_
)
def test_post_code_value_mismatch(pre: Alloc, post: Alloc, state_test, default_t8n, fork):
"""
- Test post state `Account.code` verification and exceptions during state test
- fixture generation.
+ Test post state `Account.code` verification and exceptions during state
+ test fixture generation.
"""
pre_account = pre[ADDRESS_UNDER_TEST]
post_account = post[ADDRESS_UNDER_TEST]
@@ -198,8 +201,8 @@ def test_post_code_value_mismatch(pre: Alloc, post: Alloc, state_test, default_t
)
def test_post_balance_value_mismatch(pre: Alloc, post: Alloc, state_test, default_t8n, fork):
"""
- Test post state `Account.balance` verification and exceptions during state test
- fixture generation.
+ Test post state `Account.balance` verification and exceptions during state
+ test fixture generation.
"""
pre_account = pre[ADDRESS_UNDER_TEST]
post_account = post[ADDRESS_UNDER_TEST]
@@ -332,8 +335,8 @@ def test_transaction_expectation(
fixture_format: FixtureFormat,
):
"""
- Test a transaction that has an unexpected error, expected error, or expected a specific
- value in its receipt.
+ Test a transaction that has an unexpected error, expected error, or
+ expected a specific value in its receipt.
"""
if (
exception_type == ExecutionExceptionMismatchError
diff --git a/src/ethereum_test_specs/tests/test_fixtures.py b/src/ethereum_test_specs/tests/test_fixtures.py
index 665b37b0675..45afb3864cd 100644
--- a/src/ethereum_test_specs/tests/test_fixtures.py
+++ b/src/ethereum_test_specs/tests/test_fixtures.py
@@ -42,8 +42,8 @@ def fixture_hash(fork: Fork) -> bytes:
def test_check_helper_fixtures():
"""
Test that the framework's pydantic models serialization and deserialization
- work correctly and that they are compatible with the helper fixtures defined
- in ./fixtures/ by using the check_fixtures.py script.
+ work correctly and that they are compatible with the helper fixtures
+ defined in ./fixtures/ by using the check_fixtures.py script.
"""
runner = CliRunner()
args = [
diff --git a/src/ethereum_test_specs/transaction.py b/src/ethereum_test_specs/transaction.py
index 0dbf927201c..0511891e579 100644
--- a/src/ethereum_test_specs/transaction.py
+++ b/src/ethereum_test_specs/transaction.py
@@ -23,7 +23,9 @@
class TransactionTest(BaseTest):
- """Filler type that tests the transaction over the period of a single block."""
+ """
+ Filler type that tests the transaction over the period of a single block.
+ """
tx: Transaction
pre: Alloc | None = None
diff --git a/src/ethereum_test_tools/tests/test_code.py b/src/ethereum_test_tools/tests/test_code.py
index 40d47821421..37040cfbb28 100644
--- a/src/ethereum_test_tools/tests/test_code.py
+++ b/src/ethereum_test_tools/tests/test_code.py
@@ -185,7 +185,9 @@ def test_initcode(initcode: Initcode, bytecode: bytes): # noqa: D103
],
)
def test_opcodes_if(conditional_bytecode: bytes, expected: bytes):
- """Test that the if opcode macro is transformed into bytecode as expected."""
+ """
+ Test that the if opcode macro is transformed into bytecode as expected.
+ """
assert bytes(conditional_bytecode) == expected
@@ -514,7 +516,9 @@ def test_opcodes_if(conditional_bytecode: bytes, expected: bytes):
def test_switch(
tx_data: bytes, switch_bytecode: bytes, expected_storage: Mapping, default_t8n: TransitionTool
):
- """Test that the switch opcode macro gets executed as using the t8n tool."""
+ """
+ Test that the switch opcode macro gets executed using the t8n tool.
+ """
code_address = Address(0x1000)
pre = Alloc(
{
diff --git a/src/ethereum_test_tools/tools_code/generators.py b/src/ethereum_test_tools/tools_code/generators.py
index 7af682ab6c4..350eb9b0f3b 100644
--- a/src/ethereum_test_tools/tools_code/generators.py
+++ b/src/ethereum_test_tools/tools_code/generators.py
@@ -18,12 +18,11 @@ class Initcode(Bytecode):
The execution gas cost of the initcode is calculated, and also the
deployment gas costs for the deployed code.
- The initcode can be padded to a certain length if necessary, which
- does not affect the deployed code.
+ The initcode can be padded to a certain length if necessary, which does not
+ affect the deployed code.
- Other costs such as the CREATE2 hashing costs or the initcode_word_cost
- of EIP-3860 are *not* taken into account by any of these calculated
- costs.
+ Other costs such as the CREATE2 hashing costs or the initcode_word_cost of
+ EIP-3860 are *not* taken into account by any of these calculated costs.
"""
deploy_code: SupportsBytes | Bytes
@@ -131,8 +130,8 @@ class CodeGasMeasure(Bytecode):
"""
Helper class used to generate bytecode that measures gas usage of a
bytecode, taking into account and subtracting any extra overhead gas costs
- required to execute.
- By default, the result gas calculation is saved to storage key 0.
+ required to execute. By default, the result gas calculation is saved to
+ storage key 0.
"""
code: Bytecode
@@ -199,8 +198,8 @@ def __new__(
):
"""
Assemble the conditional bytecode by generating the necessary jump and
- jumpdest opcodes surrounding the condition and the two possible execution
- paths.
+ jumpdest opcodes surrounding the condition and the two possible
+ execution paths.
In the future, PC usage should be replaced by using RJUMP and RJUMPI
"""
@@ -213,15 +212,16 @@ def __new__(
# First we append a jumpdest to the start of the true branch
if_true = Op.JUMPDEST + if_true
- # Then we append the unconditional jump to the end of the false branch, used to skip
- # the true branch
+ # Then we append the unconditional jump to the end of the false
+ # branch, used to skip the true branch
if_false += Op.JUMP(Op.ADD(Op.PC, len(if_true) + 3))
- # Then we need to do the conditional jump by skipping the false branch
+ # Then we need to do the conditional jump by skipping the false
+ # branch
condition = Op.JUMPI(Op.ADD(Op.PC, len(if_false) + 3), condition)
- # Finally we append the condition, false and true branches, plus the jumpdest at the
- # very end
+ # Finally we append the condition, false and true branches, plus
+ # the jumpdest at the very end
bytecode = condition + if_false + if_true + Op.JUMPDEST
elif evm_code_type == EVMCodeType.EOF_V1:
@@ -284,8 +284,8 @@ def is_terminating(self) -> bool:
class CalldataCase(Case):
"""
- Small helper class to represent a single case whose condition depends
- on the value of the contract's calldata in a Switch case statement.
+ Small helper class to represent a single case whose condition depends on
+ the value of the contract's calldata in a Switch case statement.
By default the calldata is read from position zero, but this can be
overridden using `position`.
@@ -305,12 +305,14 @@ class Switch(Bytecode):
Helper class used to generate switch-case expressions in EVM bytecode.
Switch-case behavior:
- - If no condition is met in the list of BytecodeCases conditions,
- the `default_action` bytecode is executed.
- - If multiple conditions are met, the action from the first valid
- condition is the only one executed.
- - There is no fall through; it is not possible to execute multiple
- actions.
+ - If no condition is met in the list of BytecodeCases
+ conditions, the `default_action` bytecode is executed.
+
+ - If multiple conditions are met, the action from the first valid
+ condition is the only one executed.
+
+ - There is no fall through; it is not possible to execute
+ multiple actions.
"""
default_action: Bytecode | Op | None
@@ -338,55 +340,57 @@ def __new__(
evm_code_type: EVMCodeType = EVMCodeType.LEGACY,
):
"""
- Assemble the bytecode by looping over the list of cases and adding
- the necessary [R]JUMPI and JUMPDEST opcodes in order to replicate
+ Assemble the bytecode by looping over the list of cases and adding the
+ necessary [R]JUMPI and JUMPDEST opcodes in order to replicate
switch-case behavior.
"""
- # The length required to jump over subsequent actions to the final JUMPDEST at the end
- # of the switch-case block:
- # - add 6 per case for the length of the JUMPDEST and JUMP(ADD(PC, action_jump_length))
- # bytecode
- # - add 3 to the total to account for this action's JUMP; the PC within the call
- # requires a "correction" of 3.
+ # The length required to jump over subsequent actions to the final
+ # JUMPDEST at the end of the switch-case block:
+ # - add 6 per case for the length of the JUMPDEST and
+ # JUMP(ADD(PC, action_jump_length)) bytecode
+ #
+ # - add 3 to the total to account for this action's JUMP;
+ # the PC within the call requires a "correction" of 3.
bytecode = Bytecode()
- # All conditions get prepended to this bytecode; if none are met, we reach the default
+ # All conditions get prepended to this bytecode; if none are met, we
+ # reach the default
if evm_code_type == EVMCodeType.LEGACY:
action_jump_length = sum(len(case.action) + 6 for case in cases) + 3
bytecode = default_action + Op.JUMP(Op.ADD(Op.PC, action_jump_length))
- # The length required to jump over the default action and its JUMP bytecode
+ # The length required to jump over the default action and its JUMP
+ # bytecode
condition_jump_length = len(bytecode) + 3
elif evm_code_type == EVMCodeType.EOF_V1:
action_jump_length = sum(
len(case.action) + (len(Op.RJUMP[0]) if not case.is_terminating else 0)
for case in cases
- # On not terminating cases, we need to add 3 bytes for the RJUMP
+ # On not terminating cases, we need to add 3 bytes for the
+ # RJUMP
)
bytecode = default_action + Op.RJUMP[action_jump_length]
- # The length required to jump over the default action and its JUMP bytecode
+ # The length required to jump over the default action and its JUMP
+ # bytecode
condition_jump_length = len(bytecode)
- # Reversed: first case in the list has priority; it will become the outer-most onion layer.
- # We build up layers around the default_action, after 1 iteration of the loop, a simplified
- # representation of the bytecode is:
+ # Reversed: first case in the list has priority; it will become the
+ # outer-most onion layer. We build up layers around the default_action,
+ # after 1 iteration of the loop, a simplified representation of the
+ # bytecode is:
#
- # JUMPI(case[n-1].condition)
- # + default_action + JUMP()
- # + JUMPDEST + case[n-1].action + JUMP()
+ # JUMPI(case[n-1].condition)
+ # + default_action + JUMP()
+ # + JUMPDEST + case[n-1].action + JUMP()
#
# and after n=len(cases) iterations:
#
- # JUMPI(case[0].condition)
- # + JUMPI(case[1].condition)
- # ...
- # + JUMPI(case[n-1].condition)
- # + default_action + JUMP()
- # + JUMPDEST + case[n-1].action + JUMP()
- # + ...
- # + JUMPDEST + case[1].action + JUMP()
- # + JUMPDEST + case[0].action + JUMP()
- #
+ # JUMPI(case[0].condition)
+ # + JUMPI(case[1].condition)
+ # ...
+ # + JUMPI(case[n-1].condition) + default_action + JUMP() + JUMPDEST +
+ # case[n-1].action + JUMP() + ... + JUMPDEST + case[1].action + JUMP()
+ # + JUMPDEST + case[0].action + JUMP()
for case in reversed(cases):
action = case.action
if evm_code_type == EVMCodeType.LEGACY:
diff --git a/src/ethereum_test_tools/utility/generators.py b/src/ethereum_test_tools/utility/generators.py
index 95c9094e91c..f155cb3ba41 100644
--- a/src/ethereum_test_tools/utility/generators.py
+++ b/src/ethereum_test_tools/utility/generators.py
@@ -43,7 +43,10 @@ def param(self):
class ContractAddressHasBalance(StrEnum):
- """Represents whether the target deployment test has a balance before deployment."""
+ """
+ Represents whether the target deployment test has a balance before
+ deployment.
+ """
ZERO_BALANCE = "zero_balance"
NONZERO_BALANCE = "nonzero_balance"
@@ -51,8 +54,8 @@ class ContractAddressHasBalance(StrEnum):
class SystemContractDeployTestFunction(Protocol):
"""
- Represents a function to be decorated with the `generate_system_contract_deploy_test`
- decorator.
+ Represents a function to be decorated with the
+ `generate_system_contract_deploy_test` decorator.
"""
def __call__(
@@ -64,19 +67,21 @@ def __call__(
test_type: DeploymentTestType,
) -> Generator[Block, None, None]:
"""
- Args:
- fork (Fork): The fork to test.
- pre (Alloc): The pre state of the blockchain.
- post (Alloc): The post state of the blockchain.
- test_type (DeploymentTestType): The type of deployment test currently being filled.
+ Arguments:
+ fork (Fork): The fork to test.
+ pre (Alloc): The pre state of the blockchain.
+ post (Alloc): The post state of the blockchain.
+ test_type (DeploymentTestType): The type of deployment
+ test currently being filled.
Yields:
- Block: To add after the block where the contract was deployed (e.g. can contain extra
- transactions to execute after the system contract has been deployed, and/or a header
- object to verify that the headers are correct).
+ Block: To add after the block where the contract was deployed
+ (e.g. can contain extra transactions to execute after
+ the system contract has been deployed, and/or a header
+ object to verify that the headers are correct).
"""
- ...
+ pass
def generate_system_contract_deploy_test(
@@ -92,27 +97,44 @@ def generate_system_contract_deploy_test(
Generates following test cases:
- | before/after fork | fail on | invalid block |
- | | empty block | |
- --------------------------------------|-------------------|-------------|---------------|
- `deploy_before_fork-nonzero_balance` | before | False | False |
- `deploy_before_fork-zero_balance` | before | True | False |
- `deploy_on_fork_block-nonzero_balance`| on fork block | False | False |
- `deploy_on_fork_block-zero_balance` | on fork block | True | False |
- `deploy_after_fork-nonzero_balance` | after | False | False |
- `deploy_after_fork-zero_balance` | after | True | True |
-
- The `has balance` parametrization does not have an effect on the expectation of the test.
-
- Args:
- fork (Fork): The fork to test.
- tx_json_path (Path): Path to the JSON file with the transaction to deploy the system
- contract.
- Providing a JSON file is useful to copy-paste the transaction from the EIP.
- expected_deploy_address (Address): The expected address of the deployed contract.
- fail_on_empty_code (bool): If True, the test is expected to fail on empty code.
- expected_system_contract_storage (Dict | None): The expected storage of the system
- contract.
+ | before/after fork | fail on | invalid block |
+ empty block | |
+ --------------------|-------------------|--------------|----------------|
+ `deploy_before_fork-| before | False | False |
+ nonzero_balance`
+
+ `deploy_before_fork-| before | True | False |
+ zero_balance`
+
+ `deploy_on_fork_ | on fork block | False | False |
+ block-nonzero_
+ balance`
+
+ `deploy_on_fork_ | on fork block | True | False |
+ block-zero_balance`
+
+ `deploy_after_fork | after | False | False |
+ -nonzero_balance`
+
+ `deploy_after_fork | after | True | True |
+ -zero_balance`
+
+
+ The `has balance` parametrization does not have an effect on the
+ expectation of the test.
+
+ Arguments:
+ fork (Fork): The fork to test.
+ tx_json_path (Path): Path to the JSON file with the transaction to
+ deploy the system contract. Providing a JSON
+ file is useful to copy-paste the transaction
+ from the EIP.
+ expected_deploy_address (Address): The expected address of the deployed
+ contract.
+ fail_on_empty_code (bool): If True, the test is expected to fail
+ on empty code.
+ expected_system_contract_storage (Dict | None): The expected storage of
+ the system contract.
"""
with open(tx_json_path, mode="r") as f:
@@ -213,7 +235,8 @@ def wrapper(
)
balance = 1 if has_balance == ContractAddressHasBalance.NONZERO_BALANCE else 0
pre[expected_deploy_address] = Account(
- code=b"", # Remove the code that is automatically allocated on the fork
+ code=b"", # Remove the code that is automatically allocated on
+ # the fork
nonce=0,
balance=balance,
)
@@ -227,7 +250,8 @@ def wrapper(
fork_pre_allocation = fork.pre_allocation_blockchain()
assert expected_deploy_address_int in fork_pre_allocation
expected_code = fork_pre_allocation[expected_deploy_address_int]["code"]
- # Note: balance check is omitted; it may be modified by the underlying, decorated test
+ # Note: balance check is omitted; it may be modified by the
+ # underlying, decorated test
account_kwargs = {
"code": expected_code,
"nonce": 1,
@@ -240,8 +264,8 @@ def wrapper(
nonce=1,
)
- # Extra blocks (if any) returned by the decorated function to add after the
- # contract is deployed.
+ # Extra blocks (if any) returned by the decorated function to add
+ # after the contract is deployed.
if test_type != DeploymentTestType.DEPLOY_AFTER_FORK or not fail_on_empty_code:
# Only fill more blocks if the deploy block does not fail.
blocks += list(func(fork=fork, pre=pre, post=post, test_type=test_type))
@@ -265,14 +289,15 @@ def generate_system_contract_error_test(
max_gas_limit: int,
):
"""
- Generate a test that verifies the correct behavior when a system contract fails execution.
+ Generate a test that verifies the correct behavior when a system contract
+ fails execution.
Parametrizations required:
- - system_contract (Address): The address of the system contract to deploy.
- - valid_from (Fork): The fork from which the test is valid.
+ - system_contract (Address): The address of the system contract to deploy.
+ - valid_from (Fork): The fork from which the test is valid.
- Args:
- max_gas_limit (int): The maximum gas limit for the system transaction.
+ Arguments:
+ max_gas_limit (int): The maximum gas limit for the system transaction.
"""
@@ -288,25 +313,27 @@ def wrapper(
):
modified_system_contract_code = Bytecode()
- # Depending on the test case, we need to modify the system contract code accordingly.
+ # Depending on the test case, we need to modify the system contract
+ # code accordingly.
if (
test_type == SystemContractTestType.GAS_LIMIT
or test_type == SystemContractTestType.OUT_OF_GAS_ERROR
):
# Run code so that it reaches the gas limit.
gas_costs = fork.gas_costs()
- # The code works by storing N values to storage, and N is calculated based on the
- # gas costs for the given fork.
- # This code will only work once, so if the system contract is re-executed
- # in a subsequent block, it will consume less gas.
+ # The code works by storing N values to storage, and N is
+ # calculated based on the gas costs for the given fork. This
+ # code will only work once, so if the system contract is re-
+ # executed in a subsequent block, it will consume less gas.
gas_used_per_storage = (
gas_costs.G_STORAGE_SET + gas_costs.G_COLD_SLOAD + (gas_costs.G_VERY_LOW * 2)
)
modified_system_contract_code += sum(
Op.SSTORE(i, 1) for i in range(max_gas_limit // gas_used_per_storage)
)
- # If the gas limit is not divisible by the gas used per storage, we need to add
- # some NO-OP (JUMPDEST) to the code that each consume 1 gas.
+ # If the gas limit is not divisible by the gas used per
+ # storage, we need to add some NO-OP (JUMPDEST) to the code
+ # that each consume 1 gas.
assert gas_costs.G_JUMPDEST == 1, (
f"JUMPDEST gas cost should be 1, but got {gas_costs.G_JUMPDEST}. "
"Generator `generate_system_contract_error_test` needs to be updated."
@@ -316,8 +343,9 @@ def wrapper(
)
if test_type == SystemContractTestType.OUT_OF_GAS_ERROR:
- # If the test type is OUT_OF_GAS_ERROR, we need to add a JUMPDEST to the code
- # to ensure that we go over the limit by one gas.
+ # If the test type is OUT_OF_GAS_ERROR, we need to add a
+ # JUMPDEST to the code to ensure that we go over the limit
+ # by one gas.
modified_system_contract_code += Op.JUMPDEST
modified_system_contract_code += Op.STOP
elif test_type == SystemContractTestType.REVERT_ERROR:
@@ -335,7 +363,8 @@ def wrapper(
balance=0,
)
- # Simple test transaction to verify the block failed to modify the state.
+ # Simple test transaction to verify the block failed to modify the
+ # state.
value_receiver = pre.fund_eoa(amount=0)
test_tx = Transaction(
to=value_receiver,
diff --git a/src/ethereum_test_tools/utility/pytest.py b/src/ethereum_test_tools/utility/pytest.py
index 4e01a6cd4a7..ca93f55e09b 100644
--- a/src/ethereum_test_tools/utility/pytest.py
+++ b/src/ethereum_test_tools/utility/pytest.py
@@ -8,8 +8,8 @@
class UnknownParameterInCasesError(Exception):
"""
- Exception raised when a test case contains parameters
- that are not present in the defaults.
+ Exception raised when a test case contains parameters that are not present
+ in the defaults.
"""
def __init__(self) -> None:
@@ -34,66 +34,76 @@ def extend_with_defaults(
The function returns a dictionary that can be directly unpacked and passed
to the `@pytest.mark.parametrize` decorator.
- Args:
- defaults (Dict[str, Any]): A dictionary of default parameter names and
- their values. These values will be added to each case unless the case
- already defines a value for each parameter.
- cases (List[ParameterSet]): A list of `pytest.param` objects representing
- different test cases. Its first argument must be a dictionary defining
- parameter names and values.
- parametrize_kwargs (Any): Additional keyword arguments to be passed to
- `@pytest.mark.parametrize`. These arguments are not modified by this
- function and are passed through unchanged.
+ Arguments:
+ defaults (Dict[str, Any]): A dictionary of default parameter names
+ and their values. These values will be added
+ to each case unless the case already defines
+ a value for each parameter.
+ cases (List[ParameterSet]): A list of `pytest.param` objects
+ representing different test cases.
+ Its first argument must be a dictionary
+ defining parameter names and values.
+ parametrize_kwargs (Any): Additional keyword arguments to be passed to
+ `@pytest.mark.parametrize`. These arguments are
+ not modified by this function and are passed
+ through unchanged.
Returns:
- Dict[str, Any]: A dictionary with the following structure:
- `argnames`: A list of parameter names.
- `argvalues`: A list of test cases with modified parameter values.
- `parametrize_kwargs`: Additional keyword arguments passed through unchanged.
+ Dict[str, Any]: A dictionary with the following structure:
+ `argnames`: A list of parameter names.
+ `argvalues`: A list of test cases with modified parameter values.
+ `parametrize_kwargs`: Additional keyword arguments passed
+ through unchanged.
Example:
- ```python
- @pytest.mark.parametrize(**extend_with_defaults(
- defaults=dict(
- min_value=0, # default minimum value is 0
- max_value=100, # default maximum value is 100
- average=50, # default average value is 50
+ ```python
+ @pytest.mark.parametrize(**extend_with_defaults(
+ defaults=dict(
+ min_value=0, # default minimum value is 0
+ max_value=100, # default maximum value is 100
+ average=50, # default average value is 50
+ ),
+
+ cases=[
+ pytest.param(
+ dict(), # use default values
+ id='default_case',
+ ),
+
+ pytest.param(
+ dict(min_value=10), # override with min_value=10
+ id='min_value_10',
+ ),
+
+ pytest.param(
+ dict(max_value=200), # override with max_value=200
+ id='max_value_200',
+ ),
+
+ pytest.param(
+ dict(min_value=-10, max_value=50), # override both min_value
+ # and max_value
+ id='min_-10_max_50',
+ ),
+
+ pytest.param(
+ # all defaults are overridden
+ dict(min_value=20, max_value=80, average=50),
+ id="min_20_max_80_avg_50",
),
- cases=[
- pytest.param(
- dict(), # use default values
- id='default_case',
- ),
- pytest.param(
- dict(min_value=10), # override with min_value=10
- id='min_value_10',
- ),
- pytest.param(
- dict(max_value=200), # override with max_value=200
- id='max_value_200',
- ),
- pytest.param(
- dict(min_value=-10, max_value=50), # override both min_value
- # and max_value
- id='min_-10_max_50',
- ),
- pytest.param(
- dict(min_value=20, max_value=80, average=50), # all defaults
- # are overridden
- id="min_20_max_80_avg_50",
- ),
- pytest.param(
- dict(min_value=100, max_value=0), # invalid range
- id='invalid_range',
- marks=pytest.mark.xfail(reason='invalid range'),
- )
- ],
- ))
- def test_range(min_value, max_value, average):
- assert min_value <= max_value
- assert min_value <= average <= max_value
- ```
+
+ pytest.param(
+ dict(min_value=100, max_value=0), # invalid range
+ id='invalid_range',
+ marks=pytest.mark.xfail(reason='invalid range'),
+ )
+ ],
+ ))
+ def test_range(min_value, max_value, average):
+ assert min_value <= max_value
+ assert min_value <= average <= max_value
+ ```
The above test will execute with the following sets of parameters:
@@ -103,14 +113,15 @@ def test_range(min_value, max_value, average):
"max_value_200": {"min_value": 0, "max_value": 200, "average": 50}
"min_-10_max_50": {"min_value": -10, "max_value": 50, "average": 50}
"min_20_max_80_avg_50": {"min_value": 20, "max_value": 80, "average": 50}
- "invalid_range": {"min_value": 100, "max_value": 0, "average": 50} # expected to fail
+ # expected to fail
+ "invalid_range": {"min_value": 100, "max_value": 0, "average": 50}
```
Notes:
- - Each case in `cases` must contain exactly one value, which is a dictionary
- of parameter values.
- - The function performs an in-place update of the `cases` list, so the
- original `cases` list is modified.
+ - Each case in `cases` must contain exactly one value, which is a
+ dictionary of parameter values.
+ - The function performs an in-place update of the `cases` list, so
+ the original `cases` list is modified.
"""
for i, case in enumerate(cases):
@@ -120,7 +131,8 @@ def test_range(min_value, max_value, average):
)
if set(case.values[0].keys()) - set(defaults.keys()):
raise UnknownParameterInCasesError()
- # Overwrite values in defaults if the parameter is present in the test case values
+ # Overwrite values in defaults if the parameter is present in the test
+ # case values
merged_params = {**defaults, **case.values[0]} # type: ignore
cases[i] = pytest.param(*merged_params.values(), id=case.id, marks=case.marks)
diff --git a/src/ethereum_test_tools/utility/tests/test_pytest.py b/src/ethereum_test_tools/utility/tests/test_pytest.py
index f8f242c92f2..0db71cd0eac 100644
--- a/src/ethereum_test_tools/utility/tests/test_pytest.py
+++ b/src/ethereum_test_tools/utility/tests/test_pytest.py
@@ -6,7 +6,8 @@
from ethereum_test_tools.utility.pytest import UnknownParameterInCasesError
-# TODO: This is from the docstring in extend_with_defaults; should be tested automatically
+# TODO: This is from the docstring in extend_with_defaults; should be tested
+# automatically
@pytest.mark.parametrize(
**extend_with_defaults(
defaults={
@@ -28,13 +29,13 @@
id="max_value_200",
),
pytest.param(
- {"min_value": -10, "max_value": 50}, # override both min_value
- # and max_value
+ # override both min_value and max_value
+ {"min_value": -10, "max_value": 50},
id="min_-10_max_50",
),
pytest.param(
- {"min_value": 20, "max_value": 80, "average": 50}, # all defaults
- # are overridden
+ # all defaults are overridden
+ {"min_value": 20, "max_value": 80, "average": 50},
id="min_20_max_80_avg_50",
),
pytest.param(
diff --git a/src/ethereum_test_tools/utility/versioning.py b/src/ethereum_test_tools/utility/versioning.py
index 1d0578fd94e..58b91bcd1fa 100644
--- a/src/ethereum_test_tools/utility/versioning.py
+++ b/src/ethereum_test_tools/utility/versioning.py
@@ -9,10 +9,11 @@ def get_current_commit_hash_or_tag(repo_path=".", shorten_hash=False):
"""
Get the latest commit tag or commit hash from the repository.
- If a tag points to the current commit, return the tag name.
- If no tag exists:
- - If shorten_hash is True, return the first 8 characters of the commit hash.
- - Otherwise, return the full commit hash.
+ If a tag points to the current commit, return the tag name. If no tag
+ exists:
+ - If shorten_hash is True, return the first 8 characters of the
+ commit hash.
+ - Otherwise, return the full commit hash.
"""
try:
repo = Repo(repo_path)
diff --git a/src/ethereum_test_types/account_types.py b/src/ethereum_test_types/account_types.py
index 53befb4592e..51a17ebe15d 100644
--- a/src/ethereum_test_types/account_types.py
+++ b/src/ethereum_test_types/account_types.py
@@ -50,8 +50,8 @@ class State:
def set_account(state: State, address: Bytes20, account: Optional[FrontierAccount]) -> None:
"""
- Set the `Account` object at an address. Setting to `None` deletes
- the account (but not its storage, see `destroy_account()`).
+ Set the `Account` object at an address. Setting to `None` deletes the
+ account (but not its storage, see `destroy_account()`).
"""
trie_set(state._main_trie, address, account)
@@ -93,9 +93,11 @@ def get_storage_root(address: Bytes20) -> Bytes32:
class EOA(Address):
"""
- An Externally Owned Account (EOA) is an account controlled by a private key.
+ An Externally Owned Account (EOA) is an account controlled by a private
+ key.
- The EOA is defined by its address and (optionally) by its corresponding private key.
+ The EOA is defined by its address and (optionally) by its corresponding
+ private key.
"""
key: Hash | None
@@ -354,7 +356,10 @@ def fund_eoa(
delegation: Address | Literal["Self"] | None = None,
nonce: NumberConvertible | None = None,
) -> EOA:
- """Add a previously unused EOA to the pre-alloc with the balance specified by `amount`."""
+ """
+ Add a previously unused EOA to the pre-alloc with the balance specified
+ by `amount`.
+ """
raise NotImplementedError("fund_eoa is not implemented in the base class")
def fund_address(self, address: Address, amount: NumberConvertible):
@@ -370,7 +375,7 @@ def empty_account(self) -> Address:
"""
Return a previously unused account guaranteed to be empty.
- This ensures the account has zero balance, zero nonce, no code, and no storage.
- The account is not a precompile or a system contract.
+ This ensures the account has zero balance, zero nonce, no code, and no
+ storage. The account is not a precompile or a system contract.
"""
raise NotImplementedError("empty_account is not implemented in the base class")
diff --git a/src/ethereum_test_types/blob_types.py b/src/ethereum_test_types/blob_types.py
index 3adc860c860..fa2cc59610a 100644
--- a/src/ethereum_test_types/blob_types.py
+++ b/src/ethereum_test_types/blob_types.py
@@ -50,7 +50,8 @@ class Blob(CamelModel):
data: Bytes
commitment: Bytes
proof: List[Bytes] | Bytes # Bytes < Osaka, List[Bytes] >= Osaka
- cells: List[Bytes] | None # None (in json: null) < Osaka, List[Bytes] >= Osaka
+ # None (in json: null) < Osaka, List[Bytes] >= Osaka
+ cells: List[Bytes] | None
versioned_hash: Hash
name: str
@@ -72,13 +73,18 @@ def trusted_setup(cls):
@staticmethod
def get_filename(fork: Fork, seed: int) -> str:
- """Return filename this blob would have as string (with .json extension)."""
+ """
+ Return filename this blob would have as string (with .json extension).
+ """
amount_cell_proofs: int = cast(int, fork.get_blob_constant("AMOUNT_CELL_PROOFS"))
return "blob_" + str(seed) + "_cell_proofs_" + str(amount_cell_proofs) + ".json"
@staticmethod
def get_filepath(fork: Fork, seed: int):
- """Return the Path to the blob that would be created with these parameters."""
+ """
+ Return the Path to the blob that would be created with these
+ parameters.
+ """
# determine amount of cell proofs for this fork (0 or 128)
would_be_filename: str = Blob.get_filename(fork, seed)
@@ -87,7 +93,10 @@ def get_filepath(fork: Fork, seed: int):
@staticmethod
def from_fork(fork: Fork, seed: int = 0, timestamp: int = 0) -> "Blob":
- """Construct Blob instances. Fork logic is encapsulated within nested functions."""
+ """
+ Construct Blob instances. Fork logic is encapsulated within nested
+ functions.
+ """
def generate_blob_data(rng_seed: int = 0) -> Bytes:
"""Calculate blob data deterministically via provided seed."""
@@ -139,12 +148,16 @@ def get_commitment(data: Bytes) -> Bytes:
return commitment
def get_proof(fork: Fork, data: Bytes) -> List[Bytes] | Bytes:
- # determine whether this fork is = osaka by looking at amount of cell_proofs
+            # determine whether this fork is >= osaka by looking at
+            # amount of cell_proofs
amount_cell_proofs = fork.get_blob_constant("AMOUNT_CELL_PROOFS")
# cancun, prague
if amount_cell_proofs == 0:
- z = 2 # 2 is one of many possible valid field elements z (https://github.com/ethereum/consensus-specs/blob/ad884507f7a1d5962cd3dfb5f7b3e41aab728c55/tests/core/pyspec/eth2spec/test/utils/kzg_tests.py#L58-L66)
+                z = 2  # 2 is one of many possible valid field elements z
+                # (https://github.com/ethereum/consensus-specs/blob/ad884507f
+                # 7a1d5962cd3dfb5f7b3e41aab728c55/tests/core/pyspec/eth2spec/
+                # test/utils/kzg_tests.py#L58-L66)
z_valid_size: bytes = z.to_bytes(
cast(int, fork.get_blob_constant("BYTES_PER_FIELD_ELEMENT")), byteorder="big"
)
@@ -164,7 +177,8 @@ def get_proof(fork: Fork, data: Bytes) -> List[Bytes] | Bytes:
)
def get_cells(fork: Fork, data: Bytes) -> List[Bytes] | None:
- # determine whether this fork is = osaka by looking at amount of cell_proofs
+            # determine whether this fork is >= osaka by looking at
+            # amount of cell_proofs
amount_cell_proofs = fork.get_blob_constant("AMOUNT_CELL_PROOFS")
# cancun, prague
@@ -189,8 +203,8 @@ def get_cells(fork: Fork, data: Bytes) -> List[Bytes] | None:
parents=True, exist_ok=True
) # create all necessary dirs on the way
- # handle transition forks
- # (blob related constants are needed and only available for normal forks)
+ # handle transition forks (blob related constants are needed and only
+ # available for normal forks)
fork = fork.fork_at(timestamp=timestamp)
# if this blob already exists then load from file. use lock
@@ -224,7 +238,8 @@ def get_cells(fork: Fork, data: Bytes) -> List[Bytes] | None:
seed=seed,
timestamp=timestamp,
)
- # for most effective caching temporarily persist every blob that is created in cache
+ # for most effective caching temporarily persist every blob that is
+ # created in cache
blob.write_to_file()
return blob
@@ -234,7 +249,8 @@ def from_file(file_name: str) -> "Blob":
"""
Read a .json file and reconstruct object it represents.
- You can load a blob only via its filename (with or without .json extension).
+ You can load a blob only via its filename (with or without .json
+ extension).
"""
# ensure filename was passed
assert file_name.startswith("blob_"), (
@@ -272,11 +288,15 @@ def write_to_file(self):
if output_location.exists():
logger.debug(f"Blob {output_location} already exists. It will be overwritten.")
- with open(output_location, "w", encoding="utf-8") as f: # overwrite existing
+ # overwrite existing
+ with open(output_location, "w", encoding="utf-8") as f:
f.write(json_str)
def verify_cell_kzg_proof_batch(self, cell_indices: list) -> bool:
- """Check whether all cell proofs are valid and returns True only if that is the case."""
+ """
+ Check whether all cell proofs are valid and returns True only if that
+ is the case.
+ """
amount_cell_proofs: int = cast(int, self.fork.get_blob_constant("AMOUNT_CELL_PROOFS"))
assert amount_cell_proofs > 0, (
@@ -303,12 +323,14 @@ def delete_cells_then_recover_them(self, deletion_indices: list[int]):
"""
Simulate the cell recovery process in user-specified scenario.
- Note: Requirement for successful reconstruction is having at least N of the 2N cells.
+ Note: Requirement for successful reconstruction is having at least N of
+ the 2N cells.
- Theoretical Usage: You pass a cell list with to 128 elements to this function
- along with a list of deletion indices. These cells will be deleted and then
- the ckzg recovery mechanism is used to repair the missing cells.
- If no assertion is triggered the reconstruction was successful.
+        Theoretical Usage: You pass a cell list with up to 128 elements to
+        this function along with a list of deletion indices. These cells
+        will be deleted and then the ckzg recovery mechanism is used to
+        repair the missing cells. If no assertion is triggered the
+        reconstruction was successful.
"""
amount_cell_proofs: int = cast(int, self.fork.get_blob_constant("AMOUNT_CELL_PROOFS"))
@@ -370,7 +392,8 @@ class ProofCorruptionMode(Enum):
"""
Define what the proof corruption modes do.
- For Osaka and later each Bytes object in the list is manipulated this way.
+ For Osaka and later each Bytes object in the list is manipulated this
+ way.
"""
CORRUPT_FIRST_BYTE = 1 # corrupts a single byte (index 0)
@@ -382,7 +405,9 @@ def corrupt_proof(self, mode: ProofCorruptionMode):
"""Corrupt the proof field, supports different corruption modes."""
def corrupt_byte(b: bytes) -> Bytes:
- """Bit-flip all bits of provided byte using XOR to guarantee change."""
+ """
+ Bit-flip all bits of provided byte using XOR to guarantee change.
+ """
if len(b) != 1:
raise ValueError("Input must be a single byte")
return Bytes(bytes([b[0] ^ 0xFF]))
diff --git a/src/ethereum_test_types/block_access_list/__init__.py b/src/ethereum_test_types/block_access_list/__init__.py
index 6544bc71083..03cad90aae0 100644
--- a/src/ethereum_test_types/block_access_list/__init__.py
+++ b/src/ethereum_test_types/block_access_list/__init__.py
@@ -1,8 +1,9 @@
"""
Block Access List (BAL) models for EIP-7928.
-Following the established pattern in the codebase (AccessList, AuthorizationTuple),
-these are simple data classes that can be composed together.
+Following the established pattern in the codebase (AccessList,
+AuthorizationTuple), these are simple data classes that can be composed
+together.
"""
from .account_absent_values import BalAccountAbsentValues
diff --git a/src/ethereum_test_types/block_access_list/account_absent_values.py b/src/ethereum_test_types/block_access_list/account_absent_values.py
index 7bf3fd2213e..c0fea606a97 100644
--- a/src/ethereum_test_types/block_access_list/account_absent_values.py
+++ b/src/ethereum_test_types/block_access_list/account_absent_values.py
@@ -1,10 +1,11 @@
"""
BalAccountAbsentValues class for BAL testing.
-This module provides a unified class for specifying explicit absent values in Block Access Lists.
-This class uses the same change classes as BalAccountChanges to specify specific values that
-should NOT exist in the BAL. For checking complete absence, use BalAccountExpectation
-with empty lists instead.
+This module provides a unified class for specifying explicit absent values
+in Block Access Lists. This class uses the same change classes as
+BalAccountChanges to specify specific values that should NOT exist in the BAL.
+For checking complete absence, use BalAccountExpectation with empty lists
+instead.
"""
from typing import List
@@ -31,18 +32,19 @@
class BalAccountAbsentValues(CamelModel):
"""
- Represents explicit absent value expectations for a specific account in a block.
+ Represents explicit absent value expectations for a specific account
+ in a block.
- This class specifies specific changes that should NOT exist in the BAL for a
- given account.
+ This class specifies specific changes that should NOT exist in the BAL
+ for a given account.
- IMPORTANT: This class is for checking that specific values are absent, NOT for
- checking that entire categories are empty. For complete absence checks
- (e.g., "no nonce changes at all"), use BalAccountExpectation with empty lists
- instead.
+ IMPORTANT: This class is for checking that specific values are absent,
+ NOT for checking that entire categories are empty. For complete
+ absence checks (e.g., "no nonce changes at all"), use
+ BalAccountExpectation with empty lists instead.
- The validation works by checking that none of the specified explicit changes
- exist in the actual BAL.
+ The validation works by checking that none of the specified
+ explicit changes exist in the actual BAL.
Example:
# Forbid specific nonce change at tx 1 with post_nonce=5, and specific
@@ -156,8 +158,8 @@ def _validate_forbidden_changes(
def validate_against(self, account: BalAccountChange) -> None:
"""
- Validate that the account does not contain the forbidden changes specified in
- this object.
+ Validate that the account does not contain the forbidden changes
+ specified in this object.
Args:
account: The BalAccountChange to validate against
diff --git a/src/ethereum_test_types/block_access_list/expectations.py b/src/ethereum_test_types/block_access_list/expectations.py
index ffa213bd510..8bcdeba5381 100644
--- a/src/ethereum_test_types/block_access_list/expectations.py
+++ b/src/ethereum_test_types/block_access_list/expectations.py
@@ -1,7 +1,8 @@
"""
Block Access List expectation classes for test validation.
-This module contains classes for defining and validating expected BAL values in tests.
+This module contains classes for defining and validating expected
+BAL values in tests.
"""
from typing import Any, Callable, Dict, List, Optional
@@ -27,7 +28,8 @@ class BalAccountExpectation(CamelModel):
"""
Represents expected changes to a specific account in a block.
- Same as BalAccountChange but without the address field, used for expectations.
+ Same as BalAccountChange but without the address field,
+ used for expectations.
"""
nonce_changes: List[BalNonceChange] = Field(
@@ -102,13 +104,16 @@ def modify(
Create a new expectation with a modifier for invalid test cases.
Args:
- modifiers: One or more functions that take and return a BlockAccessList
+            modifiers: One or more functions that take and return
+                a BlockAccessList
Returns:
- A new BlockAccessListExpectation instance with the modifiers applied
+ A new BlockAccessListExpectation instance with
+ the modifiers applied
Example:
- from ethereum_test_types.block_access_list.modifiers import remove_nonces
+            from ethereum_test_types.block_access_list.modifiers import \
+                remove_nonces
expectation = BlockAccessListExpectation(
account_expectations={...}
@@ -247,7 +252,8 @@ def _compare_account_expectations(
expected: BalAccountExpectation, actual: BalAccountChange
) -> None:
"""
- Compare expected and actual account changes using subsequence validation.
+ Compare expected and actual account changes using
+ subsequence validation.
Args:
expected: The expected account changes
@@ -305,7 +311,8 @@ def _compare_account_expectations(
expected_slot_changes = expected_slot.slot_changes
if not expected_slot_changes:
- # Empty expected means any slot_changes are acceptable
+ # Empty expected means any
+ # slot_changes are acceptable
pass
else:
# Validate slot_changes as subsequence
diff --git a/src/ethereum_test_types/block_access_list/modifiers.py b/src/ethereum_test_types/block_access_list/modifiers.py
index a9df449d152..d873de9e7dd 100644
--- a/src/ethereum_test_types/block_access_list/modifiers.py
+++ b/src/ethereum_test_types/block_access_list/modifiers.py
@@ -1,9 +1,9 @@
"""
BAL modifier functions for invalid test cases.
-This module provides modifier functions that can be used to modify Block Access Lists
-in various ways for testing invalid block scenarios. They are composable and can be
-combined to create complex modifications.
+This module provides modifier functions that can be used to modify Block Access
+Lists in various ways for testing invalid block scenarios. They are composable
+and can be combined to create complex modifications.
"""
from typing import Any, Callable, List, Optional
@@ -60,7 +60,10 @@ def _modify_field_value(
nested: bool = False,
slot: Optional[int] = None,
) -> Callable[[BlockAccessList], BlockAccessList]:
- """Abstracted helper to modify a field value for a specific account and transaction."""
+ """
+ Abstracted helper to modify a field value for a specific account and
+ transaction.
+ """
found_address = False
def transform(bal: BlockAccessList) -> BlockAccessList:
@@ -154,7 +157,9 @@ def modify_nonce(
def modify_balance(
address: Address, tx_index: int, balance: int
) -> Callable[[BlockAccessList], BlockAccessList]:
- """Set an incorrect balance value for a specific account and transaction."""
+ """
+ Set an incorrect balance value for a specific account and transaction.
+ """
return _modify_field_value(
address, tx_index, "balance_changes", BalBalanceChange, balance, "post_balance"
)
@@ -163,7 +168,10 @@ def modify_balance(
def modify_storage(
address: Address, tx_index: int, slot: int, value: int
) -> Callable[[BlockAccessList], BlockAccessList]:
- """Set an incorrect storage value for a specific account, transaction, and slot."""
+ """
+ Set an incorrect storage value for a specific account, transaction, and
+ slot.
+ """
return _modify_field_value(
address,
tx_index,
@@ -227,7 +235,8 @@ def transform(bal: BlockAccessList) -> BlockAccessList:
balance_indices[tx2] = True
storage_change.tx_index = HexNumber(tx1)
- # Note: storage_reads is just a list of StorageKey, no tx_index to swap
+ # Note: storage_reads is just a list of StorageKey, no tx_index to
+ # swap
# Swap in code changes
if new_account.code_changes:
diff --git a/src/ethereum_test_types/block_access_list/t8n.py b/src/ethereum_test_types/block_access_list/t8n.py
index 808ace06f57..848b8d796af 100644
--- a/src/ethereum_test_types/block_access_list/t8n.py
+++ b/src/ethereum_test_types/block_access_list/t8n.py
@@ -16,8 +16,10 @@ class BlockAccessList(EthereumTestRootModel[List[BalAccountChange]]):
"""
Block Access List for t8n tool communication and fixtures.
- This model represents the BAL exactly as defined in EIP-7928 - it is itself a list
- of account changes (root model), not a container. Used for:
+    This model represents the BAL exactly as defined in EIP-7928: it is
+    itself a list of account changes (root model), not a container.
+
+    Used for:
- Communication with t8n tools
- Fixture generation
- RLP encoding for hash verification
diff --git a/src/ethereum_test_types/block_types.py b/src/ethereum_test_types/block_types.py
index 268296f49a1..8e7977a3b12 100644
--- a/src/ethereum_test_types/block_types.py
+++ b/src/ethereum_test_types/block_types.py
@@ -38,7 +38,10 @@ class EnvironmentDefaults:
class WithdrawalGeneric(CamelModel, Generic[NumberBoundTypeVar]):
- """Withdrawal generic type, used as a parent class for `Withdrawal` and `FixtureWithdrawal`."""
+ """
+ Withdrawal generic type, used as a parent class for `Withdrawal` and
+ `FixtureWithdrawal`.
+ """
index: NumberBoundTypeVar
validator_index: NumberBoundTypeVar
@@ -47,8 +50,8 @@ class WithdrawalGeneric(CamelModel, Generic[NumberBoundTypeVar]):
def to_serializable_list(self) -> List[Any]:
"""
- Return list of the withdrawal's attributes in the order they should
- be serialized.
+ Return list of the withdrawal's attributes in the order they should be
+ serialized.
"""
return [
Uint(self.index),
@@ -98,8 +101,8 @@ class EnvironmentGeneric(CamelModel, Generic[NumberBoundTypeVar]):
class Environment(EnvironmentGeneric[ZeroPaddedHexNumber]):
"""
- Structure used to keep track of the context in which a block
- must be executed.
+ Structure used to keep track of the context in which a block must be
+ executed.
"""
blob_gas_used: ZeroPaddedHexNumber | None = Field(None, alias="currentBlobGasUsed")
diff --git a/src/ethereum_test_types/chain_config_types.py b/src/ethereum_test_types/chain_config_types.py
index a8c7b2e484e..d9ece600f1a 100644
--- a/src/ethereum_test_types/chain_config_types.py
+++ b/src/ethereum_test_types/chain_config_types.py
@@ -9,7 +9,8 @@ class ChainConfigDefaults:
"""
Default values for the chain configuration.
- Can be modified by modules that import this module and want to override the default values.
+ Can be modified by modules that import this module and want to override the
+ default values.
"""
chain_id: int = 1
diff --git a/src/ethereum_test_types/eof/constants.py b/src/ethereum_test_types/eof/constants.py
index fbc6e88a6d1..c047002fa29 100644
--- a/src/ethereum_test_types/eof/constants.py
+++ b/src/ethereum_test_types/eof/constants.py
@@ -1,7 +1,4 @@
-"""
-EVM Object Format generic constants.
-Applicable to all EOF versions.
-"""
+"""EVM Object Format generic constants. Applicable to all EOF versions."""
EOF_MAGIC = b"\xef\x00"
"""
@@ -9,21 +6,15 @@
avoid clashes with three contracts which were deployed on Mainnet.
"""
EOF_HEADER_TERMINATOR = b"\x00"
-"""
-Byte that terminates the header of the EOF format.
-"""
+"""Byte that terminates the header of the EOF format."""
LATEST_EOF_VERSION = 1
-"""
-Latest existing EOF version.
-"""
+"""Latest existing EOF version."""
VERSION_BYTE_LENGTH = 1
-"""
-Length of the version byte.
-"""
+"""Length of the version byte."""
MAX_RUNTIME_STACK_HEIGHT = 1024
"""
-Maximum height of the EVM runtime operand stack.
-Exceeding this value during execution will result in the stack overflow exception.
-This value applies to both legacy EVM and EOF.
+Maximum height of the EVM runtime operand stack. Exceeding this value during
+execution will result in the stack overflow exception. This value applies to
+both legacy EVM and EOF.
"""
diff --git a/src/ethereum_test_types/eof/v1/__init__.py b/src/ethereum_test_types/eof/v1/__init__.py
index 0a7385cad9c..1dddb0f71c2 100644
--- a/src/ethereum_test_types/eof/v1/__init__.py
+++ b/src/ethereum_test_types/eof/v1/__init__.py
@@ -1,4 +1,6 @@
-"""EVM Object Format Version 1 Library to generate bytecode for testing purposes."""
+"""
+EVM Object Format Version 1 Library to generate bytecode for testing purposes.
+"""
from dataclasses import dataclass
from enum import Enum, IntEnum, auto
@@ -56,7 +58,10 @@ class ContainerKind(Enum):
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Call class constructor without info and appends the serialization schema."""
+ """
+ Call class constructor without info and appends the serialization
+ schema.
+ """
return no_info_plain_validator_function(
source_type.from_str,
serialization=to_string_ser_schema(),
@@ -105,19 +110,18 @@ class Section(CopyValidateModel):
data: Bytes = Bytes(b"")
"""
- Data to be contained by this section.
- Can be SupportsBytes, another EOF container or any other abstract data.
+ Data to be contained by this section. Can be SupportsBytes, another EOF
+ container or any other abstract data.
"""
custom_size: int = 0
"""
- Custom size value to be used in the header.
- If unset, the header is built with length of the data.
+ Custom size value to be used in the header. If unset, the header is built
+ with length of the data.
"""
kind: SectionKind | int
"""
- Kind of section that is represented by this object.
- Can be any `int` outside of the values defined by `SectionKind`
- for testing purposes.
+ Kind of section that is represented by this object. Can be any `int`
+ outside of the values defined by `SectionKind` for testing purposes.
"""
force_type_listing: bool = False
"""
@@ -125,22 +129,16 @@ class Section(CopyValidateModel):
container.
"""
code_inputs: int = 0
- """
- Data stack items consumed by this code section (function)
- """
+ """Data stack items consumed by this code section (function)"""
code_outputs: int = NON_RETURNING_SECTION
"""
Data stack items produced by or expected at the end of this code section
(function)
"""
max_stack_increase: int | None = None
- """
- Maximum operand stack height increase above the code section inputs.
- """
+ """Maximum operand stack height increase above the code section inputs."""
max_stack_height: int | None = None
- """
- Maximum height data stack reaches during execution of code section.
- """
+ """Maximum height data stack reaches during execution of code section."""
auto_max_stack_height: bool = False
"""
Whether to automatically compute the best suggestion for the
@@ -152,20 +150,17 @@ class Section(CopyValidateModel):
code_outputs values for this code section.
"""
skip_header_listing: bool = False
- """
- Skip section from listing in the header
- """
+ """Skip section from listing in the header"""
skip_body_listing: bool = False
- """
- Skip section from listing in the body
- """
+ """Skip section from listing in the body"""
skip_types_body_listing: bool = False
"""
Skip section from listing in the types body (input, output, stack) bytes
"""
skip_types_header_listing: bool = False
"""
- Skip section from listing in the types header (not calculating input, output, stack size)
+ Skip section from listing in the types header (not calculating input,
+ output, stack size)
"""
@cached_property
@@ -219,27 +214,27 @@ def type_definition(self) -> bytes:
def with_max_stack_height(self, max_stack_height) -> "Section":
"""
- Create copy of the section with `max_stack_height` set to the
- specified value.
+ Create copy of the section with `max_stack_height` set to the specified
+ value.
"""
return self.copy(max_stack_height=max_stack_height)
def with_auto_max_stack_height(self) -> "Section":
- """Create copy of the section with `auto_max_stack_height` set to True."""
+ """
+ Create copy of the section with `auto_max_stack_height` set to True.
+ """
return self.copy(auto_max_stack_height=True)
def with_auto_code_inputs_outputs(self) -> "Section":
"""
- Create copy of the section with `auto_code_inputs_outputs` set to
- True.
+ Create copy of the section with `auto_code_inputs_outputs` set to True.
"""
return self.copy(auto_code_inputs_outputs=True)
@staticmethod
def list_header(sections: List["Section"]) -> bytes:
"""
- Create single code header for all code sections contained in
- the list.
+ Create single code header for all code sections contained in the list.
"""
# Allow 'types section' to use skip_header_listing flag
if sections[0].skip_header_listing:
@@ -250,7 +245,8 @@ def list_header(sections: List["Section"]) -> bytes:
h = sections[0].kind.to_bytes(HEADER_SECTION_KIND_BYTE_LENGTH, "big")
- # Count only those sections that are not marked to be skipped for header calculation
+ # Count only those sections that are not marked to be skipped for
+ # header calculation
header_registered_sections = 0
for cs in sections:
if not cs.skip_header_listing:
@@ -258,7 +254,8 @@ def list_header(sections: List["Section"]) -> bytes:
h += header_registered_sections.to_bytes(HEADER_SECTION_COUNT_BYTE_LENGTH, "big")
for cs in sections:
- # If section is marked to skip the header calculation, don't make header for it
+ # If section is marked to skip the header calculation, don't make
+ # header for it
if cs.skip_header_listing:
continue
size = cs.custom_size if "custom_size" in cs.model_fields_set else len(cs.data)
@@ -307,13 +304,9 @@ class Container(CopyValidateModel):
"""Class that represents an EOF V1 container."""
name: Optional[str] = None
- """
- Name of the container
- """
+ """Name of the container"""
sections: List[Section] = Field(default_factory=list)
- """
- List of sections in the container
- """
+ """List of sections in the container"""
magic: Bytes = Bytes(EOF_MAGIC)
"""
Custom magic value used to override the mandatory EOF value for testing
@@ -321,59 +314,46 @@ class Container(CopyValidateModel):
"""
version: Bytes = Bytes(VERSION_NUMBER_BYTES)
"""
- Custom version value used to override the mandatory EOF V1 value
- for testing purposes.
+ Custom version value used to override the mandatory EOF V1 value for
+ testing purposes.
"""
header_terminator: Bytes = Bytes(EOF_HEADER_TERMINATOR)
- """
- Bytes used to terminate the header.
- """
+ """Bytes used to terminate the header."""
extra: Bytes = Bytes(b"")
"""
- Extra data to be appended at the end of the container, which will
- not be considered part of any of the sections, for testing purposes.
+ Extra data to be appended at the end of the container, which will not be
+ considered part of any of the sections, for testing purposes.
"""
auto_type_section: AutoSection = AutoSection.AUTO
"""
- Automatically generate a `TYPE` section based on the
- included `CODE` kind sections.
+ Automatically generate a `TYPE` section based on the included `CODE` kind
+ sections.
"""
auto_data_section: bool = True
- """
- Automatically generate a `DATA` section.
- """
+ """Automatically generate a `DATA` section."""
auto_sort_sections: AutoSection = AutoSection.AUTO
"""
- Automatically sort sections for the header and body:
- Headers: type section first, all code sections, container sections, last
- data section(s)
- Body: type section first, all code sections, data section(s), last
- container sections
+    Automatically sort sections for the header and body. Headers: type
+    section first, all code sections, container sections, last data
+    section(s). Body: type section first, all code sections, data
+    section(s), last container sections.
"""
skip_join_concurrent_sections_in_header: bool = False
- """
- Skip joining concurrent sections in the header (code and container)
- """
+ """Skip joining concurrent sections in the header (code and container)"""
validity_error: EOFExceptionInstanceOrList | str | None = None
- """
- Optional error expected for the container.
-
- TODO: Remove str
- """
+ """Optional error expected for the container. TODO: Remove str"""
kind: ContainerKind = ContainerKind.RUNTIME
- """
- Kind type of the container.
- """
+ """Kind type of the container."""
raw_bytes: Optional[Bytes] = None
"""
- Optional raw bytes that represent the container.
- Used to have a cohesive type among all test cases, even those that do not
- resemble a valid EOF V1 container.
+ Optional raw bytes that represent the container. Used to have a cohesive
+ type among all test cases, even those that do not resemble a valid EOF V1
+ container.
"""
expected_bytecode: Optional[Bytes] = None
"""
- Optional raw bytes of the expected constructed bytecode.
- This allows confirming that raw EOF and Container() representations are identical.
+ Optional raw bytes of the expected constructed bytecode. This allows
+ confirming that raw EOF and Container() representations are identical.
"""
@cached_property
@@ -419,7 +399,8 @@ def bytecode(self) -> bytes:
# Add headers
if header_sections:
- # Join headers of the same kind in a list of lists, only if they are next to each other
+ # Join headers of the same kind in a list of lists, only if they
+ # are next to each other
concurrent_sections: List[List[Section]] = [[header_sections[0]]]
for s in header_sections[1:]:
if (
@@ -469,7 +450,9 @@ def Init( # noqa: N802
deploy_container: "Container",
initcode_prefix: Optional[Bytecode] = None,
) -> "Container":
- """Create simple init container that deploys the specified container."""
+ """
+ Create simple init container that deploys the specified container.
+ """
if initcode_prefix is None:
initcode_prefix = Bytecode()
return cls(
@@ -498,8 +481,8 @@ def __len__(self) -> int:
def __str__(self) -> str:
"""
- Return name of the container if available, otherwise the bytecode of the container
- as a string.
+ Return name of the container if available, otherwise the bytecode of
+ the container as a string.
"""
if self.name:
return self.name
@@ -514,13 +497,9 @@ class Initcode(Bytecode):
"""
name: str = "EOF V1 Initcode"
- """
- Name used to identify the initcode.
- """
+ """Name used to identify the initcode."""
deploy_container: Container
- """
- Container to be deployed.
- """
+ """Container to be deployed."""
@cached_property
def init_container(self) -> Container:
@@ -539,7 +518,9 @@ def init_container(self) -> Container:
@cached_property
def bytecode(self) -> bytes:
- """Generate an EOF container performs `EOFCREATE` with the specified code."""
+ """
+ Generate an EOF container that performs `EOFCREATE` with the specified code.
+ """
initcode = Container(
sections=[
Section.Code(
diff --git a/src/ethereum_test_types/helpers.py b/src/ethereum_test_types/helpers.py
index 802adfced2b..4378dc292b3 100644
--- a/src/ethereum_test_types/helpers.py
+++ b/src/ethereum_test_types/helpers.py
@@ -19,8 +19,8 @@
def ceiling_division(a: int, b: int) -> int:
"""
- Calculate ceil without using floating point.
- Used by many of the EVM's formulas.
+ Calculate ceil without using floating point. Used by many of the EVM's
+ formulas.
"""
return -(a // -b)
@@ -34,8 +34,8 @@ def compute_create_address(
opcode: Op = Op.CREATE,
) -> Address:
"""
- Compute address of the resulting contract created using a transaction
- or the `CREATE` opcode.
+ Compute address of the resulting contract created using a transaction or
+ the `CREATE` opcode.
"""
if opcode == Op.CREATE:
if isinstance(address, EOA):
@@ -68,7 +68,10 @@ def compute_create2_address(
def compute_eofcreate_address(
address: FixedSizeBytesConvertible, salt: FixedSizeBytesConvertible
) -> Address:
- """Compute address of the resulting contract created using the `EOFCREATE` opcode."""
+ """
+ Compute address of the resulting contract created using the `EOFCREATE`
+ opcode.
+ """
hash_bytes = Bytes(b"\xff" + b"\x00" * 12 + Address(address) + Hash(salt)).keccak256()
return Address(hash_bytes[-20:])
@@ -106,13 +109,14 @@ class TestParameterGroup(BaseModel):
def __repr__(self):
"""
- Generate repr string, intended to be used as a test id, based on the class
- name and the values of the non-default optional fields.
+ Generate repr string, intended to be used as a test id, based on the
+ class name and the values of the non-default optional fields.
"""
class_name = self.__class__.__name__
field_strings = [
f"{field}_{value}"
- # Include the field only if it is not optional or not set to its default value
+ # Include the field only if it is not optional or not set to its
+ # default value
for field, value in self.model_dump(exclude_defaults=True, exclude_unset=True).items()
]
diff --git a/src/ethereum_test_types/request_types.py b/src/ethereum_test_types/request_types.py
index e56ed231484..d38fd62b5ee 100644
--- a/src/ethereum_test_types/request_types.py
+++ b/src/ethereum_test_types/request_types.py
@@ -30,26 +30,18 @@ class DepositRequest(RequestBase, CamelModel):
"""Deposit Request type."""
pubkey: BLSPublicKey
- """
- The public key of the beacon chain validator.
- """
+ """The public key of the beacon chain validator."""
withdrawal_credentials: Hash
- """
- The withdrawal credentials of the beacon chain validator.
- """
+ """The withdrawal credentials of the beacon chain validator."""
amount: HexNumber
- """
- The amount in gwei of the deposit.
- """
+ """The amount in gwei of the deposit."""
signature: BLSSignature
"""
- The signature of the deposit using the validator's private key that matches the
- `pubkey`.
+ The signature of the deposit using the validator's private key that matches
+ the `pubkey`.
"""
index: HexNumber
- """
- The index of the deposit.
- """
+ """The index of the deposit."""
type: ClassVar[int] = 0
@@ -69,16 +61,16 @@ class WithdrawalRequest(RequestBase, CamelModel):
source_address: Address = Address(0)
"""
- The address of the execution layer account that made the withdrawal request.
+ The address of the execution layer account that made the withdrawal
+ request.
"""
validator_pubkey: BLSPublicKey
"""
- The current public key of the validator as it currently is in the beacon state.
+ The current public key of the validator as it currently is in the beacon
+ state.
"""
amount: HexNumber
- """
- The amount in gwei to be withdrawn on the beacon chain.
- """
+ """The amount in gwei to be withdrawn on the beacon chain."""
type: ClassVar[int] = 1
@@ -96,15 +88,18 @@ class ConsolidationRequest(RequestBase, CamelModel):
source_address: Address = Address(0)
"""
- The address of the execution layer account that made the consolidation request.
+ The address of the execution layer account that made the consolidation
+ request.
"""
source_pubkey: BLSPublicKey
"""
- The public key of the source validator as it currently is in the beacon state.
+ The public key of the source validator as it currently is in the beacon
+ state.
"""
target_pubkey: BLSPublicKey
"""
- The public key of the target validator as it currently is in the beacon state.
+ The public key of the target validator as it currently is in the beacon
+ state.
"""
type: ClassVar[int] = 2
diff --git a/src/ethereum_test_types/tests/test_blob_types.py b/src/ethereum_test_types/tests/test_blob_types.py
index 4aefc61a912..88cf1501f53 100644
--- a/src/ethereum_test_types/tests/test_blob_types.py
+++ b/src/ethereum_test_types/tests/test_blob_types.py
@@ -24,14 +24,14 @@ def increment_counter(timeout: float = 10):
"""
Increment counter in file, creating if doesn't exist.
- This is needed because we require the unit test 'test_transition_fork_blobs' to run
- at the end without having to include another dependency for ordering tests.
- That test has to run at the end because it assumes that no json blobs not created
- by itself are created while it is running.
-
- The hardcoded counter value in the test above has to be updated if any new blob_related
- unit tests that create json blobs are added in the future.
-
+ This is needed because we require the unit test
+ 'test_transition_fork_blobs' to run at the end without having to include
+ another dependency for ordering tests. That test has to run at the end
+ because it assumes that no json blobs not created by itself are created
+ while it is running.
+
+ The hardcoded counter value in the test above has to be updated if any new
+ blob_related unit tests that create json blobs are added in the future.
"""
file_path = CACHED_BLOBS_DIRECTORY / "blob_unit_test_counter.txt"
lock_file = file_path.with_suffix(".lock")
@@ -62,7 +62,6 @@ def wait_until_counter_reached(target: int, poll_interval: float = 0.1):
try:
current_value = int(file_path.read_text().strip())
if current_value == target:
- # file_path.unlink() # get rid to effectively reset counter to 0
return current_value
elif current_value > target:
pytest.fail(
@@ -86,8 +85,8 @@ def test_blob_creation_and_writing_and_reading(
fork,
): # noqa: F811
"""
- Generates blobs for different forks and ensures writing to file
- and reading from file works as expected.
+ Generates blobs for different forks and ensures writing to file and reading
+ from file works as expected.
"""
timestamp = 100
b = Blob.from_fork(fork=fork, seed=seed, timestamp=timestamp)
@@ -145,15 +144,20 @@ def test_transition_fork_blobs(
fork,
timestamp,
):
- """Generates blobs for transition forks (time 14999 is old fork, time 15000 is new fork)."""
- # line below guarantees that this test runs only after the other blob unit tests are done
+ """
+ Generates blobs for transition forks (time 14999 is old fork, time 15000 is
+ new fork).
+ """
+ # line below guarantees that this test runs only after the other blob unit
+ # tests are done
wait_until_counter_reached(21)
clear_blob_cache(CACHED_BLOBS_DIRECTORY)
print(f"Original fork: {fork}, Timestamp: {timestamp}")
pre_transition_fork = fork.transitions_from()
- post_transition_fork_at_15k = fork.transitions_to() # only reached if timestamp >= 15000
+ # only reached if timestamp >= 15000
+ post_transition_fork_at_15k = fork.transitions_to()
if not pre_transition_fork.supports_blobs() and timestamp < 15000:
print(
@@ -178,6 +182,7 @@ def test_transition_fork_blobs(
f"transitioned to {post_transition_fork_at_15k.name()} but is still at {b.fork.name()}"
)
- # delete counter at last iteration (otherwise re-running all unit tests will fail)
+ # delete counter at last iteration (otherwise re-running all unit tests
+ # will fail)
if timestamp == 15_000 and pre_transition_fork == Prague:
(CACHED_BLOBS_DIRECTORY / "blob_unit_test_counter.txt").unlink()
diff --git a/src/ethereum_test_types/tests/test_block_access_lists.py b/src/ethereum_test_types/tests/test_block_access_lists.py
index e8a36d73213..06650810722 100644
--- a/src/ethereum_test_types/tests/test_block_access_lists.py
+++ b/src/ethereum_test_types/tests/test_block_access_lists.py
@@ -143,7 +143,8 @@ def test_partial_validation():
account_expectations={
alice: BalAccountExpectation(
nonce_changes=[BalNonceChange(tx_index=1, post_nonce=1)],
- # balance_changes and storage_reads not set and won't be validated
+ # balance_changes and storage_reads not set and won't be
+ # validated
),
}
)
@@ -338,8 +339,8 @@ def test_expected_addresses_auto_sorted():
"""
Test that expected addresses are automatically sorted before comparison.
- The BAL *Expectation address order should not matter for the dict.
- We DO, however, validate that the actual BAL (from t8n) is sorted correctly.
+ The BAL *Expectation address order should not matter for the dict. We DO,
+ however, validate that the actual BAL (from t8n) is sorted correctly.
"""
alice = Address(0xA)
bob = Address(0xB)
@@ -815,7 +816,8 @@ def test_absent_values_with_multiple_tx_indices():
alice: BalAccountExpectation(
absent_values=BalAccountAbsentValues(
nonce_changes=[
- # wrongly forbid change at txs 1 and 2 (1 exists, so should fail)
+ # wrongly forbid change at txs 1 and 2
+ # (1 exists, so should fail)
BalNonceChange(tx_index=1, post_nonce=1),
BalNonceChange(tx_index=2, post_nonce=0),
]
@@ -961,13 +963,19 @@ def test_bal_account_absent_values_comprehensive():
],
)
def test_bal_account_absent_values_empty_list_validation_raises(field_name, field_value):
- """Test that empty lists in BalAccountAbsentValues fields raise appropriate errors."""
+ """
+ Test that empty lists in BalAccountAbsentValues fields
+ raise appropriate errors.
+ """
with pytest.raises(ValueError, match="Empty lists are not allowed"):
BalAccountAbsentValues(**{field_name: field_value})
def test_bal_account_absent_values_empty_slot_changes_raises():
- """Test that empty slot_changes in storage_changes raises appropriate error."""
+ """
+ Test that empty slot_changes in storage_changes
+ raises appropriate error.
+ """
with pytest.raises(ValueError, match="Empty lists are not allowed"):
BalAccountAbsentValues(
storage_changes=[
diff --git a/src/ethereum_test_types/tests/test_eof_v1.py b/src/ethereum_test_types/tests/test_eof_v1.py
index 21c1b8b196b..1c454ab870f 100644
--- a/src/ethereum_test_types/tests/test_eof_v1.py
+++ b/src/ethereum_test_types/tests/test_eof_v1.py
@@ -867,7 +867,8 @@ def remove_comments_from_string(input_string):
# Find the index of the first '#' character
comment_start = line.find("#")
- # If a '#' is found, slice up to that point; otherwise, take the whole line
+ # If a '#' is found, slice up to that point; otherwise, take the whole
+ # line
if comment_start != -1:
cleaned_line = line[:comment_start].rstrip()
else:
diff --git a/src/ethereum_test_types/tests/test_post_alloc.py b/src/ethereum_test_types/tests/test_post_alloc.py
index c9639b7e543..45111760fc7 100644
--- a/src/ethereum_test_types/tests/test_post_alloc.py
+++ b/src/ethereum_test_types/tests/test_post_alloc.py
@@ -10,13 +10,17 @@
@pytest.fixture()
def post(request: pytest.FixtureRequest) -> Alloc:
- """Post state: Set from the test's indirectly parametrized `post` parameter."""
+ """
+ Post state: Set from the test's indirectly parametrized `post` parameter.
+ """
return Alloc.model_validate(request.param)
@pytest.fixture()
def alloc(request: pytest.FixtureRequest) -> Alloc:
- """Alloc state: Set from the test's indirectly parametrized `alloc` parameter."""
+ """
+ Alloc state: Set from the test's indirectly parametrized `alloc` parameter.
+ """
return Alloc.model_validate(request.param)
diff --git a/src/ethereum_test_types/transaction_types.py b/src/ethereum_test_types/transaction_types.py
index 4ffe706bb02..ce1117635e0 100644
--- a/src/ethereum_test_types/transaction_types.py
+++ b/src/ethereum_test_types/transaction_types.py
@@ -81,7 +81,8 @@ class AuthorizationTupleGeneric(CamelModel, Generic[NumberBoundTypeVar], Signabl
def get_rlp_signing_prefix(self) -> bytes:
"""
- Return a prefix that has to be appended to the serialized signing object.
+ Return a prefix that has to be appended to the serialized signing
+ object.
By default, an empty string is returned.
"""
@@ -107,7 +108,10 @@ class AuthorizationTuple(AuthorizationTupleGeneric[HexNumber]):
secret_key: Hash | None = None
def model_post_init(self, __context: Any) -> None:
- """Automatically signs the authorization tuple if a secret key or sender are provided."""
+ """
+ Automatically signs the authorization tuple if a secret key or sender
+ are provided.
+ """
super().model_post_init(__context)
self.sign()
@@ -198,7 +202,9 @@ class TransactionValidateToAsEmptyString(CamelModel):
@model_validator(mode="before")
@classmethod
def validate_to_as_empty_string(cls, data: Any) -> Any:
- """If the `to` field is an empty string, set the model value to None."""
+ """
+ If the `to` field is an empty string, set the model value to None.
+ """
if (
isinstance(data, dict)
and "to" in data
@@ -210,11 +216,16 @@ def validate_to_as_empty_string(cls, data: Any) -> Any:
class TransactionFixtureConverter(TransactionValidateToAsEmptyString):
- """Handler for serializing and validating the `to` field as an empty string."""
+ """
+ Handler for serializing and validating the `to` field as an empty string.
+ """
@model_serializer(mode="wrap", when_used="json-unless-none")
def serialize_to_as_empty_string(self, serializer):
- """Serialize the `to` field as the empty string if the model value is None."""
+ """
+ Serialize the `to` field as the empty string if the model value is
+ None.
+ """
default = serializer(self)
if default is not None and "to" not in default:
default["to"] = ""
@@ -222,16 +233,18 @@ def serialize_to_as_empty_string(self, serializer):
class TransactionTransitionToolConverter(TransactionValidateToAsEmptyString):
- """Handler for serializing and validating the `to` field as an empty string."""
+ """
+ Handler for serializing and validating the `to` field as an empty string.
+ """
@model_serializer(mode="wrap", when_used="json-unless-none")
def serialize_to_as_none(self, serializer):
"""
Serialize the `to` field as `None` if the model value is None.
- This is required as we use `exclude_none=True` when serializing, but the
- t8n tool explicitly requires a value of `None` (respectively null), for
- if the `to` field should be unset (contract creation).
+ This is required as we use `exclude_none=True` when serializing, but
+ the t8n tool explicitly requires a value of `None` (respectively null),
+ for if the `to` field should be unset (contract creation).
"""
default = serializer(self)
if default is not None and "to" not in default:
@@ -250,8 +263,8 @@ class TransactionTestMetadata(CamelModel):
def to_json(self) -> str:
"""
- Convert the transaction metadata into json string for it to be embedded in the
- request id.
+ Convert the transaction metadata into json string for it to be embedded
+ in the request id.
"""
return self.model_dump_json(exclude_none=True, by_alias=True)
@@ -291,8 +304,8 @@ def __str__(self):
class InvalidSignaturePrivateKeyError(Exception):
"""
- Transaction describes both the signature and private key of
- source account.
+ Transaction describes both the signature and private key of source
+ account.
"""
def __str__(self):
@@ -522,8 +535,8 @@ def with_signature_and_sender(self, *, keep_secret_key: bool = False) -> "Transa
def get_rlp_signing_fields(self) -> List[str]:
"""
- Return the list of values included in the envelope used for signing depending on
- the transaction type.
+ Return the list of values included in the envelope used for signing
+ depending on the transaction type.
"""
field_list: List[str]
if self.ty == 6:
@@ -611,8 +624,8 @@ def get_rlp_signing_fields(self) -> List[str]:
def get_rlp_fields(self) -> List[str]:
"""
- Return the list of values included in the list used for rlp encoding depending on
- the transaction type.
+ Return the list of values included in the list used for rlp encoding
+ depending on the transaction type.
"""
fields = self.get_rlp_signing_fields()
if self.ty == 0 and self.protected:
@@ -621,8 +634,8 @@ def get_rlp_fields(self) -> List[str]:
def get_rlp_prefix(self) -> bytes:
"""
- Return the transaction type as bytes to be appended at the beginning of the
- serialized transaction if type is not 0.
+ Return the transaction type as bytes to be appended at the beginning of
+ the serialized transaction if type is not 0.
"""
if self.ty > 0:
return bytes([self.ty])
@@ -630,8 +643,8 @@ def get_rlp_prefix(self) -> bytes:
def get_rlp_signing_prefix(self) -> bytes:
"""
- Return the transaction type as bytes to be appended at the beginning of the
- serialized transaction signing envelope if type is not 0.
+ Return the transaction type as bytes to be appended at the beginning of
+ the serialized transaction signing envelope if type is not 0.
"""
if self.ty > 0:
return bytes([self.ty])
@@ -650,7 +663,10 @@ def hash(self) -> Hash:
@cached_property
def serializable_list(self) -> Any:
- """Return list of values included in the transaction as a serializable object."""
+ """
+ Return list of values included in the transaction as a serializable
+ object.
+ """
return self.rlp() if self.ty > 0 else self.to_list(signing=False)
@staticmethod
@@ -663,7 +679,9 @@ def list_root(input_txs: List["Transaction"]) -> Hash:
@staticmethod
def list_blob_versioned_hashes(input_txs: List["Transaction"]) -> List[Hash]:
- """Get list of ordered blob versioned hashes from a list of transactions."""
+ """
+ Get list of ordered blob versioned hashes from a list of transactions.
+ """
return [
blob_versioned_hash
for tx in input_txs
@@ -687,11 +705,10 @@ class NetworkWrappedTransaction(CamelModel, RLPSerializable):
Network wrapped transaction as defined in
[EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking).
- < Osaka:
- rlp([tx_payload_body, blobs, commitments, proofs])
+ < Osaka: rlp([tx_payload_body, blobs, commitments, proofs])
- >= Osaka:
- rlp([tx_payload_body, wrapper_version, blobs, commitments, cell_proofs])
+ >= Osaka: rlp([tx_payload_body, wrapper_version, blobs, commitments,
+ cell_proofs])
"""
tx: Transaction
@@ -740,15 +757,18 @@ def cell_proofs(self) -> Sequence[Bytes] | None:
def get_rlp_fields(self) -> List[str]:
"""
- Return an ordered list of field names to be included in RLP serialization.
+ Return an ordered list of field names to be included in RLP
+ serialization.
Function can be overridden to customize the logic to return the fields.
By default, rlp_fields class variable is used.
- The list can be nested list up to one extra level to represent nested fields.
+ The list can be nested list up to one extra level to represent nested
+ fields.
"""
- # only put a wrapper_version field for >=osaka (value 1), otherwise omit field
+ # only put a wrapper_version field for >=osaka (value 1), otherwise
+ # omit field
wrapper = []
if self.wrapper_version is not None:
wrapper = ["wrapper_version"]
@@ -761,11 +781,11 @@ def get_rlp_fields(self) -> List[str]:
if self.cell_proofs is not None:
rlp_cell_proofs = ["cell_proofs"]
- rlp_fields: List[
- str
- ] = [ # structure explained in https://eips.ethereum.org/EIPS/eip-7594#Networking
+ rlp_fields: List[str] = [ # structure explained in
+ # https://eips.ethereum.org/EIPS/eip-7594#Networking
"tx", # tx_payload_body
- *wrapper, # wrapper_version, which is always 1 for osaka (was non-existing before)
+ *wrapper, # wrapper_version, which is always 1 for osaka (was
+ # non-existing before)
"blobs", # Blob.data
"commitments",
*rlp_proofs,
@@ -782,8 +802,8 @@ def get_rlp_fields(self) -> List[str]:
def get_rlp_prefix(self) -> bytes:
"""
- Return the transaction type as bytes to be appended at the beginning of the
- serialized transaction if type is not 0.
+ Return the transaction type as bytes to be appended at the beginning of
+ the serialized transaction if type is not 0.
"""
if self.tx.ty > 0:
return bytes([self.tx.ty])
diff --git a/src/ethereum_test_types/trie.py b/src/ethereum_test_types/trie.py
index 8c97e519568..4d0c7ba3126 100644
--- a/src/ethereum_test_types/trie.py
+++ b/src/ethereum_test_types/trie.py
@@ -1,4 +1,6 @@
-"""The state trie is the structure responsible for storing."""
+"""
+The state trie is the structure responsible for storing.
+"""
import copy
from dataclasses import dataclass, field
@@ -58,17 +60,11 @@ def encode_account(raw_account_data: FrontierAccount, storage_root: Bytes) -> By
# note: an empty trie (regardless of whether it is secured) has root:
-#
-# keccak256(RLP(b''))
-# ==
-# 56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421 # noqa: E501,SC10
-#
+# keccak256(RLP(b'')) ==
+# 56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421
# also:
-#
-# keccak256(RLP(()))
-# ==
-# 1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347 # noqa: E501,SC10
-#
+# keccak256(RLP(())) ==
+# 1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347
# which is the sha3Uncles hash in block header with no uncles
EMPTY_TRIE_ROOT = Bytes32(
bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
@@ -137,9 +133,10 @@ class BranchNode:
def encode_internal_node(node: Optional[InternalNode]) -> Extended:
"""
- Encode a Merkle Trie node into its RLP form. The RLP will then be
- serialized into a `Bytes` and hashed unless it is less that 32 bytes
- when serialized.
+ Encode a Merkle Trie node into its RLP form.
+
+ The RLP will then be serialized into a `Bytes` and hashed unless it is less
+ than 32 bytes when serialized.
This function also accepts `None`, representing the absence of a node,
which is encoded to `b""`.
@@ -211,7 +208,6 @@ def trie_set(trie: Trie[K, V], key: K, value: V) -> None:
This method deletes the key if `value == trie.default`, because the Merkle
Trie represents the default value by omitting it from the trie.
-
"""
if value == trie.default:
if key in trie._data:
@@ -225,7 +221,6 @@ def trie_get(trie: Trie[K, V], key: K) -> V:
Get an item from the Merkle Trie.
This method returns `trie.default` if the key is missing.
-
"""
return trie._data.get(key, trie.default)
@@ -248,11 +243,10 @@ def nibble_list_to_compact(x: Bytes, is_leaf: bool) -> Bytes:
Highest nibble::
- +---+---+----------+--------+
- | _ | _ | is_leaf | parity |
- +---+---+----------+--------+
- 3 2 1 0
-
+ +---+---+----------+--------+
+ | _ | _ | is_leaf | parity |
+ +---+---+----------+--------+
+ 3 2 1 0
The lowest bit of the nibble encodes the parity of the length of the
remaining nibbles -- `0` when even and `1` when odd. The second lowest bit
@@ -274,7 +268,9 @@ def nibble_list_to_compact(x: Bytes, is_leaf: bool) -> Bytes:
def bytes_to_nibble_list(bytes_: Bytes) -> Bytes:
- """Convert a `Bytes` into to a sequence of nibbles (bytes with value < 16)."""
+ """
+ Convert a `Bytes` into a sequence of nibbles (bytes with value < 16).
+ """
nibble_list = bytearray(2 * len(bytes_))
for byte_index, byte in enumerate(bytes_):
nibble_list[byte_index * 2] = (byte & 0xF0) >> 4
diff --git a/src/ethereum_test_vm/bytecode.py b/src/ethereum_test_vm/bytecode.py
index 5eea5b0cce3..298f80c6e21 100644
--- a/src/ethereum_test_vm/bytecode.py
+++ b/src/ethereum_test_vm/bytecode.py
@@ -16,9 +16,9 @@ class Bytecode:
"""
Base class to represent EVM bytecode.
- Stack calculations are automatically done after an addition operation between two bytecode
- objects. The stack height is not guaranteed to be correct, so the user must take this into
- consideration.
+ Stack calculations are automatically done after an addition operation
+ between two bytecode objects. The stack height is not guaranteed to be
+ correct, so the user must take this into consideration.
Parameters
----------
@@ -63,8 +63,8 @@ def __new__(
return instance
if isinstance(bytes_or_byte_code_base, Bytecode):
- # Required because Enum class calls the base class with the instantiated object as
- # parameter.
+ # Required because Enum class calls the base class with the
+ # instantiated object as parameter.
obj = super().__new__(cls)
obj._bytes_ = bytes_or_byte_code_base._bytes_
obj.popped_stack_items = bytes_or_byte_code_base.popped_stack_items
@@ -113,8 +113,8 @@ def __eq__(self, other):
Allow comparison between Bytecode instances and bytes objects.
Raises:
- - NotImplementedError: if the comparison is not between an Bytecode
- or a bytes object.
+ - NotImplementedError: if the comparison is not between an
+ Bytecode or a bytes object.
"""
if isinstance(other, Bytecode):
@@ -142,7 +142,9 @@ def __hash__(self):
)
def __add__(self, other: "Bytecode | bytes | int | None") -> "Bytecode":
- """Concatenate the bytecode representation with another bytecode object."""
+ """
+ Concatenate the bytecode representation with another bytecode object.
+ """
if other is None or (isinstance(other, int) and other == 0):
# Edge case for sum() function
return self
@@ -160,33 +162,40 @@ def __add__(self, other: "Bytecode | bytes | int | None") -> "Bytecode":
b_pop, b_push = other.popped_stack_items, other.pushed_stack_items
b_min, b_max = other.min_stack_height, other.max_stack_height
- # NOTE: "_pop" is understood as the number of elements required by an instruction or
- # bytecode to be popped off the stack before it starts returning (pushing).
+ # NOTE: "_pop" is understood as the number of elements required by an
+ # instruction or bytecode to be popped off the stack before it starts
+ # returning (pushing).
- # Auxiliary variables representing "stages" of the execution of `c = a + b` bytecode:
- # Assume starting point 0 as reference:
+ # Auxiliary variables representing "stages" of the execution of
+ # `c = a + b` bytecode: Assume starting point 0 as reference:
a_start = 0
- # A (potentially) pops some elements and reaches its "bottom", might be negative:
+ # A (potentially) pops some elements and reaches its "bottom", might be
+ # negative:
a_bottom = a_start - a_pop
- # After this A pushes some elements, then B pops and reaches its "bottom":
+ # After this A pushes some elements, then B pops and reaches its
+ # "bottom":
b_bottom = a_bottom + a_push - b_pop
# C's bottom is either at the bottom of A or B:
c_bottom = min(a_bottom, b_bottom)
if c_bottom == a_bottom:
- # C pops the same as A to reach its bottom, then the rest of A and B are C's "push"
+ # C pops the same as A to reach its bottom, then the rest of A and
+ # B are C's "push"
c_pop = a_pop
c_push = a_push - b_pop + b_push
else:
- # A and B are C's "pop" to reach its bottom, then pushes the same as B
+ # A and B are C's "pop" to reach its bottom, then pushes the same
+ # as B
c_pop = a_pop - a_push + b_pop
c_push = b_push
- # C's minimum required stack is either A's or B's shifted by the net stack balance of A
+ # C's minimum required stack is either A's or B's shifted by the net
+ # stack balance of A
c_min = max(a_min, b_min + a_pop - a_push)
- # C starts from c_min, then reaches max either in the spot where A reached a_max or in the
- # spot where B reached b_max, after A had completed.
+ # C starts from c_min, then reaches max either in the spot where A
+ # reached a_max or in the spot where B reached b_max, after A had
+ # completed.
c_max = max(c_min + a_max - a_min, c_min - a_pop + a_push + b_max - b_min)
return Bytecode(
@@ -199,7 +208,9 @@ def __add__(self, other: "Bytecode | bytes | int | None") -> "Bytecode":
)
def __radd__(self, other: "Bytecode | int | None") -> "Bytecode":
- """Concatenate the opcode byte representation with another bytes object."""
+ """
+ Concatenate the opcode byte representation with another bytes object.
+ """
if other is None or (isinstance(other, int) and other == 0):
# Edge case for sum() function
return self
@@ -207,7 +218,9 @@ def __radd__(self, other: "Bytecode | int | None") -> "Bytecode":
return other.__add__(self)
def __mul__(self, other: int) -> "Bytecode":
- """Concatenate another bytes object with the opcode byte representation."""
+ """
+ Concatenate another bytes object with the opcode byte representation.
+ """
if other < 0:
raise ValueError("Cannot multiply by a negative number")
if other == 0:
@@ -218,7 +231,10 @@ def __mul__(self, other: int) -> "Bytecode":
return output
def hex(self) -> str:
- """Return the hexadecimal representation of the opcode byte representation."""
+ """
+ Return the hexadecimal representation of the opcode byte
+ representation.
+ """
return bytes(self).hex()
def keccak256(self) -> Hash:
@@ -229,7 +245,10 @@ def keccak256(self) -> Hash:
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
- """Provide Pydantic core schema for Bytecode serialization and validation."""
+ """
+ Provide Pydantic core schema for Bytecode
+ serialization and validation.
+ """
return no_info_plain_validator_function(
cls,
serialization=plain_serializer_function_ser_schema(
diff --git a/src/ethereum_test_vm/evm_types.py b/src/ethereum_test_vm/evm_types.py
index bb7fe296dc6..992f580e89b 100644
--- a/src/ethereum_test_vm/evm_types.py
+++ b/src/ethereum_test_vm/evm_types.py
@@ -4,7 +4,9 @@
class EVMCodeType(str, Enum):
- """Enum representing the type of EVM code that is supported in a given fork."""
+ """
+ Enum representing the type of EVM code that is supported in a given fork.
+ """
LEGACY = "legacy"
EOF_V1 = "eof_v1"
diff --git a/src/ethereum_test_vm/opcodes.py b/src/ethereum_test_vm/opcodes.py
index 5f2aef894ba..85b79c65e5f 100644
--- a/src/ethereum_test_vm/opcodes.py
+++ b/src/ethereum_test_vm/opcodes.py
@@ -1,9 +1,10 @@
"""
Ethereum Virtual Machine opcode definitions.
-Acknowledgments: The individual opcode documentation below is due to the work by
-[smlXL](https://github.com/smlxl) on [evm.codes](https://www.evm.codes/), available as open
-source [github.com/smlxl/evm.codes](https://github.com/smlxl/evm.codes) - thank you! And thanks
+Acknowledgments: The individual opcode documentation below is due to the work
+by [smlXL](https://github.com/smlxl) on [evm.codes](https://www.evm.codes/),
+available as open source
+[github.com/smlxl/evm.codes](https://github.com/smlxl/evm.codes) - thank you! And thanks
to @ThreeHrSleep for integrating it in the docstrings.
"""
@@ -18,7 +19,8 @@
def _get_int_size(n: int) -> int:
"""Return size of an integer in bytes."""
if n < 0:
- # Negative numbers in the EVM are represented as two's complement of 32 bytes
+ # Negative numbers in the EVM are represented as two's complement
+ # of 32 bytes
return 32
byte_count = 0
while n:
@@ -45,7 +47,8 @@ def _stack_argument_to_bytecode(
if data_size > 32:
raise ValueError("Opcode stack data must be less than 32 bytes")
elif data_size == 0:
- # Pushing 0 is done with the PUSH1 opcode for compatibility reasons.
+ # Pushing 0 is done with the PUSH1 opcode
+ # for compatibility reasons.
data_size = 1
arg = arg.to_bytes(
length=data_size,
@@ -55,7 +58,8 @@ def _stack_argument_to_bytecode(
else:
arg = to_bytes(arg).lstrip(b"\0") # type: ignore
if arg == b"":
- # Pushing 0 is done with the PUSH1 opcode for compatibility reasons.
+ # Pushing 0 is done with the PUSH1 opcode for
+ # compatibility reasons.
arg = b"\x00"
data_size = len(arg)
@@ -67,20 +71,23 @@ def _stack_argument_to_bytecode(
class Opcode(Bytecode):
"""
- Represents a single Opcode instruction in the EVM, with extra metadata useful to parametrize
- tests.
+ Represents a single Opcode instruction in the EVM, with extra
+ metadata useful to parametrize tests.
Parameters
----------
- data_portion_length: number of bytes after the opcode in the bytecode
that represent data
- - data_portion_formatter: function to format the data portion of the opcode, if any
- - stack_properties_modifier: function to modify the stack properties of the opcode after the
- data portion has been processed
- - kwargs: list of keyword arguments that can be passed to the opcode, in the order they are
- meant to be placed in the stack
- - kwargs_defaults: default values for the keyword arguments if any, otherwise 0
- - unchecked_stack: whether the bytecode should ignore stack checks when being called
+ - data_portion_formatter: function to format the data portion of the
+ opcode, if any
+ - stack_properties_modifier: function to modify the stack properties of
+ the opcode after the data portion has been processed
+ - kwargs: list of keyword arguments that can be passed to the opcode,
+ in the order they are meant to be placed in the stack
+ - kwargs_defaults: default values for the keyword arguments if any,
+ otherwise 0
+ - unchecked_stack: whether the bytecode should ignore stack checks
+ when being called
"""
@@ -111,8 +118,8 @@ def __new__(
if kwargs_defaults is None:
kwargs_defaults = {}
if type(opcode_or_byte) is Opcode:
- # Required because Enum class calls the base class with the instantiated object as
- # parameter.
+ # Required because Enum class calls the base class
+ # with the instantiated object as parameter.
return opcode_or_byte
elif isinstance(opcode_or_byte, int) or isinstance(opcode_or_byte, bytes):
obj_bytes = (
@@ -147,8 +154,8 @@ def __new__(
def __getitem__(self, *args: "int | bytes | str | Iterable[int]") -> "Opcode":
"""
- Initialize a new instance of the opcode with the data portion set, and also clear
- the data portion variables to avoid reusing them.
+ Initialize a new instance of the opcode with the data portion set,
+ and also clear the data portion variables to avoid reusing them.
"""
if self.data_portion_formatter is None and self.data_portion_length == 0:
raise ValueError("Opcode does not have a data portion or has already been set")
@@ -160,8 +167,8 @@ def __getitem__(self, *args: "int | bytes | str | Iterable[int]") -> "Opcode":
else:
data_portion = self.data_portion_formatter(*args)
elif self.data_portion_length > 0:
- # For opcodes with a data portion, the first argument is the data and the rest of the
- # arguments form the stack.
+ # For opcodes with a data portion, the first argument is the
+ # data and the rest of the arguments form the stack.
assert len(args) == 1, "Opcode with data portion requires exactly one argument"
data = args[0]
if isinstance(data, bytes) or isinstance(data, SupportsBytes) or isinstance(data, str):
@@ -222,26 +229,31 @@ def __call__(
**kwargs: "int | bytes | str | Opcode | Bytecode",
) -> Bytecode:
"""
- Make all opcode instances callable to return formatted bytecode, which constitutes a data
- portion, that is located after the opcode byte, and pre-opcode bytecode, which is normally
- used to set up the stack.
+ Make all opcode instances callable to return formatted bytecode, which
+ constitutes a data portion, that is located after the opcode byte,
+ and pre-opcode bytecode, which is normally used to set up the stack.
- This useful to automatically format, e.g., call opcodes and their stack arguments as
+ This is useful to automatically format, e.g., call opcodes and their
+ stack arguments as
`Opcodes.CALL(Opcodes.GAS, 0x1234, 0x0, 0x0, 0x0, 0x0, 0x0)`.
- Data sign is automatically detected but for this reason the range of the input must be:
- `[-2^(data_portion_bits-1), 2^(data_portion_bits)]` where: `data_portion_bits ==
- data_portion_length * 8`
+ Data sign is automatically detected but for this reason the range
+ of the input must be:
+ `[-2^(data_portion_bits-1), 2^(data_portion_bits)]`
+ where:
+ `data_portion_bits == data_portion_length * 8`
- For the stack, the arguments are set up in the opposite order they are given, so the first
- argument is the last item pushed to the stack.
+ For the stack, the arguments are set up in the opposite order they
+ are given, so the first argument is the last item pushed to the stack.
- The resulting stack arrangement does not take into account opcode stack element
- consumption, so the stack height is not guaranteed to be correct and the user must take
- this into consideration.
+ The resulting stack arrangement does not take into account
+ opcode stack element consumption, so the stack height is not
+ guaranteed to be correct and the user must take this into
+ consideration.
- Integers can also be used as stack elements, in which case they are automatically converted
- to PUSH operations, and negative numbers always use a PUSH32 operation.
+ Integers can also be used as stack elements, in which case they
+ are automatically converted to PUSH operations, and negative numbers
+ always use a PUSH32 operation.
Hex-strings will be automatically converted to bytes.
"""
@@ -317,8 +329,8 @@ def __new__(
if macro_or_bytes is None:
macro_or_bytes = Bytecode()
if isinstance(macro_or_bytes, Macro):
- # Required because Enum class calls the base class with the instantiated object as
- # parameter.
+ # Required because Enum class calls the base class
+ # with the instantiated object as parameter.
return macro_or_bytes
else:
instance = super().__new__(cls, macro_or_bytes)
@@ -342,8 +354,9 @@ def __call__(self, *args_t: OpcodeCallArg, **kwargs) -> Bytecode:
RJUMPV_BRANCH_OFFSET_BYTE_LENGTH = 2
-# TODO: Allowing Iterable here is a hacky way to support `range`, because Python 3.11+ will allow
-# `Op.RJUMPV[*range(5)]`. This is a temporary solution until Python 3.11+ is the minimum required
+# TODO: Allowing Iterable here is a hacky way to support `range`,
+# because Python 3.11+ will allow `Op.RJUMPV[*range(5)]`.
+# This is a temporary solution until Python 3.11+ is the minimum required
# version.
@@ -419,8 +432,9 @@ class Opcodes(Opcode, Enum):
Contains deprecated and not yet implemented opcodes.
- This enum is !! NOT !! meant to be iterated over by the tests. Instead, create a list with
- cherry-picked opcodes from this Enum within the test if iteration is needed.
+ This enum is !! NOT !! meant to be iterated over by the tests.
+ Instead, create a list with cherry-picked opcodes from this Enum
+ within the test if iteration is needed.
Do !! NOT !! remove or modify existing opcodes from this list.
"""
@@ -585,7 +599,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - c: signed integer result of the division. If the denominator is 0, the result will be 0
+ - c: signed integer result of the division. If the denominator is 0,
+ the result will be 0
----
Fork
@@ -615,7 +630,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - a % b: integer result of the integer modulo. If the denominator is 0, the result will be 0
+ - a % b: integer result of the integer modulo. If the denominator is 0,
+ the result will be 0
Fork
----
@@ -644,8 +660,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - a % b: integer result of the signed integer modulo. If the denominator is 0, the result will
- be 0
+ - a % b: integer result of the signed integer modulo. If the denominator
+ is 0, the result will be 0
Fork
----
@@ -675,8 +691,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - (a + b) % N: integer result of the addition followed by a modulo. If the denominator is 0,
- the result will be 0
+ - (a + b) % N: integer result of the addition followed by a modulo.
+ If the denominator is 0, the result will be 0
Fork
----
@@ -706,8 +722,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - (a * b) % N: integer result of the multiplication followed by a modulo. If the denominator
- is 0, the result will be 0
+ - (a * b) % N: integer result of the multiplication followed by a modulo.
+ If the denominator is 0, the result will be 0
Fork
----
@@ -1083,8 +1099,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - y: the indicated byte at the least significant position. If the byte offset is out of range,
- the result is 0
+ - y: the indicated byte at the least significant position.
+ If the byte offset is out of range, the result is 0
Fork
----
@@ -1286,7 +1302,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - balance: balance of the given account in wei. Returns 0 if the account doesn't exist
+ - balance: balance of the given account in wei. Returns 0 if the
+ account doesn't exist
Fork
----
@@ -1315,8 +1332,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - address: the 20-byte address of the sender of the transaction. It can only be an account
- without code
+ - address: the 20-byte address of the sender of the transaction.
+ It can only be an account without code
Fork
----
@@ -1344,8 +1361,9 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - address: the 20-byte address of the caller account. This is the account that did the last
- call (except delegate call)
+ - address: the 20-byte address of the caller account.
+ This is the account that did the last
+ call (except delegate call)
Fork
----
@@ -1365,7 +1383,8 @@ class Opcodes(Opcode, Enum):
Description
----
- Get deposited value by the instruction/transaction responsible for this execution
+ Get deposited value by the instruction/transaction responsible
+ for this execution
Inputs
----
@@ -1401,8 +1420,9 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - data[offset]: 32-byte value starting from the given offset of the calldata. All bytes after
- the end of the calldata are set to 0
+ - data[offset]: 32-byte value starting from the given offset of
+ the calldata. All bytes after the end of the calldata
+ are set to 0
Fork
----
@@ -1614,7 +1634,8 @@ class Opcodes(Opcode, Enum):
----
- minimum_word_size = (size + 31) / 32
- static_gas = 0
- - dynamic_gas = 3 * minimum_word_size + memory_expansion_cost + address_access_cost
+ - dynamic_gas = 3 * minimum_word_size +
+ memory_expansion_cost + address_access_cost
Source: [evm.codes/#3C](https://www.evm.codes/#3C)
"""
@@ -1655,7 +1676,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- dest_offset: byte offset in the memory where the result will be copied
- - offset: byte offset in the return data from the last executed sub context to copy
+ - offset: byte offset in the return data from the last
+ executed sub context to copy
- size: byte size to copy
Fork
@@ -1686,8 +1708,9 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - hash: hash of the chosen account's code, the empty hash (0xc5d24601...) if the account has no
- code, or 0 if the account does not exist or has been destroyed
+ - hash: hash of the chosen account's code, the empty hash (0xc5d24601...)
+ if the account has no code, or 0 if the account does not exist or
+ has been destroyed
Fork
----
@@ -1712,12 +1735,14 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - blockNumber: block number to get the hash from. Valid range is the last 256 blocks (not
- including the current one). Current block number can be queried with NUMBER
+ - blockNumber: block number to get the hash from. Valid range is the
+ last 256 blocks (not including the current one). Current
+ block number can be queried with NUMBER
Outputs
----
- - hash: hash of the chosen block, or 0 if the block number is not in the valid range
+ - hash: hash of the chosen block, or 0 if the block number is not
+ in the valid range
Fork
----
@@ -1957,7 +1982,8 @@ class Opcodes(Opcode, Enum):
Description
----
- Returns the versioned hash of a single blob contained in the type-3 transaction
+ Returns the versioned hash of a single blob contained in
+ the type-3 transaction
Inputs
----
@@ -1975,7 +2001,8 @@ class Opcodes(Opcode, Enum):
----
3
- Source: [eips.ethereum.org/EIPS/eip-4844](https://eips.ethereum.org/EIPS/eip-4844)
+ Source:
+ [eips.ethereum.org/EIPS/eip-4844](https://eips.ethereum.org/EIPS/eip-4844)
"""
BLOBBASEFEE = Opcode(0x4A, popped_stack_items=0, pushed_stack_items=1)
@@ -2003,7 +2030,8 @@ class Opcodes(Opcode, Enum):
----
2
- Source: [eips.ethereum.org/EIPS/eip-7516](https://eips.ethereum.org/EIPS/eip-7516)
+ Source:
+ [eips.ethereum.org/EIPS/eip-7516](https://eips.ethereum.org/EIPS/eip-7516)
"""
POP = Opcode(0x50, popped_stack_items=1)
@@ -2049,8 +2077,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: the 32 bytes in memory starting at that offset. If it goes beyond its current size
- (see MSIZE), writes 0s
+ - value: the 32 bytes in memory starting at that offset.
+ If it goes beyond its current size (see MSIZE), writes 0s
Fork
----
@@ -2106,8 +2134,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- offset: offset in the memory in bytes
- - value: 1-byte value to write in the memory (the least significant byte of the 32-byte stack
- value)
+ - value: 1-byte value to write in the memory (the least significant
+ byte of the 32-byte stack value)
Fork
----
@@ -2136,7 +2164,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: 32-byte value corresponding to that key. 0 if that key was never written before
+ - value: 32-byte value corresponding to that key. 0 if that
+ key was never written before
Fork
----
@@ -2208,8 +2237,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - pc: byte offset in the deployed code where execution will continue from. Must be a
- JUMPDEST instruction
+ - pc: byte offset in the deployed code where execution will continue from.
+ Must be a JUMPDEST instruction
Outputs
----
@@ -2237,11 +2266,12 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - pc: byte offset in the deployed code where execution will continue from. Must be a
- JUMPDEST instruction
- - condition: the program counter will be altered with the new value only if this value is
- different from 0. Otherwise, the program counter is simply incremented and the next
- instruction will be executed
+ - pc: byte offset in the deployed code where execution will continue from.
+ Must be a JUMPDEST instruction
+ - condition: the program counter will be altered with the new value only
+ if this value is different from 0. Otherwise, the program
+ counter is simply incremented and the next instruction
+ will be executed
Fork
----
@@ -2261,7 +2291,8 @@ class Opcodes(Opcode, Enum):
Description
----
- Get the value of the program counter prior to the increment corresponding to this instruction
+ Get the value of the program counter prior to the increment corresponding
+ to this instruction
Inputs
----
@@ -2313,8 +2344,8 @@ class Opcodes(Opcode, Enum):
Description
----
- Get the amount of available gas, including the corresponding reduction for the cost of this
- instruction
+ Get the amount of available gas, including the corresponding reduction
+ for the cost of this instruction
Inputs
----
@@ -2406,7 +2437,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: 32-byte value corresponding to that key. 0 if that key was never written
+ - value: 32-byte value corresponding to that key. 0 if that key
+ was never written
Fork
----
@@ -2441,7 +2473,8 @@ class Opcodes(Opcode, Enum):
----
100
- Source: [eips.ethereum.org/EIPS/eip-1153](https://eips.ethereum.org/EIPS/eip-1153)
+ Source:
+ [eips.ethereum.org/EIPS/eip-1153](https://eips.ethereum.org/EIPS/eip-1153)
"""
MCOPY = Opcode(0x5E, popped_stack_items=3, kwargs=["dest_offset", "offset", "size"])
@@ -2473,7 +2506,8 @@ class Opcodes(Opcode, Enum):
- static_gas = 3
- dynamic_gas = 3 * minimum_word_size + memory_expansion_cost
- Source: [eips.ethereum.org/EIPS/eip-5656](https://eips.ethereum.org/EIPS/eip-5656)
+ Source:
+ [eips.ethereum.org/EIPS/eip-5656](https://eips.ethereum.org/EIPS/eip-5656)
"""
PUSH0 = Opcode(0x5F, pushed_stack_items=1)
@@ -2519,7 +2553,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2547,7 +2582,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the lowest
+ significant bytes)
Fork
----
@@ -2575,7 +2611,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2603,7 +2640,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the lowest
+ significant bytes)
Fork
----
@@ -2631,7 +2669,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the lowest
+ significant bytes)
Fork
----
@@ -2659,7 +2698,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2687,7 +2727,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2715,7 +2756,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2743,7 +2785,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2771,7 +2814,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2799,7 +2843,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2827,7 +2872,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in the
+ lowest significant bytes)
Fork
----
@@ -2855,7 +2901,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2883,7 +2930,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2912,7 +2960,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2940,7 +2989,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2968,7 +3018,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -2996,7 +3047,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3024,7 +3076,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3052,7 +3105,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3080,7 +3134,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3108,7 +3163,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3136,7 +3192,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3164,7 +3221,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3192,7 +3250,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3220,7 +3279,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3248,7 +3308,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3276,7 +3337,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3304,7 +3366,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3332,7 +3395,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3360,7 +3424,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -3388,7 +3453,8 @@ class Opcodes(Opcode, Enum):
Outputs
----
- - value: pushed value, aligned to the right (put in the lowest significant bytes)
+ - value: pushed value, aligned to the right (put in
+ the lowest significant bytes)
Fork
----
@@ -4725,7 +4791,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- Source: [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200)
+ Source:
+ [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200)
"""
DATALOAD = Opcode(0xD0, popped_stack_items=1, pushed_stack_items=1, kwargs=["offset"])
@@ -4755,7 +4822,8 @@ class Opcodes(Opcode, Enum):
----
4
- Source: [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
+ Source:
+ [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
"""
DATALOADN = Opcode(0xD1, pushed_stack_items=1, data_portion_length=2)
@@ -4789,7 +4857,8 @@ class Opcodes(Opcode, Enum):
----
3
- Source: [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
+ Source:
+ [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
"""
DATASIZE = Opcode(0xD2, pushed_stack_items=1)
@@ -4818,7 +4887,8 @@ class Opcodes(Opcode, Enum):
----
2
- Source: [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
+ Source:
+ [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
"""
DATACOPY = Opcode(0xD3, popped_stack_items=3, kwargs=["dest_offset", "offset", "size"])
@@ -4852,7 +4922,8 @@ class Opcodes(Opcode, Enum):
- static_gas = 3
- dynamic_gas = 3 * minimum_word_size + memory_expansion_cost
- Source: [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
+ Source:
+ [eips.ethereum.org/EIPS/eip-7480](https://eips.ethereum.org/EIPS/eip-7480)
"""
RJUMPI = Opcode(0xE1, popped_stack_items=1, data_portion_length=2)
@@ -4878,7 +4949,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- Source: [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200)
+ Source:
+ [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200)
"""
RJUMPV = Opcode(
@@ -4896,11 +4968,13 @@ class Opcodes(Opcode, Enum):
----
Relative jump with variable offset.
- When calling this opcode to generate bytecode, the first argument is used to format the data
- portion of the opcode, and it can be either of two types:
- - A bytes type, and in this instance the bytes are used verbatim as the data portion.
- - An integer iterable, list or tuple or any other iterable, where each element is a
- jump offset.
+ When calling this opcode to generate bytecode, the first argument is
+ used to format the data portion of the opcode, and it can be either
+ of two types:
+ - A bytes type, and in this instance the bytes are used verbatim
+ as the data portion.
+ - An integer iterable, list or tuple or any other iterable, where
+ each element is a jump offset.
Inputs
----
@@ -4915,7 +4989,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- Source: [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200)
+ Source:
+ [eips.ethereum.org/EIPS/eip-4200](https://eips.ethereum.org/EIPS/eip-4200)
"""
CALLF = Opcode(0xE3, data_portion_length=2, unchecked_stack=True)
@@ -4930,21 +5005,21 @@ class Opcodes(Opcode, Enum):
- deduct 5 gas
- read uint16 operand idx
- - if 1024 < len(stack) + types[idx].max_stack_height - types[idx].inputs, execution results in
- an exceptional halt
+ - if 1024 < len(stack) + types[idx].max_stack_height - types[idx].inputs,
+ execution results in an exceptional halt
- if 1024 <= len(return_stack), execution results in an exceptional halt
- push new element to return_stack (current_code_idx, pc+3)
- update current_code_idx to idx and set pc to 0
Inputs
----
- Any: The inputs are not checked because we cannot know how many inputs the callee
- function/section requires
+ Any: The inputs are not checked because we cannot know how many inputs
+ the callee function/section requires
Outputs
----
- Any: The outputs are variable because we cannot know how many outputs the callee
- function/section produces
+ Any: The outputs are variable because we cannot know how many outputs the
+ callee function/section produces
Fork
----
@@ -4955,7 +5030,8 @@ class Opcodes(Opcode, Enum):
5
Source:
- [ipsilon/eof/blob/main/spec/eof.md](https://github.com/ipsilon/eof/blob/main/spec/eof.md)
+ [ipsilon/eof/blob/main/spec/
+ eof.md](https://github.com/ipsilon/eof/blob/main/spec/eof.md)
"""
RETF = Opcode(0xE4, terminating=True)
@@ -4995,8 +5071,8 @@ class Opcodes(Opcode, Enum):
- deduct 5 gas
- read uint16 operand idx
- - if 1024 < len(stack) + types[idx].max_stack_height - types[idx].inputs, execution results in
- an exceptional halt
+ - if 1024 < len(stack) + types[idx].max_stack_height - types[idx].inputs,
+ execution results in an exceptional halt
- set current_code_idx to idx
- set pc = 0
@@ -5102,7 +5178,8 @@ class Opcodes(Opcode, Enum):
Description
----
- Exchanges two stack positions. Two nybbles, n is high 4 bits + 1, then m is 4 low bits + 1.
+ Exchanges two stack positions. Two nybbles, n is high 4 bits + 1,
+ then m is 4 low bits + 1.
Exchanges the n+1'th item with the n + m + 1 item.
Inputs x and y when the opcode is used as `EXCHANGE[x, y]`, are equal to:
@@ -5231,7 +5308,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- value: value in wei to send to the new account
- - offset: byte offset in the memory in bytes, the initialization code for the new account
+ - offset: byte offset in the memory in bytes, the initialization code
+ for the new account
- size: byte size to copy (size of the initialization code)
Outputs
@@ -5250,8 +5328,9 @@ class Opcodes(Opcode, Enum):
code_deposit_cost = 200 * deployed_code_size
static_gas = 32000
- dynamic_gas = init_code_cost + memory_expansion_cost + deployment_code_execution_cost
- + code_deposit_cost
+ dynamic_gas = init_code_cost + memory_expansion_cost +
+ deployment_code_execution_cost +
+ code_deposit_cost
```
Source: [evm.codes/#F0](https://www.evm.codes/#F0)
@@ -5265,7 +5344,8 @@ class Opcodes(Opcode, Enum):
kwargs_defaults={"gas": GAS},
)
"""
- CALL(gas, address, value, args_offset, args_size, ret_offset, ret_size) = success
+ CALL(gas, address, value, args_offset, args_size, ret_offset, ret_size)
+ = success
----
Description
@@ -5274,14 +5354,15 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub
- context is returned to this one
+ - gas: amount of gas to send to the sub context to execute. The gas that
+ is not used by the sub context is returned to this one
- address: the account which context to execute
- value: value in wei to send to the account
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata of
+ the sub context
- args_size: byte size to copy (size of the calldata)
- - ret_offset: byte offset in the memory in bytes, where to store the return data of the sub
- context
+ - ret_offset: byte offset in the memory in bytes, where to store the
+ return data of the sub context
- ret_size: byte size to copy (size of the return data)
Outputs
@@ -5296,8 +5377,9 @@ class Opcodes(Opcode, Enum):
----
```
static_gas = 0
- dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
- + positive_value_cost + value_to_empty_account_cost
+ dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost + positive_value_cost +
+ value_to_empty_account_cost
```
Source: [evm.codes/#F1](https://www.evm.codes/#F1)
@@ -5311,24 +5393,26 @@ class Opcodes(Opcode, Enum):
kwargs_defaults={"gas": GAS},
)
"""
- CALLCODE(gas, address, value, args_offset, args_size, ret_offset, ret_size) = success
+ CALLCODE(gas, address, value, args_offset, args_size, ret_offset, ret_size)
+ = success
----
Description
----
- Message-call into this account with an alternative account's code. Executes code starting at
- the address to which the call is made.
+ Message-call into this account with an alternative account's code.
+ Executes code starting at the address to which the call is made.
Inputs
----
- - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub
- context is returned to this one
+ - gas: amount of gas to send to the sub context to execute. The gas that
+ is not used by the sub context is returned to this one
- address: the account which code to execute
- value: value in wei to send to the account
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata of
+ the sub context
- args_size: byte size to copy (size of the calldata)
- - ret_offset: byte offset in the memory in bytes, where to store the return data of the sub
- context
+ - ret_offset: byte offset in the memory in bytes, where to store the
+ return data of the sub context
- ret_size: byte size to copy (size of the return data)
Outputs
@@ -5343,8 +5427,8 @@ class Opcodes(Opcode, Enum):
----
```
static_gas = 0
- dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
- + positive_value_cost
+ dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost + positive_value_cost
```
Source: [evm.codes/#F2](https://www.evm.codes/#F2)
@@ -5361,8 +5445,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - offset: byte offset in the memory in bytes, to copy what will be the return data of this
- context
+ - offset: byte offset in the memory in bytes, to copy what will be
+ the return data of this context
- size: byte size to copy (size of the return data)
Outputs
@@ -5389,23 +5473,25 @@ class Opcodes(Opcode, Enum):
kwargs_defaults={"gas": GAS},
)
"""
- DELEGATECALL(gas, address, args_offset, args_size, ret_offset, ret_size) = success
+ DELEGATECALL(gas, address, args_offset, args_size, ret_offset, ret_size)
+ = success
----
Description
----
- Message-call into this account with an alternative account's code, but persisting the current
- values for sender and value
+ Message-call into this account with an alternative account's code, but
+ persisting the current values for sender and value
Inputs
----
- - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub
- context is returned to this one
+ - gas: amount of gas to send to the sub context to execute. The gas that
+ is not used by the sub context is returned to this one
- address: the account which code to execute
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata of
+ the sub context
- args_size: byte size to copy (size of the calldata)
- - ret_offset: byte offset in the memory in bytes, where to store the return data of the sub
- context
+ - ret_offset: byte offset in the memory in bytes, where to store
+ the return data of the sub context
- ret_size: byte size to copy (size of the return data)
Outputs
@@ -5419,7 +5505,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- static_gas = 0
- - dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
+ - dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost
Source: [evm.codes/#F4](https://www.evm.codes/#F4)
"""
@@ -5441,9 +5528,11 @@ class Opcodes(Opcode, Enum):
Inputs
----
- value: value in wei to send to the new account
- - offset: byte offset in the memory in bytes, the initialization code of the new account
+ - offset: byte offset in the memory in bytes, the initialization code
+ of the new account
- size: byte size to copy (size of the initialization code)
- - salt: 32-byte value used to create the new account at a deterministic address
+ - salt: 32-byte value used to create the new account at a
+ deterministic address
Outputs
----
@@ -5486,7 +5575,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- address: the account which context to execute
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata of
+ the sub context
- args_size: byte size to copy (size of the calldata)
- value: value in wei to send to the account
@@ -5494,7 +5584,8 @@ class Opcodes(Opcode, Enum):
----
- success:
- `0` if the call was successful.
- - `1` if the call has reverted (also can be pushed earlier in a light failure scenario).
+ - `1` if the call has reverted (also can be pushed earlier in a
+ light failure scenario).
- `2` if the call has failed.
Fork
@@ -5505,8 +5596,9 @@ class Opcodes(Opcode, Enum):
----
```
static_gas = 0
- dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
- + positive_value_cost + value_to_empty_account_cost
+ dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost + positive_value_cost +
+ value_to_empty_account_cost
```
Source: [EIP-7069](https://eips.ethereum.org/EIPS/eip-7069)
@@ -5524,20 +5616,22 @@ class Opcodes(Opcode, Enum):
Description
----
- Message-call into this account with an alternative account's code, but persisting the current
- values for sender and value
+ Message-call into this account with an alternative account's code,
+ but persisting the current values for sender and value
Inputs
----
- address: the account which context to execute
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata of
+ the sub context
- args_size: byte size to copy (size of the calldata)
Outputs
----
- success:
- `0` if the call was successful.
- - `1` if the call has reverted (also can be pushed earlier in a light failure scenario).
+ - `1` if the call has reverted (also can be pushed earlier in a
+ light failure scenario).
- `2` if the call has failed.
Fork
@@ -5547,7 +5641,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- static_gas = 0
- - dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
+ - dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost
Source: [EIP-7069](https://eips.ethereum.org/EIPS/eip-7069)
"""
@@ -5560,7 +5655,8 @@ class Opcodes(Opcode, Enum):
kwargs_defaults={"gas": GAS},
)
"""
- STATICCALL(gas, address, args_offset, args_size, ret_offset, ret_size) = success
+ STATICCALL(gas, address, args_offset, args_size, ret_offset, ret_size)
+ = success
----
Description
@@ -5569,13 +5665,14 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - gas: amount of gas to send to the sub context to execute. The gas that is not used by the sub
- context is returned to this one
+ - gas: amount of gas to send to the sub context to execute. The gas
+ that is not used by the sub context is returned to this one
- address: the account which context to execute
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata of the
+ sub context
- args_size: byte size to copy (size of the calldata)
- - ret_offset: byte offset in the memory in bytes, where to store the return data of the sub
- context
+ - ret_offset: byte offset in the memory in bytes, where to store the
+ return data of the sub context
- ret_size: byte size to copy (size of the return data)
Outputs
@@ -5589,7 +5686,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- static_gas = 0
- - dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
+ - dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost
Source: [evm.codes/#FA](https://www.evm.codes/#FA)
"""
@@ -5611,14 +5709,16 @@ class Opcodes(Opcode, Enum):
Inputs
----
- address: the account which context to execute
- - args_offset: byte offset in the memory in bytes, the calldata of the sub context
+ - args_offset: byte offset in the memory in bytes, the calldata
+ of the sub context
- args_size: byte size to copy (size of the calldata)
Outputs
----
- success:
- `0` if the call was successful.
- - `1` if the call has reverted (also can be pushed earlier in a light failure scenario).
+ - `1` if the call has reverted (also can be pushed earlier in a
+ light failure scenario).
- `2` if the call has failed.
Fork
@@ -5628,7 +5728,8 @@ class Opcodes(Opcode, Enum):
Gas
----
- static_gas = 0
- - dynamic_gas = memory_expansion_cost + code_execution_cost + address_access_cost
+ - dynamic_gas = memory_expansion_cost + code_execution_cost +
+ address_access_cost
Source: [EIP-7069](https://eips.ethereum.org/EIPS/eip-7069)
"""
@@ -5644,7 +5745,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - offset: byte offset in the return data from the last executed sub context to copy
+ - offset: byte offset in the return data from the last executed
+ sub context to copy
Fork
----
@@ -5666,7 +5768,8 @@ class Opcodes(Opcode, Enum):
Inputs
----
- - offset: byte offset in the memory in bytes. The return data of the calling context
+ - offset: byte offset in the memory in bytes. The return data of
+ the calling context
- size: byte size to copy (size of the return data)
Fork
@@ -5771,7 +5874,10 @@ class Opcodes(Opcode, Enum):
def _mstore_operation(data: OpcodeCallArg = b"", offset: OpcodeCallArg = 0) -> Bytecode:
- """Generate the bytecode that stores an arbitrary amount of data in memory."""
+ """
+ Generate the bytecode that stores an arbitrary
+ amount of data in memory.
+ """
assert isinstance(offset, int)
if isinstance(data, int):
data = data.to_bytes(32, "big")
@@ -5782,10 +5888,11 @@ def _mstore_operation(data: OpcodeCallArg = b"", offset: OpcodeCallArg = 0) -> B
if len(chunk) == 32:
bytecode += Opcodes.MSTORE(offset, chunk)
else:
- # We need to MLOAD the existing data at the offset and then do a bitwise OR with the
- # new data to store it in memory.
+ # We need to MLOAD the existing data at the offset and then
+ # do a bitwise OR with the new data to store it in memory.
bytecode += Opcodes.MLOAD(offset)
- # Create a mask to zero out the leftmost bytes of the existing data.
+ # Create a mask to zero out the leftmost bytes of
+ # the existing data.
mask_size = 32 - len(chunk)
bytecode += _push_opcodes_byte_list[mask_size - 1][-1]
bytecode += Opcodes.AND
diff --git a/src/pytest_plugins/concurrency.py b/src/pytest_plugins/concurrency.py
index 10dadcca9c6..0601824c338 100644
--- a/src/pytest_plugins/concurrency.py
+++ b/src/pytest_plugins/concurrency.py
@@ -1,6 +1,6 @@
"""
-Pytest plugin to create a temporary folder for the session where
-multi-process tests can store data that is shared between processes.
+Pytest plugin to create a temporary folder for the session where multi-process
+tests can store data that is shared between processes.
The provided `session_temp_folder` fixture is used, for example, by `consume`
when running hive simulators to ensure that only one `test_suite` is created
@@ -24,8 +24,8 @@ def session_temp_folder_name(testrun_uid: str) -> str:
Define the name of the temporary folder that will be shared among all the
xdist workers to coordinate the tests.
- "testrun_uid" is a fixture provided by the xdist plugin, and is unique for each test run,
- so it is used to create the unique folder name.
+ "testrun_uid" is a fixture provided by the xdist plugin, and is unique for
+ each test run, so it is used to create the unique folder name.
"""
return f"pytest-{testrun_uid}"
@@ -35,11 +35,11 @@ def session_temp_folder(
session_temp_folder_name: str,
) -> Generator[Path, None, None]:
"""
- Create a global temporary folder that will be shared among all the
- xdist workers to coordinate the tests.
+ Create a global temporary folder that will be shared among all the xdist
+ workers to coordinate the tests.
- We also create a file to keep track of how many workers are still using the folder, so we can
- delete it when the last worker is done.
+ We also create a file to keep track of how many workers are still using the
+ folder, so we can delete it when the last worker is done.
"""
session_temp_folder = Path(get_temp_dir()) / session_temp_folder_name
session_temp_folder.mkdir(exist_ok=True)
diff --git a/src/pytest_plugins/consume/consume.py b/src/pytest_plugins/consume/consume.py
index 676c7548081..4313615b19c 100644
--- a/src/pytest_plugins/consume/consume.py
+++ b/src/pytest_plugins/consume/consume.py
@@ -1,4 +1,6 @@
-"""A pytest plugin providing common functionality for consuming test fixtures."""
+"""
+A pytest plugin providing common functionality for consuming test fixtures.
+"""
import re
import sys
@@ -53,7 +55,10 @@ def __init__(self, url: str, destination_folder: Path): # noqa: D107
self.archive_name = self.strip_archive_extension(Path(self.parsed_url.path).name)
def download_and_extract(self) -> Tuple[bool, Path]:
- """Download the URL and extract it locally if it hasn't already been downloaded."""
+ """
+ Download the URL and extract it locally if it hasn't already been
+ downloaded.
+ """
if self.destination_folder.exists():
return True, self.detect_extracted_directory()
@@ -94,8 +99,8 @@ def fetch_and_extract(self) -> Path:
def detect_extracted_directory(self) -> Path:
"""
- Detect a single top-level dir within the extracted archive, otherwise return
- destination_folder.
+ Detect a single top-level dir within the extracted archive, otherwise
+ return destination_folder.
""" # noqa: D200
extracted_dirs = [
d for d in self.destination_folder.iterdir() if d.is_dir() and d.name != ".meta"
@@ -196,7 +201,9 @@ def from_url(
def from_release_spec(
cls, spec: str, cache_folder: Optional[Path] = None, extract_to: Optional[Path] = None
) -> "FixturesSource":
- """Create a fixture source from a release spec (e.g., develop@latest)."""
+ """
+ Create a fixture source from a release spec (e.g., develop@latest).
+ """
if cache_folder is None:
cache_folder = CACHED_DOWNLOADS_DIRECTORY
url = get_release_url(spec)
@@ -224,7 +231,9 @@ def from_release_spec(
@staticmethod
def validate_local_path(path: Path) -> "FixturesSource":
- """Validate that a local fixture path exists and contains JSON files."""
+ """
+ Validate that a local fixture path exists and contains JSON files.
+ """
if not path.exists():
pytest.exit(f"Specified fixture directory '{path}' does not exist.")
if not any(path.glob("**/*.json")):
@@ -242,24 +251,29 @@ def __init__(self, pattern: str, collectonly: bool = False): # noqa: D107
@staticmethod
def _escape_id(pattern: str) -> str:
"""
- Escape regex char in the pattern; prepend and append '.*' (for `fill` IDs).
+ Escape regex char in the pattern; prepend and append '.*' (for `fill`
+ IDs).
- The `pattern` is prefixed and suffixed with a wildcard match to allow `fill`
- test case IDs to be specified, otherwise the full `consume` test ID must be
- specified.
+ The `pattern` is prefixed and suffixed with a wildcard match to allow
+ `fill` test case IDs to be specified, otherwise the full `consume` test
+ ID must be specified.
"""
return f".*{re.escape(pattern)}.*"
@classmethod
def from_string(cls, pattern: str) -> "SimLimitBehavior":
"""
- Parse the `--sim.limit` argument and return a `SimLimitBehavior` instance.
+ Parse the `--sim.limit` argument and return a `SimLimitBehavior`
+ instance.
If `pattern`:
- - Is "collectonly", enable collection mode without filtering.
- - Starts with "collectonly:", enable collection mode and use the rest as a regex pattern.
- - Starts with "id:", treat the rest as a literal test ID and escape special regex chars.
- - Starts with "collectonly:id:", enable collection mode with a literal test ID.
+ - Is "collectonly", enable collection mode without filtering.
+ - Starts with "collectonly:", enable collection mode and use the
+ rest as a regex pattern.
+ - Starts with "id:", treat the rest as a literal test ID and escape
+ special regex chars.
+ - Starts with "collectonly:id:", enable collection mode with a
+ literal test ID.
"""
if pattern == "collectonly":
return cls(pattern=".*", collectonly=True)
@@ -357,22 +371,23 @@ def pytest_configure(config): # noqa: D103
test collection begins.
`@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
- called before the pytest-html plugin's pytest_configure to ensure that
- it uses the modified `htmlpath` option.
+ called before the pytest-html plugin's pytest_configure to ensure that it
+ uses the modified `htmlpath` option.
"""
# Validate --extract-to usage
if config.option.extract_to_folder is not None and "cache" not in sys.argv:
pytest.exit("The --extract-to flag is only valid with the 'cache' command.")
if config.option.fixtures_source is None:
- # NOTE: Setting the default value here is necessary for correct stdin/piping behavior.
+ # NOTE: Setting the default value here is necessary for correct
+ # stdin/piping behavior.
config.fixtures_source = FixturesSource(
input_option=default_input(), path=Path(default_input())
)
else:
- # NOTE: Setting `type=FixturesSource.from_input` in pytest_addoption() causes the option to
- # be evaluated twice which breaks the result of `was_cached`; the work-around is to call it
- # manually here.
+ # NOTE: Setting `type=FixturesSource.from_input` in pytest_addoption()
+ # causes the option to be evaluated twice which breaks the result of
+ # `was_cached`; the work-around is to call it manually here.
config.fixtures_source = FixturesSource.from_input(
config.option.fixtures_source,
Path(config.option.fixture_cache_folder),
@@ -428,7 +443,8 @@ def pytest_configure(config): # noqa: D103
all_forks = { # type: ignore
fork for fork in set(get_forks()) | get_transition_forks() if not fork.ignore()
}
- # Append all forks within the index file (compatibility with `ethereum/tests`)
+ # Append all forks within the index file (compatibility with
+ # `ethereum/tests`)
all_forks.update(getattr(index, "forks", []))
for fork in all_forks:
config.addinivalue_line("markers", f"{fork}: Tests for the {fork} fork")
@@ -483,7 +499,8 @@ def fixtures_source(request) -> FixturesSource: # noqa: D103
def pytest_generate_tests(metafunc):
"""
Generate test cases for every test fixture in all the JSON fixture files
- within the specified fixtures directory, or read from stdin if the directory is 'stdin'.
+ within the specified fixtures directory, or read from stdin if the
+ directory is 'stdin'.
"""
if "cache" in sys.argv:
return
diff --git a/src/pytest_plugins/consume/direct/conftest.py b/src/pytest_plugins/consume/direct/conftest.py
index e584e013471..db9208a910a 100644
--- a/src/pytest_plugins/consume/direct/conftest.py
+++ b/src/pytest_plugins/consume/direct/conftest.py
@@ -1,6 +1,6 @@
"""
-A pytest plugin that configures the consume command to act as a test runner
-for "direct" client fixture consumer interfaces.
+A pytest plugin that configures the consume command to act as a test runner for
+"direct" client fixture consumer interfaces.
For example, via go-ethereum's `evm blocktest` or `evm statetest` commands.
"""
@@ -120,7 +120,8 @@ def fixture_path(test_case: TestCaseIndexFile | TestCaseStream, fixtures_source:
"""
Path to the current JSON fixture file.
- If the fixture source is stdin, the fixture is written to a temporary json file.
+ If the fixture source is stdin, the fixture is written to a temporary json
+ file.
"""
if fixtures_source.is_stdin:
assert isinstance(test_case, TestCaseStream)
diff --git a/src/pytest_plugins/consume/direct/test_via_direct.py b/src/pytest_plugins/consume/direct/test_via_direct.py
index f1f95081444..e1f014d0198 100644
--- a/src/pytest_plugins/consume/direct/test_via_direct.py
+++ b/src/pytest_plugins/consume/direct/test_via_direct.py
@@ -1,6 +1,6 @@
"""
-Executes a JSON test fixture directly against a client using a dedicated
-client interface similar to geth's EVM 'blocktest' command.
+Executes a JSON test fixture directly against a client using a dedicated client
+interface similar to geth's EVM 'blocktest' command.
"""
from pathlib import Path
@@ -16,8 +16,8 @@ def test_fixture(
test_dump_dir: Path | None,
):
"""
- Generic test function used to call the fixture consumer with a given fixture file path and
- a fixture name (for a single test run).
+ Generic test function used to call the fixture consumer with a given
+ fixture file path and a fixture name (for a single test run).
"""
fixture_consumer.consume_fixture(
test_case.format,
diff --git a/src/pytest_plugins/consume/releases.py b/src/pytest_plugins/consume/releases.py
index 2e694a7eec5..26ecc67b17f 100644
--- a/src/pytest_plugins/consume/releases.py
+++ b/src/pytest_plugins/consume/releases.py
@@ -48,7 +48,8 @@ def from_string(cls, release_string: str) -> "ReleaseTag":
"""
Create a release descriptor from a string.
- The release source can be in the format `tag_name@version` or just `tag_name`.
+ The release source can be in the format `tag_name@version` or just
+ `tag_name`.
"""
version: str | None
if "@" in release_string:
@@ -69,7 +70,8 @@ def __eq__(self, value) -> bool:
"""
Check if the release descriptor matches the string value.
- Returns True if the value is the same as the tag name or the tag name and version.
+ Returns True if the value is the same as the tag name or the tag name
+ and version.
"""
assert isinstance(value, str), f"Expected a string, but got: {value}"
if self.version is not None:
@@ -141,7 +143,9 @@ class Releases(RootModel[List[ReleaseInformation]]):
def is_docker_or_ci() -> bool:
- """Check if the code is running inside a Docker container or a CI environment."""
+ """
+ Check if the code is running inside a Docker container or a CI environment.
+ """
return "GITHUB_ACTIONS" in os.environ or Path("/.dockerenv").exists()
@@ -221,14 +225,16 @@ def get_release_page_url(release_string: str) -> str:
Return the GitHub Release page URL for a specific release descriptor.
This function can handle:
- - A standard release string (e.g., "eip7692@latest") - from execution-spec-tests only.
+ - A standard release string (e.g., "eip7692@latest") from
+ execution-spec-tests only.
- A direct asset download link (e.g.,
- "https://github.com/ethereum/execution-spec-tests/releases/download/v4.0.0/fixtures_eip7692.tar.gz").
+        "https://github.com/ethereum/execution-spec-tests/releases/download/v4.0.0/fixtures_eip7692.tar.gz"
+        ).
"""
release_information = get_release_information()
- # Case 1: If it's a direct GitHub Releases download link,
- # find which release in `release_information` has an asset with this exact URL.
+ # Case 1: If it's a direct GitHub Releases download link, find which
+ # release in `release_information` has an asset with this exact URL.
repo_pattern = "|".join(re.escape(repo) for repo in SUPPORTED_REPOS)
regex_pattern = rf"https://github\.com/({repo_pattern})/releases/download/"
if re.match(regex_pattern, release_string):
@@ -238,7 +244,8 @@ def get_release_page_url(release_string: str) -> str:
return release.url # The HTML page for this release
raise NoSuchReleaseError(f"No release found for asset URL: {release_string}")
- # Case 2: Otherwise, treat it as a release descriptor (e.g., "eip7692@latest")
+ # Case 2: Otherwise, treat it as a release descriptor (e.g.,
+ # "eip7692@latest")
release_descriptor = ReleaseTag.from_string(release_string)
for release in release_information:
if release_descriptor in release:
@@ -252,9 +259,10 @@ def get_release_information() -> List[ReleaseInformation]:
"""
Get the release information.
- First check if the cached release information file exists. If it does, but it is older than 4
- hours, delete the file, unless running inside a CI environment or a Docker container.
- Then download the release information from the Github API and save it to the cache file.
+ First check if the cached release information file exists. If it does, but
+ it is older than 4 hours, delete the file, unless running inside a CI
+ environment or a Docker container. Then download the release information
+ from the Github API and save it to the cache file.
"""
if CACHED_RELEASE_INFORMATION_FILE.exists():
last_modified = CACHED_RELEASE_INFORMATION_FILE.stat().st_mtime
diff --git a/src/pytest_plugins/consume/simulators/base.py b/src/pytest_plugins/consume/simulators/base.py
index 0b62ebb5873..22736c97eba 100644
--- a/src/pytest_plugins/consume/simulators/base.py
+++ b/src/pytest_plugins/consume/simulators/base.py
@@ -45,7 +45,9 @@ def __init__(self) -> None:
self._fixtures: Dict[Path, Fixtures] = {}
def __getitem__(self, key: Path) -> Fixtures:
- """Return the fixtures from the index file, if not found, load from disk."""
+ """
+ Return the fixtures from the index file, if not found, load from disk.
+ """
assert key.is_file(), f"Expected a file path, got '{key}'"
if key not in self._fixtures:
self._fixtures[key] = Fixtures.model_validate_json(key.read_text())
@@ -54,7 +56,10 @@ def __getitem__(self, key: Path) -> Fixtures:
@pytest.fixture(scope="session")
def fixture_file_loader() -> Dict[Path, Fixtures]:
- """Return a singleton dictionary that caches loaded fixture files used in all tests."""
+ """
+ Return a singleton dictionary that caches loaded fixture files used in all
+ tests.
+ """
return FixturesDict()
@@ -65,12 +70,12 @@ def fixture(
test_case: TestCaseIndexFile | TestCaseStream,
) -> BaseFixture:
"""
- Load the fixture from a file or from stream in any of the supported
- fixture formats.
+ Load the fixture from a file or from stream in any of the supported fixture
+ formats.
- The fixture is either already available within the test case (if consume
- is taking input on stdin) or loaded from the fixture json file if taking
- input from disk (fixture directory with index file).
+ The fixture is either already available within the test case (if consume is
+ taking input on stdin) or loaded from the fixture json file if taking input
+ from disk (fixture directory with index file).
"""
fixture: BaseFixture
if fixtures_source.is_stdin:
diff --git a/src/pytest_plugins/consume/simulators/exceptions.py b/src/pytest_plugins/consume/simulators/exceptions.py
index 0e8d4d63a78..8db60438e01 100644
--- a/src/pytest_plugins/consume/simulators/exceptions.py
+++ b/src/pytest_plugins/consume/simulators/exceptions.py
@@ -54,7 +54,10 @@ def client_exception_mapper(
@pytest.fixture(scope="session")
def disable_strict_exception_matching(request: pytest.FixtureRequest) -> List[str]:
- """Return the list of clients or forks that should NOT use strict exception matching."""
+ """
+ Return the list of clients or forks that should NOT use strict exception
+ matching.
+ """
config_string = request.config.getoption("disable_strict_exception_matching")
return config_string.split(",") if config_string else []
@@ -76,7 +79,8 @@ def fork_strict_exception_matching(
disable_strict_exception_matching: List[str],
) -> bool:
"""Return True if the fork should use strict exception matching."""
- # NOTE: `in` makes it easier for transition forks ("Prague" in "CancunToPragueAtTime15k")
+ # NOTE: `in` makes it easier for transition forks ("Prague" in
+ # "CancunToPragueAtTime15k")
return not any(
s.lower() in str(fixture.fork).lower() for s in disable_strict_exception_matching
)
diff --git a/src/pytest_plugins/consume/simulators/helpers/exceptions.py b/src/pytest_plugins/consume/simulators/helpers/exceptions.py
index 6307fb095b6..f6a7e7e2bc4 100644
--- a/src/pytest_plugins/consume/simulators/helpers/exceptions.py
+++ b/src/pytest_plugins/consume/simulators/helpers/exceptions.py
@@ -16,10 +16,16 @@
class GenesisBlockMismatchExceptionError(Exception):
- """Definers a mismatch exception between the client and fixture genesis blockhash."""
+ """
+    Defines a mismatch exception between the client and fixture genesis
+ blockhash.
+ """
def __init__(self, *, expected_header: FixtureHeader, got_genesis_block: Dict[str, str]):
- """Initialize the exception with the expected and received genesis block headers."""
+ """
+ Initialize the exception with the expected and received genesis block
+ headers.
+ """
message = (
"Genesis block hash mismatch.\n\n"
f"Expected: {expected_header.block_hash}\n"
@@ -47,7 +53,9 @@ def __init__(self, *, expected_header: FixtureHeader, got_genesis_block: Dict[st
@staticmethod
def compare_models(expected: FixtureHeader, got: FixtureHeader) -> Tuple[Dict, List]:
- """Compare two FixtureHeader model instances and return their differences."""
+ """
+ Compare two FixtureHeader model instances and return their differences.
+ """
differences = {}
unexpected_fields = []
for (exp_name, exp_value), (got_name, got_value) in zip(expected, got, strict=False):
diff --git a/src/pytest_plugins/consume/simulators/helpers/ruleset.py b/src/pytest_plugins/consume/simulators/helpers/ruleset.py
index c1b7195b290..2098dc6c938 100644
--- a/src/pytest_plugins/consume/simulators/helpers/ruleset.py
+++ b/src/pytest_plugins/consume/simulators/helpers/ruleset.py
@@ -43,10 +43,12 @@ def get_blob_schedule_entries(fork: Fork) -> Dict[str, int]:
"""
Generate blob schedule entries for each fork (and respective parent forks).
- Adds the following entries to the ruleset for the given fork (and parent forks):
- HIVE_{FORK}_BLOB_TARGET: target_blobs_per_block()
- HIVE_{FORK}_BLOB_MAX: max_blobs_per_block()
- HIVE_{FORK}_BLOB_BASE_FEE_UPDATE_FRACTION: blob_base_fee_update_fraction()
+ Adds the following entries to the ruleset for the given fork (and parent
+ forks):
+ HIVE_{FORK}_BLOB_TARGET: target_blobs_per_block()
+ HIVE_{FORK}_BLOB_MAX: max_blobs_per_block()
+        HIVE_{FORK}_BLOB_BASE_FEE_UPDATE_FRACTION:
+            blob_base_fee_update_fraction()
"""
entries: Dict = {}
forks_with_blobs: List[Fork] = []
diff --git a/src/pytest_plugins/consume/simulators/helpers/timing.py b/src/pytest_plugins/consume/simulators/helpers/timing.py
index eddfe3f7804..e5cadee9578 100644
--- a/src/pytest_plugins/consume/simulators/helpers/timing.py
+++ b/src/pytest_plugins/consume/simulators/helpers/timing.py
@@ -5,7 +5,9 @@
class TimingData:
- """The times taken to perform the various steps of a test case (seconds)."""
+ """
+ The times taken to perform the various steps of a test case (seconds).
+ """
name: str
start_time: float | None
diff --git a/src/pytest_plugins/consume/simulators/rlp/conftest.py b/src/pytest_plugins/consume/simulators/rlp/conftest.py
index f2d8f93a343..66ad7dd62a7 100644
--- a/src/pytest_plugins/consume/simulators/rlp/conftest.py
+++ b/src/pytest_plugins/consume/simulators/rlp/conftest.py
@@ -46,7 +46,10 @@ def blocks_rlp(fixture: BlockchainFixture) -> List[Bytes]:
@pytest.fixture(scope="function")
def buffered_blocks_rlp(blocks_rlp: List[bytes]) -> List[io.BufferedReader]:
- """Convert the RLP-encoded blocks of the current test fixture to buffered readers."""
+ """
+ Convert the RLP-encoded blocks of the current test fixture to buffered
+ readers.
+ """
block_rlp_files = []
for _, block_rlp in enumerate(blocks_rlp):
block_rlp_stream = io.BytesIO(block_rlp)
diff --git a/src/pytest_plugins/consume/simulators/simulator_logic/test_via_engine.py b/src/pytest_plugins/consume/simulators/simulator_logic/test_via_engine.py
index 1d54d0586c8..1ef5a774dea 100644
--- a/src/pytest_plugins/consume/simulators/simulator_logic/test_via_engine.py
+++ b/src/pytest_plugins/consume/simulators/simulator_logic/test_via_engine.py
@@ -1,8 +1,10 @@
"""
-A hive based simulator that executes blocks against clients using the `engine_newPayloadVX` method
-from the Engine API. The simulator uses the `BlockchainEngineFixtures` to test against clients.
+A hive based simulator that executes blocks against clients using the
+`engine_newPayloadVX` method from the Engine API. The simulator uses the
+`BlockchainEngineFixtures` to test against clients.
-Each `engine_newPayloadVX` is verified against the appropriate VALID/INVALID responses.
+Each `engine_newPayloadVX` is verified against the appropriate VALID/INVALID
+responses.
"""
import time
@@ -39,10 +41,12 @@ def test_blockchain_via_engine(
strict_exception_matching: bool,
):
"""
- 1. Check the client genesis block hash matches `fixture.genesis.block_hash`.
- 2. Execute the test case fixture blocks against the client under test using the
- `engine_newPayloadVX` method from the Engine API.
- 3. For valid payloads a forkchoice update is performed to finalize the chain.
+ 1. Check the client genesis block hash matches
+ `fixture.genesis.block_hash`.
+ 2. Execute the test case fixture blocks against the client under test using
+ the `engine_newPayloadVX` method from the Engine API.
+ 3. For valid payloads a forkchoice update is performed to finalize the
+ chain.
"""
# Send a initial forkchoice update
with timing_data.time("Initial forkchoice update"):
diff --git a/src/pytest_plugins/consume/simulators/simulator_logic/test_via_rlp.py b/src/pytest_plugins/consume/simulators/simulator_logic/test_via_rlp.py
index 4c63eba5b78..e894947a305 100644
--- a/src/pytest_plugins/consume/simulators/simulator_logic/test_via_rlp.py
+++ b/src/pytest_plugins/consume/simulators/simulator_logic/test_via_rlp.py
@@ -1,8 +1,9 @@
"""
-A hive based simulator that executes RLP-encoded blocks against clients. The simulator uses the
-`BlockchainFixtures` to test this against clients.
+A hive based simulator that executes RLP-encoded blocks against clients. The
+simulator uses the `BlockchainFixtures` to test this against clients.
-Clients consume the genesis and RLP-encoded blocks from input files upon start-up.
+Clients consume the genesis and RLP-encoded blocks from input files upon
+start-up.
"""
import logging
@@ -23,7 +24,8 @@ def test_via_rlp(
fixture: BlockchainFixture,
):
"""
- 1. Check the client genesis block hash matches `fixture.genesis.block_hash`.
+ 1. Check the client genesis block hash matches
+ `fixture.genesis.block_hash`.
2. Check the client last block hash matches `fixture.last_block_hash`.
"""
with timing_data.time("Get genesis block"):
diff --git a/src/pytest_plugins/consume/simulators/simulator_logic/test_via_sync.py b/src/pytest_plugins/consume/simulators/simulator_logic/test_via_sync.py
index 5ef252776af..2781b7540f9 100644
--- a/src/pytest_plugins/consume/simulators/simulator_logic/test_via_sync.py
+++ b/src/pytest_plugins/consume/simulators/simulator_logic/test_via_sync.py
@@ -1,10 +1,12 @@
"""
-A hive based simulator that executes blocks against clients using the `engine_newPayloadV*` method
-from the Engine API with sync testing. The simulator uses the `BlockchainEngineSyncFixtures` to
-test against clients with client synchronization.
+A hive based simulator that executes blocks against clients using the
+`engine_newPayloadV*` method from the Engine API with sync testing. The
+simulator uses the `BlockchainEngineSyncFixtures` to test against clients with
+client synchronization.
This simulator:
-1. Spins up two clients: one as the client under test and another as the sync client
+1. Spins up two clients: one as the client under test and another as the sync
+ client
2. Executes payloads on the client under test
3. Has the sync client synchronize from the client under test
4. Verifies that the sync was successful
@@ -328,8 +330,8 @@ def test_blockchain_via_sync(
f"{forkchoice_response}"
)
- # Add peer using admin_addPeer
- # This seems to be required... TODO: we can maybe improve flow here if not required
+    # Add peer using admin_addPeer. This seems to be required... TODO:
+    # we can maybe improve flow here if not required
logger.info(f"Adding peer: {client_enode_url}")
assert sync_admin_rpc is not None, "sync_admin_rpc is required"
try:
@@ -338,7 +340,8 @@ def test_blockchain_via_sync(
except Exception as e:
raise LoggedError(f"admin_addPeer failed: {e}") from e
- time.sleep(1) # quick sleep to allow for connection - TODO: is this necessary?
+ # quick sleep to allow for connection - TODO: is this necessary?
+ time.sleep(1)
try:
sync_peer_count = sync_net_rpc.peer_count()
@@ -352,7 +355,8 @@ def test_blockchain_via_sync(
except Exception as e:
logger.warning(f"Could not verify peer connection: {e}")
- # Trigger sync by sending the target block via newPayload followed by forkchoice update
+ # Trigger sync by sending the target block via newPayload followed by
+ # forkchoice update
logger.info(f"Triggering sync to block {last_valid_block_hash}")
# Find the last valid payload to send to sync client
@@ -374,7 +378,8 @@ def test_blockchain_via_sync(
)
try:
- version = last_valid_payload.new_payload_version # log version used for debugging
+ # log version used for debugging
+ version = last_valid_payload.new_payload_version
logger.info(f"Sending target payload via engine_newPayloadV{version}")
# send the payload to sync client
@@ -403,9 +408,9 @@ def test_blockchain_via_sync(
# Give a moment for P2P connections to establish after sync starts
time.sleep(1)
- # Check peer count after triggering sync
- # Note: Reth does not actually raise the peer count but doesn't seem
- # to need this to sync.
+                    # Check peer count after triggering sync. Note: Reth does
+                    # not actually raise the peer count but doesn't seem to
+                    # need this to sync.
try:
assert sync_net_rpc is not None, "sync_net_rpc is required"
client_peer_count = net_rpc.peer_count()
@@ -444,7 +449,8 @@ def test_blockchain_via_sync(
# Send periodic forkchoice updates to keep sync alive
if time.time() - last_forkchoice_time >= forkchoice_interval:
try:
- # Send forkchoice update to sync client to trigger/maintain sync
+ # Send forkchoice update to sync client to trigger/maintain
+ # sync
assert sync_engine_rpc is not None, "sync_engine_rpc is required"
sync_fc_response = sync_engine_rpc.forkchoice_updated(
forkchoice_state=last_valid_block_forkchoice_state,
diff --git a/src/pytest_plugins/consume/simulators/single_test_client.py b/src/pytest_plugins/consume/simulators/single_test_client.py
index b6db7674c95..0ea5da90acb 100644
--- a/src/pytest_plugins/consume/simulators/single_test_client.py
+++ b/src/pytest_plugins/consume/simulators/single_test_client.py
@@ -1,4 +1,6 @@
-"""Common pytest fixtures for simulators with single-test client architecture."""
+"""
+Common pytest fixtures for simulators with single-test client architecture.
+"""
import io
import json
@@ -23,7 +25,10 @@
@pytest.fixture(scope="function")
def client_genesis(fixture: BlockchainFixtureCommon) -> dict:
- """Convert the fixture genesis block header and pre-state to a client genesis state."""
+ """
+ Convert the fixture genesis block header and pre-state to a client genesis
+ state.
+ """
genesis = to_json(fixture.genesis)
alloc = to_json(fixture.pre)
# NOTE: nethermind requires account keys without '0x' prefix
@@ -51,7 +56,10 @@ def environment(
@pytest.fixture(scope="function")
def buffered_genesis(client_genesis: dict) -> io.BufferedReader:
- """Create a buffered reader for the genesis block header of the current test fixture."""
+ """
+ Create a buffered reader for the genesis block header of the current test
+ fixture.
+ """
genesis_json = json.dumps(client_genesis)
genesis_bytes = genesis_json.encode("utf-8")
return io.BufferedReader(cast(io.RawIOBase, io.BytesIO(genesis_bytes)))
@@ -71,7 +79,9 @@ def client(
client_type: ClientType,
total_timing_data: TimingData,
) -> Generator[Client, None, None]:
- """Initialize the client with the appropriate files and environment variables."""
+ """
+ Initialize the client with the appropriate files and environment variables.
+ """
logger.info(f"Starting client ({client_type.name})...")
logger.debug(f"Main client Network ID: {environment.get('HIVE_NETWORK_ID', 'NOT SET!')}")
logger.debug(f"Main client Chain ID: {environment.get('HIVE_CHAIN_ID', 'NOT SET!')}")
diff --git a/src/pytest_plugins/consume/simulators/sync/conftest.py b/src/pytest_plugins/consume/simulators/sync/conftest.py
index bf9a1323792..9447ea47287 100644
--- a/src/pytest_plugins/consume/simulators/sync/conftest.py
+++ b/src/pytest_plugins/consume/simulators/sync/conftest.py
@@ -60,7 +60,8 @@ def pytest_collection_modifyitems(session, config, items):
# Format: ``-{client}_sync_{sync_client}``
new_suffix = f"-{client_name}::sync_{sync_client_name}"
- # client_param-tests/path/to/test.py::test_name[test_params]-sync_client_param
+ # client_param-
+ # tests/path/to/test.py::test_name[test_params]-sync_client_param
# 1. Remove the client prefix from the beginning
# 2. Replace the -client_param part at the end with our new format
nodeid = item.nodeid
@@ -119,7 +120,10 @@ def admin_rpc(client: Client) -> AdminRPC:
@pytest.fixture(scope="function")
def sync_genesis(fixture: BlockchainEngineSyncFixture) -> dict:
- """Convert the fixture genesis block header and pre-state to a sync client genesis state."""
+ """
+ Convert the fixture genesis block header and pre-state to a sync client
+ genesis state.
+ """
genesis = to_json(fixture.genesis)
alloc = to_json(fixture.pre)
# NOTE: nethermind requires account keys without '0x' prefix
@@ -129,7 +133,9 @@ def sync_genesis(fixture: BlockchainEngineSyncFixture) -> dict:
@pytest.fixture(scope="function")
def sync_buffered_genesis(sync_genesis: dict) -> io.BufferedReader:
- """Create a buffered reader for the genesis block header of the sync client."""
+ """
+ Create a buffered reader for the genesis block header of the sync client.
+ """
genesis_json = json.dumps(sync_genesis)
genesis_bytes = genesis_json.encode("utf-8")
return io.BufferedReader(cast(io.RawIOBase, io.BytesIO(genesis_bytes)))
diff --git a/src/pytest_plugins/consume/simulators/test_case_description.py b/src/pytest_plugins/consume/simulators/test_case_description.py
index 3a97c63f198..8989bd05c2f 100644
--- a/src/pytest_plugins/consume/simulators/test_case_description.py
+++ b/src/pytest_plugins/consume/simulators/test_case_description.py
@@ -1,4 +1,7 @@
-"""Pytest fixtures that help create the test case "Description" displayed in the Hive UI."""
+"""
+Pytest fixtures that help create the test case "Description" displayed in the
+Hive UI.
+"""
import logging
import textwrap
@@ -30,7 +33,10 @@ def hive_clients_yaml_generator_command(
hive_clients_yaml_target_filename: str,
hive_info: HiveInfo,
) -> str:
- """Generate a shell command that creates a clients YAML file for the current client."""
+ """
+ Generate a shell command that creates a clients YAML file for the current
+ client.
+ """
try:
if not client_file:
raise ValueError("No client information available - try updating hive")
@@ -65,10 +71,13 @@ def filtered_hive_options(hive_info: HiveInfo) -> List[str]:
logger.info("Hive info: %s", hive_info.command)
unwanted_options = [
- "--client", # gets overwritten: we specify a single client; the one from the test case
+ "--client", # gets overwritten: we specify a single client; the one
+ # from the test case
"--client-file", # gets overwritten: we'll write our own client file
- "--results-root", # use default value instead (or you have to pass it to ./hiveview)
- "--sim.limit", # gets overwritten: we only run the current test case id
+ "--results-root", # use default value instead (or you have to pass it
+ # to ./hiveview)
+ "--sim.limit", # gets overwritten: we only run the current test case
+ # id
"--sim.parallelism", # skip; we'll only be running a single test
]
@@ -118,7 +127,10 @@ def hive_dev_command(
client_type: ClientType,
hive_client_config_file_parameter: str,
) -> str:
- """Return the command used to instantiate hive alongside the `consume` command."""
+ """
+ Return the command used to instantiate hive alongside the `consume`
+ command.
+ """
return f"./hive --dev {hive_client_config_file_parameter} --client {client_type.name}"
@@ -151,7 +163,8 @@ def test_case_description(
if "description" not in fixture.info or fixture.info["description"] is None:
test_docstring = "No documentation available."
else:
- # this prefix was included in the fixture description field for fixtures <= v4.3.0
+ # this prefix was included in the fixture description field for
+ # fixtures <= v4.3.0
test_docstring = fixture.info["description"].replace("Test function documentation:\n", "") # type: ignore
description = textwrap.dedent(f"""
@@ -162,15 +175,18 @@ def test_case_description(
{test_docstring}
Run This Test Locally:
- To run this test in hive:
+        To run this test in
+        hive:
+
{hive_clients_yaml_generator_command}
{hive_consume_command}
- Advanced: Run the test against a hive developer backend using EEST's consume
command
+ Advanced: Run the test against a hive developer backend using
+ EEST's consume
command
Create the client YAML file, as above, then:
1. Start hive in dev mode: {hive_dev_command}
2. In the EEST repository root: {eest_consume_command}
- """) # noqa: E501
+ """)
description = description.strip()
description = description.replace("\n", "
")
diff --git a/src/pytest_plugins/consume/tests/test_consume_args.py b/src/pytest_plugins/consume/tests/test_consume_args.py
index 110566fd808..58646879261 100644
--- a/src/pytest_plugins/consume/tests/test_consume_args.py
+++ b/src/pytest_plugins/consume/tests/test_consume_args.py
@@ -22,7 +22,10 @@ def test_function(state_test, pre):
@pytest.fixture
def minimal_test_path(pytester: pytest.Pytester) -> Path:
- """Minimal test file that's written to a file using pytester and ready to fill."""
+ """
+ Minimal test file that's written to a file using pytester and ready to
+ fill.
+ """
tests_dir = pytester.mkdir("tests")
test_file = tests_dir / MINIMAL_TEST_FILE_NAME
test_file.write_text(MINIMAL_TEST_CONTENTS)
@@ -72,11 +75,12 @@ def fill_tests(
"""
Run fill to generate test fixtures for use with testing consume.
- We only need to do this once so ideally the scope of this fixture should be "module",
- however the `pytester` fixture's scope is function and cannot be accessed from a higher
- scope fixture.
+ We only need to do this once so ideally the scope of this fixture should be
+ "module", however the `pytester` fixture's scope is function and cannot be
+ accessed from a higher scope fixture.
- Instead we use a file lock and only write the fixtures once to the directory.
+ Instead we use a file lock and only write the fixtures once to the
+ directory.
"""
with FileLock(fixtures_dir.with_suffix(".lock")):
meta_folder = fixtures_dir / ".meta"
@@ -100,9 +104,11 @@ def fill_tests(
@pytest.fixture(autouse=True, scope="function")
def test_fixtures(pytester: Pytester, fixtures_dir: Path, fill_tests: None) -> List[Path]:
"""
- Copy test fixtures from the regular temp path to the pytester temporary dir.
+ Copy test fixtures from the regular temp path to the pytester temporary
+ dir.
- We intentionally copy the `.meta/index.json` file to test its compatibility with consume.
+ We intentionally copy the `.meta/index.json` file to test its compatibility
+ with consume.
"""
del fill_tests
@@ -131,7 +137,11 @@ def copy_consume_test_paths(pytester: Pytester):
shutil.move("conftest.py", target_dir / "conftest.py")
-single_test_id = f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Shanghai-state_test]]" # noqa: E501
+single_test_id = (
+ "src/pytest_plugins/consume/direct/"
+ "test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/"
+ f"{MINIMAL_TEST_FILE_NAME}::test_function[fork_Shanghai-state_test]]"
+)
@pytest.mark.parametrize(
diff --git a/src/pytest_plugins/consume/tests/test_fixtures_source_input_types.py b/src/pytest_plugins/consume/tests/test_fixtures_source_input_types.py
index 9eb205c73f0..e18cc2b7312 100644
--- a/src/pytest_plugins/consume/tests/test_fixtures_source_input_types.py
+++ b/src/pytest_plugins/consume/tests/test_fixtures_source_input_types.py
@@ -10,7 +10,9 @@ class TestSimplifiedConsumeBehavior:
"""Test suite for the simplified consume behavior."""
def test_fixtures_source_from_release_url_no_api_calls(self):
- """Test that direct release URLs do not make API calls for release page."""
+ """
+ Test that direct release URLs do not make API calls for release page.
+ """
test_url = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_develop.tar.gz"
with patch("pytest_plugins.consume.consume.FixtureDownloader") as mock_downloader:
@@ -26,7 +28,9 @@ def test_fixtures_source_from_release_url_no_api_calls(self):
assert source.input_option == test_url
def test_fixtures_source_from_release_spec_makes_api_calls(self):
- """Test that release specs still make API calls and get release page."""
+ """
+ Test that release specs still make API calls and get release page.
+ """
test_spec = "stable@latest"
with patch("pytest_plugins.consume.consume.get_release_url") as mock_get_url:
@@ -68,7 +72,9 @@ def test_fixtures_source_from_regular_url_no_release_page(self):
assert source.url == test_url
def test_output_formatting_without_release_page_for_direct_urls(self):
- """Test output formatting when release page is empty for direct URLs."""
+ """
+ Test output formatting when release page is empty for direct URLs.
+ """
from unittest.mock import MagicMock
from pytest import Config
@@ -97,7 +103,9 @@ def test_output_formatting_without_release_page_for_direct_urls(self):
assert "Input:" in reason
def test_output_formatting_with_release_page_for_specs(self):
- """Test output formatting when release page is present for release specs."""
+ """
+ Test output formatting when release page is present for release specs.
+ """
from unittest.mock import MagicMock
from pytest import Config
diff --git a/src/pytest_plugins/custom_logging/__init__.py b/src/pytest_plugins/custom_logging/__init__.py
index a03e1b28ad7..9e71d615bae 100644
--- a/src/pytest_plugins/custom_logging/__init__.py
+++ b/src/pytest_plugins/custom_logging/__init__.py
@@ -1,4 +1,7 @@
-"""Import the logging module content to make it available from pytest_plugins.logging."""
+"""
+Import the logging module content to make it available from
+pytest_plugins.logging.
+"""
from .plugin_logging import (
FAIL_LEVEL,
diff --git a/src/pytest_plugins/custom_logging/plugin_logging.py b/src/pytest_plugins/custom_logging/plugin_logging.py
index dfe2cf7d1ae..3387992c8a9 100644
--- a/src/pytest_plugins/custom_logging/plugin_logging.py
+++ b/src/pytest_plugins/custom_logging/plugin_logging.py
@@ -1,15 +1,17 @@
"""
A Pytest plugin to configure logging for pytest sessions.
-Note: While pytest's builtin logging is generally amazing, it does not write timestamps
-when log output is written to pytest's caplog (the captured output for a test). And having
-timestamps in this output is the main use case for adding logging to our plugins.
-This output gets shown in the `FAILURES` summary section, which is shown as the
-"simulator log" in hive simulations. For this use case, timestamps are essential to verify
-timing issues against the clients log.
+Note: While pytest's builtin logging is generally amazing, it does not write
+timestamps when log output is written to pytest's caplog (the captured output
+for a test). And having timestamps in this output is the main use case for
+adding logging to our plugins. This output gets shown in the `FAILURES` summary
+section, which is shown as the "simulator log" in hive simulations. For this
+use case, timestamps are essential to verify timing issues against the clients
+log.
This module provides both:
-1. A standalone logging configuration system that can be used in any Python project
+1. A standalone logging configuration system that can be used in any
+ Python project
2. A pytest plugin that automatically configures logging for pytest sessions
"""
@@ -71,8 +73,8 @@ def fail(
"""
Log a message with FAIL level severity (35).
- This level is between WARNING (30) and ERROR (40), intended for test failures
- and similar issues.
+ This level is between WARNING (30) and ERROR (40), intended for test
+ failures and similar issues.
"""
if stacklevel is None:
stacklevel = 1
@@ -94,15 +96,22 @@ def get_logger(name: str) -> EESTLogger:
class UTCFormatter(logging.Formatter):
- """Log formatter that formats UTC timestamps with milliseconds and +00:00 suffix."""
+ """
+ Log formatter that formats UTC timestamps with milliseconds and +00:00
+ suffix.
+ """
- def formatTime(self, record, datefmt=None): # noqa: D102,N802 # camelcase required
+ def formatTime(self, record, datefmt=None): # noqa: D102,N802
+ # camelcase required
dt = datetime.fromtimestamp(record.created, tz=timezone.utc)
return dt.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + "+00:00"
class ColorFormatter(UTCFormatter):
- """Formatter that adds ANSI color codes to log level names for terminal output."""
+ """
+ Formatter that adds ANSI color codes to log level names for terminal
+ output.
+ """
running_in_docker: ClassVar[bool] = Path("/.dockerenv").exists()
@@ -150,9 +159,9 @@ def from_cli(cls, value: str) -> int:
raise ValueError(f"Invalid log level '{value}'. Expected one of: {valid} or a number.")
-# ==============================================================================
+# =========================================================================
# Standalone logging configuration (usable without pytest)
-# ==============================================================================
+# =========================================================================
def configure_logging(
@@ -169,14 +178,13 @@ def configure_logging(
same settings as the pytest plugin.
Args:
- log_level: The logging level to use (name or numeric value)
- log_file: Path to the log file (if None, no file logging is set up)
- log_to_stdout: Whether to log to stdout
- log_format: The log format string
- use_color: Whether to use colors in stdout output (auto-detected if None)
+ log_level: The logging level to use (name or numeric value)
+ log_file: Path to the log file (if None, no file logging is set up)
+ log_to_stdout: Whether to log to stdout
+ log_format: The log format string
+ use_color: Whether to use colors in stdout output (auto-detected if None)
- Returns:
- The file handler if log_file is provided, otherwise None
+ Returns: The file handler if log_file is provided, otherwise None
"""
# Initialize root logger
@@ -222,9 +230,9 @@ def configure_logging(
return file_handler_instance
-# ==============================================================================
+# ==========================================================================
# Pytest plugin integration
-# ==============================================================================
+# ==========================================================================
def pytest_addoption(parser): # noqa: D103
@@ -232,7 +240,8 @@ def pytest_addoption(parser): # noqa: D103
"logging", "Arguments related to logging from test fixtures and tests."
)
logging_group.addoption(
- "--eest-log-level", # --log-level is defined by pytest's built-in logging
+ "--eest-log-level", # --log-level is defined by pytest's built-in
+ # logging
"--eestloglevel",
action="store",
default="INFO",
@@ -274,9 +283,9 @@ def pytest_configure(config: pytest.Config) -> None:
"""
Initialize logging for pytest sessions.
- This goes to a lot of effort to ensure that a log file is created per worker
- if xdist is used and that the timestamp used in the filename is the same across
- main and all workers.
+ This goes to a lot of effort to ensure that a log file is created per
+ worker if xdist is used and that the timestamp used in the filename is the
+ same across main and all workers.
"""
global file_handler
@@ -312,10 +321,11 @@ def pytest_report_header(config: pytest.Config) -> list[str]:
return []
-def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int) -> None:
- """Display the log file path in the terminal summary like the HTML report does."""
- del exitstatus
-
+def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
+ """
+ Display the log file path in the terminal summary like the HTML report
+ does.
+ """
if terminalreporter.config.option.collectonly:
return
if eest_log_file_path := terminalreporter.config.option.eest_log_file_path:
diff --git a/src/pytest_plugins/execute/__init__.py b/src/pytest_plugins/execute/__init__.py
index 08e96f51787..71d37fb87ee 100644
--- a/src/pytest_plugins/execute/__init__.py
+++ b/src/pytest_plugins/execute/__init__.py
@@ -1 +1,4 @@
-"""A pytest plugin that provides fixtures that execute tests in live devnets/testnets."""
+"""
+A pytest plugin that provides fixtures that execute tests in live
+devnets/testnets.
+"""
diff --git a/src/pytest_plugins/execute/eth_config/eth_config.py b/src/pytest_plugins/execute/eth_config/eth_config.py
index cb8d30a029e..c373083eb90 100644
--- a/src/pytest_plugins/execute/eth_config/eth_config.py
+++ b/src/pytest_plugins/execute/eth_config/eth_config.py
@@ -95,8 +95,8 @@ def pytest_addoption(parser):
def pytest_configure(config: pytest.Config) -> None:
"""
- Load the network configuration file and load the specific network to be used for
- the test.
+ Load the network configuration file and load the specific network to be
+ used for the test.
"""
genesis_config_file = config.getoption("genesis_config_file")
genesis_config_url = config.getoption("genesis_config_url")
@@ -183,14 +183,20 @@ def pytest_configure(config: pytest.Config) -> None:
@pytest.fixture(autouse=True, scope="session")
def rpc_endpoint(request) -> str:
- """Return remote RPC endpoint to be used to make requests to the execution client."""
+ """
+ Return remote RPC endpoint to be used to make requests to the execution
+ client.
+ """
return request.config.getoption("rpc_endpoint")
def all_rpc_endpoints(config) -> Dict[str, List[EthRPC]]:
- """Derive a mapping of exec clients to the RPC URLs they are reachable at."""
+ """
+ Derive a mapping of exec clients to the RPC URLs they are reachable at.
+ """
rpc_endpoint = config.getoption("rpc_endpoint")
- el_clients: List[str] = config.getoption("majority_clients") # besu, erigon, ..
+ # besu, erigon, ..
+ el_clients: List[str] = config.getoption("majority_clients")
if len(el_clients) == 0:
endpoint_name = rpc_endpoint
try:
@@ -215,23 +221,23 @@ def all_rpc_endpoints(config) -> Dict[str, List[EthRPC]]:
for exec_client in el_clients
}
# url_dict looks like this:
- # {
- # 'besu': [, , ..], # noqa: E501
- # 'erigon': ...
- # ...
- # }
+    # {'besu': [EthRPC(url_a), EthRPC(url_b), ...],
+    #  'erigon': [...],
+    #  ...}
return url_dict
def pytest_generate_tests(metafunc: pytest.Metafunc):
"""Generate tests for all clients under test."""
# all_rpc_endpoints is a dictionary with the name of the exec client as key
- # and the possible URLs to contact it (different cl combinations) as value list
+ # and the possible URLs to contact it (different cl combinations) as value
+ # list
all_rpc_endpoints_dict = all_rpc_endpoints(metafunc.config)
if metafunc.definition.name == "test_eth_config_majority":
if len(all_rpc_endpoints_dict) < 2:
- # The test function is not run because we only have a single client, so no majority comparison # noqa: E501
+ # The test function is not run because we only have a single
+ # client, so no majority comparison
logger.info(
"Skipping eth_config majority because less than 2 exec clients were passed"
)
diff --git a/src/pytest_plugins/execute/eth_config/execute_eth_config.py b/src/pytest_plugins/execute/eth_config/execute_eth_config.py
index af43ec0b1e2..efdc3122f99 100644
--- a/src/pytest_plugins/execute/eth_config/execute_eth_config.py
+++ b/src/pytest_plugins/execute/eth_config/execute_eth_config.py
@@ -1,4 +1,6 @@
-"""Pytest test to verify a client's configuration using `eth_config` RPC endpoint."""
+"""
+Pytest test to verify a client's configuration using `eth_config` RPC endpoint.
+"""
import json
import time
@@ -17,7 +19,9 @@
@pytest.fixture(scope="function")
def eth_config_response(eth_rpc: List[EthRPC]) -> EthConfigResponse | None:
- """Get the `eth_config` response from the client to be verified by all tests."""
+ """
+ Get the `eth_config` response from the client to be verified by all tests.
+ """
for rpc in eth_rpc:
try:
response = rpc.config()
@@ -37,13 +41,17 @@ def network(request) -> NetworkConfig:
@pytest.fixture(scope="function")
def current_time() -> int:
- """Get the `eth_config` response from the client to be verified by all tests."""
+ """
+ Get the `eth_config` response from the client to be verified by all tests.
+ """
return int(time.time())
@pytest.fixture(scope="function")
def expected_eth_config(network: NetworkConfig, current_time: int) -> EthConfigResponse:
- """Calculate the current fork value to verify against the client's response."""
+ """
+ Calculate the current fork value to verify against the client's response.
+ """
return network.get_eth_config(current_time)
@@ -190,12 +198,15 @@ def test_eth_config_last_fork_id(
def test_eth_config_majority(
all_rpc_endpoints: Dict[str, List[EthRPC]],
) -> None:
- """Queries devnet exec clients for their eth_config and fails if not all have the same response.""" # noqa: E501
+ """
+ Queries devnet exec clients for their eth_config and fails if not all have
+ the same response.
+ """
responses = dict() # Dict[exec_client_name : response] # noqa: C408
client_to_url_used_dict = dict() # noqa: C408
for exec_client in all_rpc_endpoints.keys():
- # try only as many consensus+exec client combinations until you receive a response
- # if all combinations for a given exec client fail we panic
+        # try only as many consensus+exec client combinations until you
+        # receive a response; if all combinations for a given exec client fail we panic
for eth_rpc_target in all_rpc_endpoints[exec_client]:
try:
response = eth_rpc_target.config(timeout=5)
@@ -212,7 +223,7 @@ def test_eth_config_majority(
responses[exec_client] = response_str
client_to_url_used_dict[exec_client] = (
eth_rpc_target.url
- ) # remember which cl+el combination was used # noqa: E501
+ ) # remember which cl+el combination was used
logger.info(f"Response of {exec_client}: {response_str}\n\n")
break # no need to gather more responses for this client
@@ -226,14 +237,15 @@ def test_eth_config_majority(
"this execution client"
)
# determine hashes of client responses
- client_to_hash_dict = dict() # Dict[exec_client : response hash] # noqa: C408
+    client_to_hash_dict = {}  # Dict[exec_client : response hash]
for client in responses.keys():
response_bytes = responses[client].encode("utf-8")
response_hash = sha256(response_bytes).digest().hex()
logger.info(f"Response hash of client {client}: {response_hash}")
client_to_hash_dict[client] = response_hash
- # if not all responses have the same hash there is a critical consensus issue
+ # if not all responses have the same hash there is a critical consensus
+ # issue
expected_hash = ""
for h in client_to_hash_dict.keys():
if expected_hash == "":
@@ -241,15 +253,17 @@ def test_eth_config_majority(
continue
assert client_to_hash_dict[h] == expected_hash, (
- "Critical consensus issue: Not all eth_config responses are the same!\n"
+ "Critical consensus issue: Not all eth_config responses are the "
+ " same!\n"
"Here is an overview of client response hashes:\n"
+ "\n\t".join(f"{k}: {v}" for k, v in client_to_hash_dict.items())
- + "\n\n" # noqa: E501
+ + "\n\n"
"Here is an overview of which URLs were contacted:\n\t"
+ "\n\t".join(f"{k}: @{v.split('@')[1]}" for k, v in client_to_url_used_dict.items())
- + "\n\n" # log which cl+el combinations were used without leaking full url # noqa: E501
+ + "\n\n"
+ # log which cl+el combinations were used without leaking full url
"Here is a dump of all client responses:\n"
- + "\n\n".join(f"{k}: {v}" for k, v in responses.items()) # noqa: E501
+ + "\n\n".join(f"{k}: {v}" for k, v in responses.items())
)
assert expected_hash != ""
diff --git a/src/pytest_plugins/execute/eth_config/execute_types.py b/src/pytest_plugins/execute/eth_config/execute_types.py
index efe32bb430a..393d5c251ca 100644
--- a/src/pytest_plugins/execute/eth_config/execute_types.py
+++ b/src/pytest_plugins/execute/eth_config/execute_types.py
@@ -33,8 +33,8 @@
class AddressOverrideDict(EthereumTestRootModel):
"""
Dictionary with overrides to the default addresses specified for each fork.
- Required for testnets or devnets which have a different location of precompiles or system
- contracts.
+ Required for testnets or devnets which have a different location of
+ precompiles or system contracts.
"""
root: Dict[Address, Address]
@@ -73,8 +73,8 @@ def system_contracts(self) -> Dict[str, Address]:
def get_config(self, fork_id: ForkHash) -> ForkConfig:
"""
- Get the current and next fork configurations given the current time and the network
- configuration.
+ Get the current and next fork configurations given the current time and
+ the network configuration.
"""
return ForkConfig(
activation_time=self.activation_time,
@@ -87,7 +87,10 @@ def get_config(self, fork_id: ForkHash) -> ForkConfig:
def calculate_fork_id(genesis_hash: Hash, activation_times: Set[int]) -> ForkHash:
- """Calculate the fork Id given the genesis hash and each fork activation times."""
+ """
+ Calculate the fork Id given the genesis hash and each fork activation
+ times.
+ """
buffer = bytes(genesis_hash)
for activation_time in sorted(activation_times):
if activation_time == 0:
@@ -275,9 +278,11 @@ def fork(self) -> Fork:
@classmethod
def preprocess_fork_times_blocks(cls, data: Any):
"""
- Pre-process the dictionary to put fork block numbers and times in the correct format.
+ Pre-process the dictionary to put fork block numbers and times in the
+ correct format.
- Fork times and block numbers have the following format in the root of the object:
+ Fork times and block numbers have the following format in the root of
+ the object:
```
"berlinBlock": 0,
@@ -287,8 +292,8 @@ def preprocess_fork_times_blocks(cls, data: Any):
"osakaTime": 1753379304,
```
- This function strips the "*Block" and "*Time" part and moves the values.
-
+ This function strips the "*Block" and "*Time" part and moves the
+ values.
"""
if isinstance(data, dict):
fork_activation_times: Dict[str, int] = {}
diff --git a/src/pytest_plugins/execute/eth_config/tests/test_execute_eth_config.py b/src/pytest_plugins/execute/eth_config/tests/test_execute_eth_config.py
index bb147b82411..7a7bd7df0cf 100644
--- a/src/pytest_plugins/execute/eth_config/tests/test_execute_eth_config.py
+++ b/src/pytest_plugins/execute/eth_config/tests/test_execute_eth_config.py
@@ -414,7 +414,7 @@
target: 15
max: 20
baseFeeUpdateFraction: 5007716
-"""
+"""  # noqa: W505
@pytest.fixture(scope="session")
@@ -435,7 +435,9 @@ def network(request: pytest.FixtureRequest, network_configs: NetworkConfigFile)
@pytest.fixture
def eth_config(network: NetworkConfig, current_time: int) -> EthConfigResponse:
- """Get the `eth_config` response from the client to be verified by all tests."""
+ """
+ Get the `eth_config` response from the client to be verified by all tests.
+ """
return network.get_eth_config(current_time)
diff --git a/src/pytest_plugins/execute/execute.py b/src/pytest_plugins/execute/execute.py
index e6ed68af657..42676a62113 100644
--- a/src/pytest_plugins/execute/execute.py
+++ b/src/pytest_plugins/execute/execute.py
@@ -1,4 +1,6 @@
-"""Test execution plugin for pytest, to run Ethereum tests using in live networks."""
+"""
+Test execution plugin for pytest, to run Ethereum tests using in live networks.
+"""
import os
from dataclasses import dataclass, field
@@ -122,12 +124,12 @@ def pytest_configure(config):
Couple of notes:
1. Register the plugin's custom markers and process command-line options.
- Custom marker registration:
- https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
+ Custom marker registration:
+ https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
- called before the pytest-html plugin's pytest_configure to ensure that
- it uses the modified `htmlpath` option.
+ called before the pytest-html plugin's pytest_configure to ensure that
+ it uses the modified `htmlpath` option.
"""
# Modify the block gas limit if specified.
if config.getoption("transaction_gas_limit"):
@@ -256,7 +258,9 @@ def default_max_priority_fee_per_gas(request) -> int:
def modify_transaction_defaults(
default_gas_price: int, default_max_fee_per_gas: int, default_max_priority_fee_per_gas: int
):
- """Modify transaction defaults to values better suited for live networks."""
+ """
+ Modify transaction defaults to values better suited for live networks.
+ """
TransactionDefaults.gas_price = default_gas_price
TransactionDefaults.max_fee_per_gas = default_max_fee_per_gas
TransactionDefaults.max_priority_fee_per_gas = default_max_priority_fee_per_gas
@@ -264,7 +268,10 @@ def modify_transaction_defaults(
@dataclass(kw_only=True)
class Collector:
- """A class that collects transactions and post-allocations for every test case."""
+ """
+ A class that collects transactions and post-allocations for every test
+ case.
+ """
eth_rpc: EthRPC
collected_tests: Dict[str, BaseExecute] = field(default_factory=dict)
@@ -280,8 +287,8 @@ def collector(
eth_rpc: EthRPC,
) -> Generator[Collector, None, None]:
"""
- Return configured fixture collector instance used for all tests
- in one test module.
+ Return configured fixture collector instance used for all tests in one test
+ module.
"""
del request
@@ -293,8 +300,8 @@ def base_test_parametrizer(cls: Type[BaseTest]):
"""
Generate pytest.fixture for a given BaseTest subclass.
- Implementation detail: All spec fixtures must be scoped on test function level to avoid
- leakage between tests.
+ Implementation detail: All spec fixtures must be scoped on test function
+ level to avoid leakage between tests.
"""
cls_fixture_parameters = [p for p in ALL_FIXTURE_PARAMETERS if p in cls.model_fields]
@@ -311,14 +318,15 @@ def base_test_parametrizer_func(
collector: Collector,
):
"""
- Fixture used to instantiate an auto-fillable BaseTest object from within
- a test function.
+ Fixture used to instantiate an auto-fillable BaseTest object from
+ within a test function.
- Every test that defines a test filler must explicitly specify its parameter name
- (see `pytest_parameter_name` in each implementation of BaseTest) in its function
- arguments.
+ Every test that defines a test filler must explicitly specify its
+ parameter name (see `pytest_parameter_name` in each implementation of
+ BaseTest) in its function arguments.
- When parametrize, indirect must be used along with the fixture format as value.
+ When parametrize, indirect must be used along with the fixture format
+ as value.
"""
execute_format = request.param
assert execute_format in BaseExecute.formats.values()
@@ -377,8 +385,8 @@ def __init__(self, *args, **kwargs):
def pytest_generate_tests(metafunc: pytest.Metafunc):
"""
- Pytest hook used to dynamically generate test cases for each fixture format a given
- test spec supports.
+ Pytest hook used to dynamically generate test cases for each fixture format
+ a given test spec supports.
"""
engine_rpc_supported = metafunc.config.engine_rpc_supported # type: ignore
for test_type in BaseTest.spec_types.values():
@@ -397,10 +405,11 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
)
-def pytest_collection_modifyitems(config: pytest.Config, items: List[pytest.Item]):
- """Remove transition tests and add the appropriate execute markers to the test."""
- del config
-
+def pytest_collection_modifyitems(items: List[pytest.Item]):
+ """
+ Remove transition tests and add the appropriate execute markers to the
+ test.
+ """
items_for_removal = []
for i, item in enumerate(items):
if isinstance(item, EIPSpecTestItem):
diff --git a/src/pytest_plugins/execute/pre_alloc.py b/src/pytest_plugins/execute/pre_alloc.py
index 273a6ab3a44..239de55f932 100644
--- a/src/pytest_plugins/execute/pre_alloc.py
+++ b/src/pytest_plugins/execute/pre_alloc.py
@@ -48,8 +48,9 @@ class AddressStubs(EthereumTestRootModel[Dict[str, Address]]):
"""
Address stubs class.
- The key represents the label that is used in the test to tag the contract, and the value
- is the address where the contract is already located at in the current network.
+ The key represents the label that is used in the test to tag the contract,
+ and the value is the address where the contract is already located at in
+ the current network.
"""
root: Dict[str, Address]
@@ -65,8 +66,8 @@ def __getitem__(self, item: str) -> Address:
@classmethod
def model_validate_json_or_file(cls, json_data_or_path: str) -> Self:
"""
- Try to load from file if the value resembles a path that ends with .json/.yml and the
- file exists.
+ Try to load from file if the value resembles a path that ends with
+ .json/.yml and the file exists.
"""
lower_json_data_or_path = json_data_or_path.lower()
if (
@@ -342,7 +343,10 @@ def fund_eoa(
delegation: Address | Literal["Self"] | None = None,
nonce: NumberConvertible | None = None,
) -> EOA:
- """Add a previously unused EOA to the pre-alloc with the balance specified by `amount`."""
+ """
+ Add a previously unused EOA to the pre-alloc with the balance specified
+ by `amount`.
+ """
assert nonce is None, "nonce parameter is not supported for execute"
eoa = next(self._eoa_iterator)
eoa.label = label
@@ -385,7 +389,8 @@ def fund_eoa(
if delegation is not None:
if not isinstance(delegation, Address) and delegation == "Self":
delegation = eoa
- # TODO: This tx has side-effects on the EOA state because of the delegation
+ # TODO: This tx has side-effects on the EOA state because of
+ # the delegation
fund_tx = Transaction(
sender=self._sender,
to=eoa,
@@ -409,7 +414,8 @@ def fund_eoa(
authorization_list=[
AuthorizationTuple(
chain_id=self._chain_id,
- address=0, # Reset delegation to an address without code
+ # Reset delegation to an address without code
+ address=0,
nonce=eoa.nonce,
signer=eoa,
),
@@ -478,7 +484,8 @@ def fund_address(self, address: Address, amount: NumberConvertible):
def empty_account(self) -> Address:
"""
- Add a previously unused account guaranteed to be empty to the pre-alloc.
+ Add a previously unused account guaranteed to be empty to the
+ pre-alloc.
This ensures the account has:
- Zero balance
@@ -486,8 +493,9 @@ def empty_account(self) -> Address:
- No code
- No storage
- This is different from precompiles or system contracts. The function does not
- send any transactions, ensuring that the account remains "empty."
+ This is different from precompiles or system contracts. The function
+ does not send any transactions, ensuring that the account remains
+ "empty."
Returns:
Address: The address of the created empty account.
diff --git a/src/pytest_plugins/execute/rpc/chain_builder_eth_rpc.py b/src/pytest_plugins/execute/rpc/chain_builder_eth_rpc.py
index 017eb40fbcb..d41fd688432 100644
--- a/src/pytest_plugins/execute/rpc/chain_builder_eth_rpc.py
+++ b/src/pytest_plugins/execute/rpc/chain_builder_eth_rpc.py
@@ -1,4 +1,7 @@
-"""Chain builder Ethereum RPC that can drive the chain when new transactions are submitted."""
+"""
+Chain builder Ethereum RPC that can drive the chain when new transactions are
+submitted.
+"""
import time
from pathlib import Path
@@ -87,10 +90,11 @@ def __iter__(self):
class PendingTxHashes:
"""
- A class to manage the pending transaction hashes in a multi-process environment.
+ A class to manage the pending transaction hashes in a multi-process
+ environment.
- It uses a lock file to ensure that only one process can access the pending hashes file at a
- time.
+ It uses a lock file to ensure that only one process can access the pending
+ hashes file at a time.
"""
pending_hashes_file: Path
@@ -166,9 +170,9 @@ def __iter__(self):
class ChainBuilderEthRPC(BaseEthRPC, namespace="eth"):
"""
- Special type of Ethereum RPC client that also has access to the Engine API and automatically
- coordinates block generation based on the number of pending transactions or a block generation
- interval.
+ Special type of Ethereum RPC client that also has access to the Engine API
+ and automatically coordinates block generation based on the number of
+ pending transactions or a block generation interval.
"""
fork: Fork
@@ -336,11 +340,12 @@ def wait_for_transactions(
self, transactions: List[Transaction]
) -> List[TransactionByHashResponse]:
"""
- Wait for all transactions in the provided list to be included in a block.
+ Wait for all transactions in the provided list to be included in a
+ block.
- Waits for all transactions in the provided list to be included in a block
- by polling `eth_getTransactionByHash` until they are confirmed or a
- timeout occurs.
+ Waits for all transactions in the provided list to be included in a
+ block by polling `eth_getTransactionByHash` until they are confirmed or
+ a timeout occurs.
Args:
transactions: A list of transactions to track.
@@ -398,8 +403,8 @@ def wait_for_transactions(
class PendingTransactionHandler:
"""
- Manages block generation based on the number of pending transactions or a block generation
- interval.
+ Manages block generation based on the number of pending transactions or a
+ block generation interval.
Attributes:
block_generation_interval: The number of iterations after which a block
@@ -423,11 +428,12 @@ def handle(self):
"""
Handle pending transactions and generate blocks if necessary.
- If the number of pending transactions reaches the limit, a block is generated.
+ If the number of pending transactions reaches the limit, a block is
+ generated.
- If no new transactions have been added to the pending list and the block
- generation interval has been reached, a block is generated to avoid potential
- deadlock.
+ If no new transactions have been added to the pending list and the
+ block generation interval has been reached, a block is generated to
+ avoid potential deadlock.
"""
with self.chain_builder_eth_rpc.pending_tx_hashes:
if (
@@ -442,8 +448,8 @@ def handle(self):
== self.last_pending_tx_hashes_count
and self.i % self.block_generation_interval == 0
):
- # If no new transactions have been added to the pending list,
- # generate a block to avoid potential deadlock.
+ # If no new transactions have been added to the pending
+ # list, generate a block to avoid potential deadlock.
self.chain_builder_eth_rpc.generate_block()
self.last_pending_tx_hashes_count = len(self.chain_builder_eth_rpc.pending_tx_hashes)
self.i += 1
diff --git a/src/pytest_plugins/execute/rpc/hive.py b/src/pytest_plugins/execute/rpc/hive.py
index 0ae0f02e09b..c36349c5faf 100644
--- a/src/pytest_plugins/execute/rpc/hive.py
+++ b/src/pytest_plugins/execute/rpc/hive.py
@@ -147,7 +147,10 @@ def base_pre_genesis(
@pytest.fixture(scope="session")
def client_genesis(base_pre_genesis: Tuple[Alloc, FixtureHeader]) -> dict:
- """Convert the fixture's genesis block header and pre-state to a client genesis state."""
+ """
+ Convert the fixture's genesis block header and pre-state to a client
+ genesis state.
+ """
genesis = to_json(base_pre_genesis[1]) # NOTE: to_json() excludes None values
alloc = to_json(base_pre_genesis[0])
# NOTE: nethermind requires account keys without '0x' prefix
@@ -211,7 +214,9 @@ def test_suite_description() -> str:
def base_hive_test(
request: pytest.FixtureRequest, test_suite: HiveTestSuite, session_temp_folder: Path
) -> Generator[HiveTest, None, None]:
- """Test (base) used to deploy the main client to be used throughout all tests."""
+ """
+ Test (base) used to deploy the main client to be used throughout all tests.
+ """
base_name = "base_hive_test"
base_file = session_temp_folder / base_name
base_lock_file = session_temp_folder / f"{base_name}.lock"
@@ -276,7 +281,9 @@ def client(
client_type: ClientType,
session_temp_folder: Path,
) -> Generator[Client, None, None]:
- """Initialize the client with the appropriate files and environment variables."""
+ """
+ Initialize the client with the appropriate files and environment variables.
+ """
base_name = "hive_client"
base_file = session_temp_folder / base_name
base_error_file = session_temp_folder / f"{base_name}.err"
diff --git a/src/pytest_plugins/execute/rpc/remote.py b/src/pytest_plugins/execute/rpc/remote.py
index d5168af951a..4f87766235b 100644
--- a/src/pytest_plugins/execute/rpc/remote.py
+++ b/src/pytest_plugins/execute/rpc/remote.py
@@ -87,7 +87,8 @@ def pytest_configure(config: pytest.Config):
"""Check if a chain ID configuration is provided."""
if config.getoption("rpc_chain_id") is None and config.getoption("chain_id") is None:
pytest.exit("No chain ID configuration found. Please use --chain-id.")
- # Verify the chain ID configuration is consistent with the remote RPC endpoint
+ # Verify the chain ID configuration is consistent with the remote RPC
+ # endpoint
rpc_endpoint = config.getoption("rpc_endpoint")
eth_rpc = EthRPC(rpc_endpoint)
remote_chain_id = eth_rpc.chain_id()
@@ -127,8 +128,9 @@ def pytest_configure(config: pytest.Config):
f"JWT secret must be a bytes object, got {type(jwt_secret)}"
)
engine_rpc = EngineRPC(engine_endpoint, jwt_secret=jwt_secret)
- # TODO: Perform a request to the engine endpoint to verify that the JWT secret is valid.
- # Potentially could be `engine_getClientVersionV1` but need to implement this in rpc.py.
+ # TODO: Perform a request to the engine endpoint to verify that the JWT
+ # secret is valid. Potentially could be `engine_getClientVersionV1` but
+ # need to implement this in rpc.py.
config.engine_rpc = engine_rpc # type: ignore
@@ -140,7 +142,10 @@ def engine_rpc(request) -> EngineRPC | None:
@pytest.fixture(autouse=True, scope="session")
def rpc_endpoint(request) -> str:
- """Return remote RPC endpoint to be used to make requests to the execution client."""
+ """
+ Return remote RPC endpoint to be used to make requests to the execution
+ client.
+ """
return request.config.getoption("rpc_endpoint")
diff --git a/src/pytest_plugins/execute/sender.py b/src/pytest_plugins/execute/sender.py
index 016fd4862f8..a93a4081d21 100644
--- a/src/pytest_plugins/execute/sender.py
+++ b/src/pytest_plugins/execute/sender.py
@@ -85,15 +85,17 @@ def sender_key_initial_balance(
"""
Calculate the initial balance of each sender key.
- The way to do this is to fetch the seed sender balance and divide it by the number of
- workers. This way we can ensure that each sender key has the same initial balance.
+ The way to do this is to fetch the seed sender balance and divide it by the
+ number of workers. This way we can ensure that each sender key has the same
+ initial balance.
- We also only do this once per session, because if we try to fetch the balance again, it
- could be that another worker has already sent a transaction and the balance is different.
+ We also only do this once per session, because if we try to fetch the
+ balance again, it could be that another worker has already sent a
+ transaction and the balance is different.
- It's not really possible to calculate the transaction costs of each test that each worker
- is going to run, so we can't really calculate the initial balance of each sender key
- based on that.
+ It's not really possible to calculate the transaction costs of each test
+ that each worker is going to run, so we can't really calculate the initial
+ balance of each sender key based on that.
"""
base_name = "sender_key_initial_balance"
base_file = session_temp_folder / base_name
@@ -108,7 +110,8 @@ def sender_key_initial_balance(
seed_account_sweep_amount = eth_rpc.get_balance(seed_sender)
seed_sender_balance_per_worker = seed_account_sweep_amount // worker_count
assert seed_sender_balance_per_worker > 100, "Seed sender balance too low"
- # Subtract the cost of the transaction that is going to be sent to the seed sender
+ # Subtract the cost of the transaction that is going to be sent to
+ # the seed sender
sender_key_initial_balance = seed_sender_balance_per_worker - (
sender_fund_refund_gas_limit * sender_funding_transactions_gas_price
)
@@ -132,11 +135,12 @@ def sender_key(
"""
Get the sender keys for all tests.
- The seed sender is going to be shared among different processes, so we need to lock it
- before we produce each funding transaction.
+ The seed sender is going to be shared among different processes, so we need
+ to lock it before we produce each funding transaction.
"""
- # For the seed sender we do need to keep track of the nonce because it is shared among
- # different processes, and there might not be a new block produced between the transactions.
+ # For the seed sender we do need to keep track of the nonce because it is
+ # shared among different processes, and there might not be a new block
+ # produced between the transactions.
seed_sender_nonce_file_name = "seed_sender_nonce"
seed_sender_lock_file_name = f"{seed_sender_nonce_file_name}.lock"
seed_sender_nonce_file = session_temp_folder / seed_sender_nonce_file_name
@@ -172,15 +176,16 @@ def sender_key(
)
refund_gas_limit = sender_fund_refund_gas_limit
- # double the gas price to ensure the transaction is included and overwrites any other
- # transaction that might have been sent by the sender.
+ # double the gas price to ensure the transaction is included and overwrites
+ # any other transaction that might have been sent by the sender.
refund_gas_price = sender_funding_transactions_gas_price * 2
tx_cost = refund_gas_limit * refund_gas_price
if (remaining_balance - 1) < tx_cost:
return
- # Update the nonce of the sender in case one of the pre-alloc transactions failed
+ # Update the nonce of the sender in case one of the pre-alloc transactions
+ # failed
sender.nonce = Number(eth_rpc.get_transaction_count(sender))
refund_tx = Transaction(
diff --git a/src/pytest_plugins/filler/eip_checklist.py b/src/pytest_plugins/filler/eip_checklist.py
index 2387aa178c0..855c903bb57 100644
--- a/src/pytest_plugins/filler/eip_checklist.py
+++ b/src/pytest_plugins/filler/eip_checklist.py
@@ -1,8 +1,8 @@
"""
Pytest plugin for generating EIP test completion checklists.
-This plugin collects checklist markers from tests and generates a filled checklist
-for each EIP based on the template at
+This plugin collects checklist markers from tests and generates a filled
+checklist for each EIP based on the template at
docs/writing_tests/checklist_templates/eip_testing_checklist_template.md
"""
@@ -195,7 +195,9 @@ class ConflictingChecklistItemsWarning(ChecklistWarning):
@classmethod
def from_items(cls, all_items: Dict[str, EIPItem]) -> ChecklistWarning | None:
- """Generate a conflicting checklist items warning from a list of items."""
+ """
+ Generate a conflicting checklist items warning from a list of items.
+ """
conflicting_items = [
item for item in all_items.values() if item.not_applicable and item.covered
]
@@ -335,8 +337,8 @@ def generate_filled_checklist_lines(self) -> List[str]:
# Replace the title line with the EIP number
lines[lines.index(TITLE_LINE)] = f"# EIP-{self.number} Test Checklist"
- # Last, add the warnings if there are any, this must be the last thing we do
- # to avoid shifting the lines below the percentage line
+ # Last, add the warnings if there are any, this must be the last thing
+ # we do to avoid shifting the lines below the percentage line
if self.warnings:
warnings_line_idx = lines.index(WARNINGS_LINE)
warnings_lines = ["", "## ⚠️ Checklist Warnings ⚠️", ""]
diff --git a/src/pytest_plugins/filler/filler.py b/src/pytest_plugins/filler/filler.py
index 30b605d7534..edb3293c874 100644
--- a/src/pytest_plugins/filler/filler.py
+++ b/src/pytest_plugins/filler/filler.py
@@ -2,8 +2,8 @@
Top-level pytest configuration file providing:
- Command-line options,
- Test-fixtures that can be used by all test cases,
-and that modifies pytest hooks in order to fill test specs for all tests and
-writes the generated fixtures to file.
+and that modifies pytest hooks in order to fill test specs for all tests
+and writes the generated fixtures to file.
"""
import configparser
@@ -60,12 +60,16 @@ class PhaseManager:
"""
Manages the execution phase for fixture generation.
- The filler plugin supports two-phase execution for pre-allocation group generation:
- - Phase 1: Generate pre-allocation groups (pytest run with --generate-pre-alloc-groups).
- - Phase 2: Fill fixtures using pre-allocation groups (pytest run with --use-pre-alloc-groups).
+ The filler plugin supports two-phase execution for pre-allocation group
+ generation:
+ - Phase 1: Generate pre-allocation groups (pytest run with
+ --generate-pre-alloc-groups).
- Note: These are separate pytest runs orchestrated by the CLI wrapper.
- Each run gets a fresh PhaseManager instance (no persistence between phases).
+    - Phase 2: Fill fixtures using pre-allocation groups (pytest run with
+      --use-pre-alloc-groups).
+
+ Note: These are separate pytest runs orchestrated by the CLI wrapper. Each
+ run gets a fresh PhaseManager instance (no persistence between phases).
"""
current_phase: FixtureFillingPhase
@@ -77,13 +81,15 @@ def from_config(cls, config: pytest.Config) -> "Self":
Create a PhaseManager from pytest configuration.
Flag logic:
- - use_pre_alloc_groups: We're in phase 2 (FILL) after phase 1 (PRE_ALLOC_GENERATION).
- - generate_pre_alloc_groups or generate_all_formats: We're in phase 1
- (PRE_ALLOC_GENERATION).
- - Otherwise: Normal single-phase filling (FILL).
-
- Note: generate_all_formats triggers PRE_ALLOC_GENERATION because the CLI
- passes it to phase 1 to ensure all formats are considered for grouping.
+ - use_pre_alloc_groups: We're in phase 2 (FILL) after phase
+ 1 (PRE_ALLOC_GENERATION).
+    - generate_pre_alloc_groups or generate_all_formats: We're in phase 1
+      (PRE_ALLOC_GENERATION).
+    - Otherwise: Normal single-phase filling (FILL).
+
+ Note: generate_all_formats triggers PRE_ALLOC_GENERATION because the
+ CLI passes it to phase 1 to ensure all formats are considered for
+ grouping.
"""
generate_pre_alloc = config.getoption("generate_pre_alloc_groups", False)
use_pre_alloc = config.getoption("use_pre_alloc_groups", False)
@@ -127,10 +133,11 @@ def is_single_phase_fill(self) -> bool:
@dataclass(kw_only=True)
class FormatSelector:
"""
- Handles fixture format selection based on the current phase and format capabilities.
+ Handles fixture format selection based on the current phase and format
+ capabilities.
- This class encapsulates the complex logic for determining which fixture formats
- should be generated in each phase of the two-phase execution model.
+ This class encapsulates the complex logic for determining which fixture
+ formats should be generated in each phase of the two-phase execution model.
"""
phase_manager: PhaseManager
@@ -141,10 +148,11 @@ def should_generate(self, fixture_format: Type[BaseFixture] | LabeledFixtureForm
Determine if a fixture format should be generated in the current phase.
Args:
- fixture_format: The fixture format to check (may be wrapped in LabeledFixtureFormat)
+ fixture_format: The fixture format to check (may be wrapped in
+ LabeledFixtureFormat).
Returns:
- True if the format should be generated in the current phase
+ True if the format should be generated in the current phase.
"""
format_phases = fixture_format.format_phases
@@ -155,7 +163,10 @@ def should_generate(self, fixture_format: Type[BaseFixture] | LabeledFixtureForm
return self._should_generate_fill(format_phases)
def _should_generate_pre_alloc(self, format_phases: Set[FixtureFillingPhase]) -> bool:
- """Determine if format should be generated during pre-alloc generation phase."""
+ """
+ Determine if format should be generated during pre-alloc generation
+ phase.
+ """
# Only generate formats that need pre-allocation groups
return FixtureFillingPhase.PRE_ALLOC_GENERATION in format_phases
@@ -164,7 +175,8 @@ def _should_generate_fill(self, format_phases: Set[FixtureFillingPhase]) -> bool
if FixtureFillingPhase.PRE_ALLOC_GENERATION in self.phase_manager.previous_phases:
# Phase 2: After pre-alloc generation
if self.generate_all_formats:
- # Generate all formats, including those that don't need pre-alloc
+                # Generate all formats, including those that don't
+                # need pre-alloc
return True
else:
# Only generate formats that needed pre-alloc groups
@@ -179,12 +191,13 @@ class FillingSession:
"""
Manages all state for a single pytest fill session.
- This class serves as the single source of truth for all filler state management,
- including phase management, format selection, and pre-allocation groups.
+ This class serves as the single source of truth for all filler state
+ management, including phase management, format selection, and
+ pre-allocation groups.
- Important: Each pytest run gets a fresh FillingSession instance. There is no
- persistence between phase 1 (generate pre-alloc) and phase 2 (use pre-alloc)
- except through file I/O.
+ Important: Each pytest run gets a fresh FillingSession instance. There is
+ no persistence between phase 1 (generate pre-alloc) and phase 2 (use
+ pre-alloc) except through file I/O.
"""
fixture_output: FixtureOutput
@@ -240,7 +253,8 @@ def should_generate_format(
self, fixture_format: Type[BaseFixture] | LabeledFixtureFormat
) -> bool:
"""
- Determine if a fixture format should be generated in the current session.
+ Determine if a fixture format should be generated in the current
+ session.
Args:
fixture_format: The fixture format to check.
@@ -335,9 +349,9 @@ def calculate_post_state_diff(post_state: Alloc, genesis_state: Alloc) -> Alloc:
"""
Calculate the state difference between post_state and genesis_state.
- This function enables significant space savings in Engine X fixtures by storing
- only the accounts that changed during test execution, rather than the full
- post-state which may contain thousands of unchanged accounts.
+ This function enables significant space savings in Engine X fixtures by
+ storing only the accounts that changed during test execution, rather than
+ the full post-state which may contain thousands of unchanged accounts.
Returns an Alloc containing only the accounts that:
- Changed between genesis and post state (balance, nonce, storage, code)
@@ -624,12 +638,12 @@ def pytest_configure(config):
Couple of notes:
1. Register the plugin's custom markers and process command-line options.
- Custom marker registration:
- https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
+ Custom marker registration:
+ https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
- called before the pytest-html plugin's pytest_configure to ensure that
- it uses the modified `htmlpath` option.
+ called before the pytest-html plugin's pytest_configure to ensure that
+ it uses the modified `htmlpath` option.
"""
# Register custom markers
# Modify the block gas limit if specified.
@@ -646,7 +660,8 @@ def pytest_configure(config):
return
try:
- # Check whether the directory exists and is not empty; if --clean is set, it will delete it
+ # Check whether the directory exists and is not empty; if --clean is
+ # set, it will delete it
config.fixture_output.create_directories(is_master=not hasattr(config, "workerinput"))
except ValueError as e:
pytest.exit(str(e), returncode=pytest.ExitCode.USAGE_ERROR)
@@ -670,8 +685,9 @@ def pytest_configure(config):
"optimize_gas", False
)
- # Instantiate the transition tool here to check that the binary path/trace option is valid.
- # This ensures we only raise an error once, if appropriate, instead of for every test.
+ # Instantiate the transition tool here to check that the binary path/trace
+ # option is valid. This ensures we only raise an error once, if
+ # appropriate, instead of for every test.
evm_bin = config.getoption("evm_bin")
trace = config.getoption("evm_collect_traces")
t8n_server_url = config.getoption("t8n_server_url")
@@ -730,8 +746,8 @@ def pytest_report_teststatus(report, config: pytest.Config):
We use this:
1. To disable test session progress report if we're writing the JSON
- fixtures to stdout to be read by a consume command on stdin. I.e.,
- don't write this type of output to the console:
+ fixtures to stdout to be read by a consume command on stdin. I.e., don't
+ write this type of output to the console:
```text
...x...
```
@@ -872,7 +888,8 @@ def pytest_runtest_makereport(item, call):
]:
report.user_properties.append(("evm_dump_dir", item.config.evm_dump_dir))
else:
- report.user_properties.append(("evm_dump_dir", "N/A")) # not yet for EOF
+ # not yet for EOF
+ report.user_properties.append(("evm_dump_dir", "N/A"))
def pytest_html_report_title(report):
@@ -889,8 +906,7 @@ def evm_bin(request: pytest.FixtureRequest) -> Path | None:
@pytest.fixture(autouse=True, scope="session")
def verify_fixtures_bin(request: pytest.FixtureRequest) -> Path | None:
"""
- Return configured evm tool binary path used to run statetest or
- blocktest.
+ Return configured evm tool binary path used to run statetest or blocktest.
"""
return request.config.getoption("verify_fixtures_bin")
@@ -935,8 +951,8 @@ def evm_fixture_verification(
verify_fixtures_bin: Path | None,
) -> Generator[FixtureConsumer | None, None, None]:
"""
- Return configured evm binary for executing statetest and blocktest
- commands used to verify generated JSON fixtures.
+ Return configured evm binary for executing statetest and blocktest commands
+ used to verify generated JSON fixtures.
"""
if not do_fixture_verification:
yield None
@@ -1049,8 +1065,9 @@ def dump_dir_parameter_level(
Directory to dump evm transition tool debug output on a test parameter
level.
- Example with --evm-dump-dir=/tmp/evm:
- -> /tmp/evm/shanghai__eip3855_push0__test_push0__test_push0_key_sstore/fork_shanghai/
+ Example with --evm-dump-dir=/tmp/evm: ->
+ /tmp/evm/shanghai__eip3855_push0__test_push0__test_push0_key_sstore/
+ fork_shanghai/
"""
evm_dump_dir = node_to_test_info(request.node).get_dump_dir_path(
base_dump_dir,
@@ -1102,8 +1119,8 @@ def fixture_collector(
fixture_output: FixtureOutput,
) -> Generator[FixtureCollector, None, None]:
"""
- Return configured fixture collector instance used for all tests
- in one test module.
+ Return configured fixture collector instance used for all tests in one test
+ module.
"""
# Dynamically load the 'static_filler' and 'solc' plugins if needed
if request.config.getoption("fill_static_tests_enabled"):
@@ -1163,8 +1180,9 @@ def fixture_source_url(
)
test_module_relative_path = os.path.relpath(request.module.__file__)
if module_relative_path != test_module_relative_path:
- # This can be the case when the test function's body only contains pass and the entire
- # test logic is implemented as a test generator from the framework.
+ # This can be the case when the test function's body only contains pass
+ # and the entire test logic is implemented as a test generator from the
+ # framework.
test_module_github_url = generate_github_url(
test_module_relative_path,
branch_or_commit_or_tag=commit_hash_or_tag,
@@ -1177,8 +1195,8 @@ def base_test_parametrizer(cls: Type[BaseTest]):
"""
Generate pytest.fixture for a given BaseTest subclass.
- Implementation detail: All spec fixtures must be scoped on test function level to avoid
- leakage between tests.
+ Implementation detail: All spec fixtures must be scoped on test function
+ level to avoid leakage between tests.
"""
cls_fixture_parameters = [p for p in ALL_FIXTURE_PARAMETERS if p in cls.model_fields]
@@ -1201,14 +1219,15 @@ def base_test_parametrizer_func(
witness_generator,
):
"""
- Fixture used to instantiate an auto-fillable BaseTest object from within
- a test function.
+ Fixture used to instantiate an auto-fillable BaseTest object from
+ within a test function.
- Every test that defines a test filler must explicitly specify its parameter name
- (see `pytest_parameter_name` in each implementation of BaseTest) in its function
- arguments.
+ Every test that defines a test filler must explicitly specify its
+ parameter name (see `pytest_parameter_name` in each implementation of
+ BaseTest) in its function arguments.
- When parametrize, indirect must be used along with the fixture format as value.
+ When parametrize, indirect must be used along with the fixture format
+ as value.
"""
if hasattr(request.node, "fixture_format"):
fixture_format = request.node.fixture_format
@@ -1248,13 +1267,15 @@ def __init__(self, *args, **kwargs):
# Phase 1: Generate pre-allocation groups
if session.phase_manager.is_pre_alloc_generation:
- # Use the original update_pre_alloc_groups method which returns the groups
+ # Use the original update_pre_alloc_groups method which
+ # returns the groups
self.update_pre_alloc_groups(
session.pre_alloc_groups, fork, request.node.nodeid
)
return # Skip fixture generation in phase 1
- # Phase 2: Use pre-allocation groups (only for BlockchainEngineXFixture)
+ # Phase 2: Use pre-allocation groups (only for
+ # BlockchainEngineXFixture)
pre_alloc_hash = None
if FixtureFillingPhase.PRE_ALLOC_GENERATION in fixture_format.format_phases:
pre_alloc_hash = self.compute_pre_alloc_group_hash(fork=fork)
@@ -1273,11 +1294,13 @@ def __init__(self, *args, **kwargs):
):
gas_optimized_tests = request.config.gas_optimized_tests
assert gas_optimized_tests is not None
- # Force adding something to the list, even if it's None,
- # to keep track of failed tests in the output file.
+ # Force adding something to the list, even if it's
+ # None, to keep track of failed tests in the output
+ # file.
gas_optimized_tests[request.node.nodeid] = self._gas_optimization
- # Post-process for Engine X format (add pre_hash and state diff)
+ # Post-process for Engine X format (add pre_hash and state
+ # diff)
if (
FixtureFillingPhase.PRE_ALLOC_GENERATION in fixture_format.format_phases
and pre_alloc_hash is not None
@@ -1299,7 +1322,8 @@ def __init__(self, *args, **kwargs):
_info_metadata=t8n._info_metadata,
)
- # Generate witness data if witness functionality is enabled via the witness plugin
+ # Generate witness data if witness functionality is enabled via
+ # the witness plugin
if witness_generator is not None:
witness_generator(fixture)
@@ -1328,11 +1352,11 @@ def __init__(self, *args, **kwargs):
def pytest_generate_tests(metafunc: pytest.Metafunc):
"""
- Pytest hook used to dynamically generate test cases for each fixture format a given
- test spec supports.
+ Pytest hook used to dynamically generate test cases for each fixture format
+ a given test spec supports.
- NOTE: The static test filler does NOT use this hook. See FillerFile.collect() in
- ./static_filler.py for more details.
+ NOTE: The static test filler does NOT use this hook. See
+ FillerFile.collect() in ./static_filler.py for more details.
"""
session: FillingSession = metafunc.config.filling_session # type: ignore[attr-defined]
for test_type in BaseTest.spec_types.values():
@@ -1360,7 +1384,8 @@ def pytest_collection_modifyitems(
Remove pre-Paris tests parametrized to generate hive type fixtures; these
can't be used in the Hive Pyspec Simulator.
- Replaces the test ID for state tests that use a transition fork with the base fork.
+ Replaces the test ID for state tests that use a transition fork with the
+ base fork.
These can't be handled in this plugins pytest_generate_tests() as the fork
parametrization occurs in the forks plugin.
@@ -1390,7 +1415,8 @@ def pytest_collection_modifyitems(
markers = list(item.iter_markers())
- # Automatically apply pre_alloc_group marker to slow tests that are not benchmark tests
+ # Automatically apply pre_alloc_group marker to slow tests that are not
+ # benchmark tests
has_slow_marker = any(marker.name == "slow" for marker in markers)
has_benchmark_marker = any(marker.name == "benchmark" for marker in markers)
has_pre_alloc_group_marker = any(marker.name == "pre_alloc_group" for marker in markers)
@@ -1408,8 +1434,8 @@ def pytest_collection_modifyitems(
# Re-collect markers after adding the new one
markers = list(item.iter_markers())
- # Both the fixture format itself and the spec filling it have a chance to veto the
- # filling of a specific format.
+ # Both the fixture format itself and the spec filling it have a chance
+ # to veto the filling of a specific format.
if fixture_format.discard_fixture_format_by_marks(fork, markers):
items_for_removal.append(i)
continue
diff --git a/src/pytest_plugins/filler/fixture_output.py b/src/pytest_plugins/filler/fixture_output.py
index 3238c7d6ec2..c9f421f6ed9 100644
--- a/src/pytest_plugins/filler/fixture_output.py
+++ b/src/pytest_plugins/filler/fixture_output.py
@@ -69,7 +69,9 @@ def pre_alloc_groups_folder_path(self) -> Path:
@property
def should_auto_enable_all_formats(self) -> bool:
- """Check if all formats should be auto-enabled due to tarball output."""
+ """
+ Check if all formats should be auto-enabled due to tarball output.
+ """
return self.is_tarball
@staticmethod
@@ -95,7 +97,8 @@ def is_directory_usable_for_phase(self) -> bool:
# Phase 1: Directory must be completely empty
return self.is_directory_empty()
elif self.use_pre_alloc_groups:
- # Phase 2: Only pre-allocation groups must exist, no other files allowed
+ # Phase 2: Only pre-allocation groups must exist, no other files
+ # allowed
if not self.pre_alloc_groups_folder_path.exists():
return False
# Check that only the pre-allocation group files exist
@@ -149,13 +152,14 @@ def create_directories(self, is_master: bool) -> None:
"""
Create output and metadata directories if needed.
- If clean flag is set, remove and recreate the directory.
- Otherwise, verify the directory is empty before proceeding.
+ If clean flag is set, remove and recreate the directory. Otherwise,
+ verify the directory is empty before proceeding.
"""
if self.is_stdout:
return
- # Only the master process should delete/create directories if using pytest-xdist
+ # Only the master process should delete/create directories if using
+ # pytest-xdist
if not is_master:
return
diff --git a/src/pytest_plugins/filler/gen_test_doc/gen_test_doc.py b/src/pytest_plugins/filler/gen_test_doc/gen_test_doc.py
index 044b2a2e9a1..aae46ab5feb 100644
--- a/src/pytest_plugins/filler/gen_test_doc/gen_test_doc.py
+++ b/src/pytest_plugins/filler/gen_test_doc/gen_test_doc.py
@@ -1,34 +1,34 @@
"""
A pytest plugin that generates test case documentation for use in mkdocs.
-It generates the top-level "Test Case Reference" section in EEST's mkdocs
-site.
+It generates the top-level "Test Case Reference" section in EEST's mkdocs site.
Note:
----
-- No output directory is specified for the generated output; file IO occurs
- via the `mkdocs-gen-files` plugin. `mkdocs serve` writes intermediate files
- to our local `docs/` directory and then copies it to the site directory.
- We modify `docs/navigation.md` and write all other output underneath
- `docs/tests`. If mkdocs is interrupted, these intermediate artifacts are
- left in `docs/`.
+- No output directory is specified for the generated output; file IO
+occurs via the `mkdocs-gen-files` plugin. `mkdocs serve` writes intermediate
+files to our local `docs/` directory and then copies it to the site directory.
+We modify `docs/navigation.md` and write all other output underneath
+`docs/tests`. If mkdocs is interrupted, these intermediate artifacts are left
+in `docs/`.
Usage:
------
!!! note "Ensuring a clean build"
- In case mkdocs has polluted the `docs/` directory with intermediate files, run:
+In case mkdocs has polluted the `docs/` directory with intermediate files, run:
- ```console
- git restore docs/navigation.md # Careful if you have local modifications!
- rm -rf docs/tests docs/docs site
- ```
+```console
+git restore docs/navigation.md # Careful if you have local modifications!
+rm -rf docs/tests docs/docs site
+```
To test doc generation, run the plugin without mkdocs:
```console
-uv run fill -p pytest_plugins.filler.gen_test_doc.gen_test_doc --gen-docs --fork= tests
+uv run fill -p pytest_plugins.filler.gen_test_doc.gen_test_doc --gen-docs \
+ --fork= tests
```
Or to build and view the site:
@@ -128,8 +128,8 @@ def get_test_function_import_path(item: pytest.Item) -> str:
"""
Retrieve the fully qualified import path for an item's test function.
- This is used in jinja2 templates to get the test function, respectively
- the test function's class, documentation with mkdocstrings.
+ This is used in jinja2 templates to get the test function, respectively the
+ test function's class, documentation with mkdocstrings.
"""
item = cast(pytest.Function, item) # help mypy infer type
module_name = item.module.__name__
@@ -150,7 +150,8 @@ def get_import_path(path: Path) -> str:
Get the import path for a given path.
- For modules, strip the file extension.
- - For directories (i.e., packages such as `tests.berlin`), `with_suffix()` is ignored.
+ - For directories (i.e., packages such as `tests.berlin`),
+ `with_suffix()` is ignored.
To do:
------
@@ -273,7 +274,8 @@ def pytest_collection_modifyitems(self, config: pytest.Config, items: List[pytes
self.create_module_page_props()
# add the pages to the page_props dict
self.page_props = {**self.page_props, **self.function_page_props, **self.module_page_props}
- # this adds pages for the intermediate directory structure (tests, tests/berlin)
+ # this adds pages for the intermediate directory structure (tests,
+ # tests/berlin)
self.add_directory_page_props()
# add other interesting pages
self.add_spec_page_props()
@@ -297,7 +299,8 @@ def _setup_logger(self):
Configure the mkdocs logger and adds a StreamHandler if outside mkdocs.
We use the mkdocs logger to report warnings if conditions are invalid -
- this will inform the user and fail the build with `mkdocs build --strict`.
+ this will inform the user and fail the build with `mkdocs build
+ --strict`.
"""
if not logger.hasHandlers() or logger.level == logging.NOTSET:
stream_handler = logging.StreamHandler(sys.stdout)
@@ -309,12 +312,12 @@ def get_doc_site_base_url(self) -> str:
"""
Return site's base in its URL for inclusion of local files.
- This is required in order to include docs/javascripts/site.js, for
+ This is required in order to include docs/javascripts/site.js, for
example, in the standalone html pages.
- Github pages deploys to a sub-directory "execution-spec-tests" and
- mike deploys a version of the site underneath a sub-directory named
- after the version, e.g.:
+ Github pages deploys to a sub-directory "execution-spec-tests" and mike
+ deploys a version of the site underneath a sub-directory named after
+ the version, e.g.:
- https://eest.ethereum.org/main/
- https://eest.ethereum.org/v4.1.0/
@@ -352,7 +355,8 @@ def add_global_page_props_to_env(self):
def create_function_page_props(self, test_functions: Dict["str", List[Item]]) -> None:
"""
- Traverse all test items and create a lookup of doc pages & required props.
+ Traverse all test items and create a lookup of doc pages & required
+ props.
To do: Needs refactor.
"""
@@ -361,17 +365,20 @@ def create_function_page_props(self, test_functions: Dict["str", List[Item]]) ->
]
for function_id, function_items in test_functions.items():
assert all(isinstance(item, pytest.Function) for item in function_items)
- items = cast(List[pytest.Function], function_items) # help mypy infer type
+ # help mypy infer type
+ items = cast(List[pytest.Function], function_items)
# extract parametrized test cases for each test function
test_cases = []
if getattr(items[0], "callspec", None):
for item in items:
param_set = item.callspec.params
- # Don't show skipped parameters as columns in the test case table
+ # Don't show skipped parameters as columns in the test case
+ # table
keys = [key for key in param_set.keys() if key not in skip_params]
values = [param_set[key] for key in keys]
- # TODO: This formatting of bytes objects should be moved elsewhere
+ # TODO: This formatting of bytes objects should be moved
+ # elsewhere
values = [
(
" ".join(
@@ -406,8 +413,8 @@ def create_function_page_props(self, test_functions: Dict["str", List[Item]]) ->
if not valid_from_marker:
valid_from_fork = "Frontier"
else:
- # NOTE: The EOF tests cases contain two fork names in their valid_from marker,
- # separated by a comma. Take the last.
+ # NOTE: The EOF tests cases contain two fork names in their
+ # valid_from marker, separated by a comma. Take the last.
valid_from_fork = valid_from_marker.args[0].split(",")[-1]
target_or_valid_fork = (
@@ -481,7 +488,8 @@ def add_directory_page_props(self) -> None:
"""
Discover the intermediate directory pages and extract their properties.
- These directories may not have any test modules within them, e.g., tests/berlin/.
+ These directories may not have any test modules within them, e.g.,
+ tests/berlin/.
"""
sub_paths: Set[Path] = set()
for module_page in self.module_page_props.values():
@@ -511,10 +519,12 @@ def add_directory_page_props(self) -> None:
path=directory,
pytest_node_id=str(directory),
source_code_url=generate_github_url(directory, branch_or_commit_or_tag=self.ref),
- # TODO: This won't work in all cases; should be from the development fork
- # Currently breaks for `tests/unscheduled/eip7692_eof_v1/index.md` # noqa: SC100
+ # TODO: This won't work in all cases; should be from the
+ # development fork. Currently breaks for
+ # `tests/unscheduled/eip7692_eof_v1/index.md`
target_or_valid_fork=fork.capitalize() if fork else "Unknown",
- package_name=get_import_path(directory), # init.py will be used for docstrings
+ # init.py will be used for docstrings
+ package_name=get_import_path(directory),
is_benchmark=is_benchmark,
)
@@ -538,7 +548,10 @@ def find_files_within_collection_scope(self, file_pattern: str) -> List[Path]:
return [Path(file) for file in set(files)]
def add_spec_page_props(self) -> None:
- """Add page path properties for spec files discovered in the collection scope."""
+ """
+ Add page path properties for spec files discovered in the collection
+ scope.
+ """
for spec_path in self.find_files_within_collection_scope("spec.py"):
self.page_props[str(spec_path)] = ModulePageProps(
title="Spec",
@@ -551,44 +564,58 @@ def add_spec_page_props(self) -> None:
)
def add_markdown_page_props(self) -> None:
- """Add page path properties for markdown files discovered in the collection scope."""
+ """
+ Add page path properties for markdown files discovered in the
+ collection scope.
+ """
for md_path in self.find_files_within_collection_scope("*.md"):
self.page_props[str(md_path)] = MarkdownPageProps(
title=md_path.stem,
path=md_path,
source_code_url=generate_github_url(md_path, branch_or_commit_or_tag=self.ref),
- pytest_node_id=str(md_path), # abuse: not a test, but used in source code link
+ # abuse: not a test, but used in source code link
+ pytest_node_id=str(md_path),
target_or_valid_fork="",
package_name="",
)
def update_mkdocs_nav(self) -> None:
- """Add the generated 'Test Case Reference' entries to the mkdocs navigation menu."""
+ """
+ Add the generated 'Test Case Reference' entries to the mkdocs
+ navigation menu.
+ """
fork_order = {fork.name().lower(): i for i, fork in enumerate(reversed(get_forks()))}
def sort_by_fork_deployment_and_path(x: PageProps) -> Tuple[Any, ...]:
"""
- Key function used to sort navigation menu entries for test case ref docs.
+ Key function used to sort navigation menu entries for test case ref
+ docs.
Nav entries / output files contain special cases such as:
- ("Test Case Reference",) -> tests/index.md
- ("Test Case Reference", "Berlin") -> tests/berlin/index.md
- ("Test Case Reference", "EIP-7692 EOF V1", tracker.md")
- tests/unscheduled/eip7692_eof_v1/tracker.md
+ tests/unscheduled/eip7692_eof_v1/tracker.md
- ("Test Case Reference", "Shanghai", "EIP-3855 PUSH0", "Spec") ->
- tests/shanghai/eip3855_push0/spec.py
+ tests/shanghai/eip3855_push0/spec.py
- This function provides and ordering to sort nav men entries as follows:
+ This function provides an ordering to sort nav menu entries as
+ follows:
- 1. Forks are listed in the chronological order that they were deployed.
- 2. Special files listed first (before test pages): "*.md" and `Spec.py`,
- 3. The page's corresponding file path under `./tests/`.
+ 1. Forks are listed in the chronological order that they were
+ deployed.
+ 2. Special files listed first (before test pages): "*.md"
+ and `Spec.py`,
+ 3. The page's corresponding file path under
+ `./tests/`.
"""
length = len(x.path.parts)
if length > 1:
- fork = str(x.path.parts[1]).lower() # the fork folder from the relative path
- if fork not in fork_order: # unscheduled features added to the end
+ # the fork folder from the relative path
+ fork = str(x.path.parts[1]).lower()
+ # unscheduled features added to the end
+ if fork not in fork_order:
return (999, str(x.path))
if length == 1:
return (0,)
diff --git a/src/pytest_plugins/filler/gen_test_doc/page_props.py b/src/pytest_plugins/filler/gen_test_doc/page_props.py
index 2c3194ee8cb..471cd1f2d56 100644
--- a/src/pytest_plugins/filler/gen_test_doc/page_props.py
+++ b/src/pytest_plugins/filler/gen_test_doc/page_props.py
@@ -1,10 +1,10 @@
"""
Classes and helpers used for templates, navigation menus and file output.
-The dataclass fields are used to define the page properties fields which
-are used in the jinja2 templates when generating site content (located in
-docs/templates). The classes also define each page's navigation menu entry
-and target output file.
+The dataclass fields are used to define the page properties fields which are
+used in the jinja2 templates when generating site content (located in
+docs/templates). The classes also define each page's navigation menu entry and
+target output file.
A few helpers are defined with EEST logic in order to sanitize strings from
file paths for use in navigation menu.
@@ -23,7 +23,8 @@
def apply_name_filters(input_string: str):
"""
- Apply a list of capitalizations/regexes to names used in titles & nav menus.
+ Apply a list of capitalizations/regexes to names used in titles & nav
+ menus.
Note: As of 2024-10-08, with 634 doc pages, this function constitutes ~2.0s
of the total runtime (~5.5s). This seems to be insignificant with the time
@@ -62,7 +63,9 @@ def apply_name_filters(input_string: str):
def snake_to_capitalize(string: str) -> str: # noqa: D103
- """Convert valid identifiers to a capitalized string, otherwise leave as-is."""
+ """
+ Convert valid identifiers to a capitalized string, otherwise leave as-is.
+ """
if string.isidentifier():
return " ".join(word.capitalize() for word in string.split("_"))
return string
@@ -74,14 +77,17 @@ def sanitize_string_title(string: str) -> str:
def nav_path_to_sanitized_str_tuple(nav_path: Path) -> tuple:
- """Convert a nav path to a tuple of sanitized strings for use in mkdocs navigation."""
+ """
+ Convert a nav path to a tuple of sanitized strings for use in mkdocs
+ navigation.
+ """
return tuple(sanitize_string_title(part) for part in nav_path.parts)
class FileOpener(Protocol):
"""
- Protocol to replace `mkdocs_gen_files` so it doesn't have to be imported/installed for
- unit tests.
+ Protocol to replace `mkdocs_gen_files` so it doesn't have to be
+ imported/installed for unit tests.
"""
def open(self, path: Path, mode: str) -> ContextManager[IO[Any]]:
@@ -94,8 +100,8 @@ class PagePropsBase:
"""
Common test reference doc page properties and definitions.
- The dataclass attributes are made directly available in the jinja2
- found in `docs/templates/*.j2`.
+ The dataclass attributes are made directly available in the jinja2 found in
+ `docs/templates/*.j2`.
"""
title: str
@@ -159,7 +165,10 @@ def write_page(self, file_opener: FileOpener, jinja2_env: Environment):
@dataclass
class TestCase:
- """Properties used to define a single test case in test function parameter tables."""
+ """
+ Properties used to define a single test case in test function parameter
+ tables.
+ """
full_id: str
abbreviated_id: str
@@ -200,10 +209,11 @@ def nav_entry(self, top_level_nav_entry) -> tuple:
def write_page(self, file_opener: FileOpener, jinja2_env: Environment):
"""
- Test functions also get a static HTML page with parametrized test cases.
+ Test functions also get a static HTML page with parametrized test
+ cases.
- This is intended for easier viewing (without mkdocs styling) of the data-table
- that documents the parametrized test cases.
+ This is intended for easier viewing (without mkdocs styling) of the
+ data-table that documents the parametrized test cases.
"""
super().write_page(file_opener, jinja2_env)
if not self.cases:
@@ -218,7 +228,10 @@ def write_page(self, file_opener: FileOpener, jinja2_env: Environment):
@dataclass
class TestFunction:
- """Properties used to build the test function overview table in test module pages."""
+ """
+ Properties used to build the test function overview table in test module
+ pages.
+ """
name: str
test_type: str
@@ -228,7 +241,10 @@ class TestFunction:
@dataclass
class ModulePageProps(PagePropsBase):
- """Definitions used for test modules, e.g., `tests/berlin/eip2930_access_list/test_acl.py`."""
+ """
+ Definitions used for test modules, e.g.,
+ `tests/berlin/eip2930_access_list/test_acl.py`.
+ """
test_functions: List[TestFunction] = field(default_factory=list)
@@ -247,7 +263,10 @@ def target_output_file(self) -> Path:
@dataclass
class DirectoryPageProps(PagePropsBase):
- """Definitions used for parent directories in test paths, e.g., `tests/berlin`."""
+ """
+ Definitions used for parent directories in test paths, e.g.,
+ `tests/berlin`.
+ """
@property
def template(self) -> str:
@@ -262,7 +281,9 @@ def target_output_file(self) -> Path:
@dataclass
class MarkdownPageProps(PagePropsBase):
- """Definitions used to verbatim include markdown files included in test paths."""
+ """
+ Definitions used to verbatim include markdown files included in test paths.
+ """
@property
def template(self) -> str:
diff --git a/src/pytest_plugins/filler/ported_tests.py b/src/pytest_plugins/filler/ported_tests.py
index 228647baf5c..0d7f5475bbb 100644
--- a/src/pytest_plugins/filler/ported_tests.py
+++ b/src/pytest_plugins/filler/ported_tests.py
@@ -1,20 +1,21 @@
"""
A pytest plugin that shows `ported_from` marker information.
-This plugin extracts and displays information from @pytest.mark.ported_from markers,
-showing either the static filler file paths or associated PR URLs.
+This plugin extracts and displays information from @pytest.mark.ported_from
+markers, showing either the static filler file paths or associated PR URLs.
Usage:
------
-# Show static filler file paths
-uv run fill --show-ported-from tests/
+# Show static filler file paths:
+# uv run fill --show-ported-from tests/
-# Show PR URLs instead
-uv run fill --show-ported-from=prs tests/
+# Show PR URLs instead:
+# uv run fill --show-ported-from=prs tests/
The plugin will:
1. Collect all test items with @pytest.mark.ported_from markers
-2. Extract either the file paths (first positional argument) or PR URLs (pr keyword argument)
+2. Extract either the file paths (first positional argument) or PR URLs (pr
+ keyword argument)
3. Output a deduplicated, sorted list, one per line
4. Skip test execution (collection only)
5. Exclude tests with coverage_missed_reason from output
@@ -22,7 +23,8 @@
Marker Format:
--------------
@pytest.mark.ported_from(
- ["path/to/static_filler1.json", "path/to/static_filler2.json"],
+ ["path/to/static_filler1.json",
+ "path/to/static_filler2.json"],
pr=[
"https://github.com/ethereum/execution-spec-tests/pull/1234",
"https://github.com/ethereum/execution-spec-tests/pull/5678",
diff --git a/src/pytest_plugins/filler/pre_alloc.py b/src/pytest_plugins/filler/pre_alloc.py
index 49a9a4594bc..3bb74b3e0f5 100644
--- a/src/pytest_plugins/filler/pre_alloc.py
+++ b/src/pytest_plugins/filler/pre_alloc.py
@@ -147,8 +147,9 @@ def deploy_contract(
"""
Deploy a contract to the allocation.
- Warning: `address` parameter is a temporary solution to allow tests to hard-code the
- contract address. Do NOT use in new tests as it will be removed in the future!
+ Warning: `address` parameter is a temporary solution to allow tests to
+ hard-code the contract address. Do NOT use in new tests as it will be
+ removed in the future!
"""
if storage is None:
storage = {}
@@ -195,10 +196,11 @@ def fund_eoa(
nonce: NumberConvertible | None = None,
) -> EOA:
"""
- Add a previously unused EOA to the pre-alloc with the balance specified by `amount`.
+ Add a previously unused EOA to the pre-alloc with the balance specified
+ by `amount`.
- If amount is 0, nothing will be added to the pre-alloc but a new and unique EOA will be
- returned.
+ If amount is 0, nothing will be added to the pre-alloc but a new and
+ unique EOA will be returned.
"""
eoa = next(self._eoa_iterator)
if amount is None:
@@ -218,13 +220,14 @@ def fund_eoa(
if nonce > 0:
eoa.nonce = nonce
else:
- # Type-4 transaction is sent to the EOA to set the storage, so the nonce must be 1
+ # Type-4 transaction is sent to the EOA to set the storage, so
+ # the nonce must be 1
if not isinstance(delegation, Address) and delegation == "Self":
delegation = eoa
- # If delegation is None but storage is not, realistically the nonce should be 2
- # because the account must have delegated to set the storage and then again to
- # reset the delegation (but can be overridden by the test for a non-realistic
- # scenario)
+ # If delegation is None but storage is not, realistically the
+ # nonce should be 2 because the account must have delegated to
+ # set the storage and then again to reset the delegation (but
+ # can be overridden by the test for a non-realistic scenario)
real_nonce = 2 if delegation is None else 1
nonce = Number(real_nonce if nonce is None else nonce)
account = Account(
@@ -257,7 +260,8 @@ def fund_address(self, address: Address, amount: NumberConvertible):
def empty_account(self) -> Address:
"""
- Add a previously unused account guaranteed to be empty to the pre-alloc.
+ Add a previously unused account guaranteed to be empty to the
+ pre-alloc.
This ensures the account has:
- Zero balance
@@ -265,8 +269,9 @@ def empty_account(self) -> Address:
- No code
- No storage
- This is different from precompiles or system contracts. The function does not
- send any transactions, ensuring that the account remains "empty."
+ This is different from precompiles or system contracts. The function
+ does not send any transactions, ensuring that the account remains
+ "empty."
Returns:
Address: The address of the created empty account.
@@ -314,8 +319,8 @@ def sha256_from_string(s: str) -> int:
if name not in ALL_FIXTURE_FORMAT_NAMES:
ALL_FIXTURE_FORMAT_NAMES.append(name)
-# Sort by length, from longest to shortest, since some fixture format names contain others
-# so we are always sure to catch the longest one first.
+# Sort by length, from longest to shortest, since some fixture format names
+# contain others so we are always sure to catch the longest one first.
ALL_FIXTURE_FORMAT_NAMES.sort(key=len, reverse=True)
@@ -324,17 +329,18 @@ def node_id_for_entropy(request: pytest.FixtureRequest, fork: Fork | None) -> st
"""
Return the node id with the fixture format name and fork name stripped.
- Used in cases where we are filling for pre-alloc groups, and we take the name of the
- test as source of entropy to get a deterministic address when generating the pre-alloc
- grouping.
+ Used in cases where we are filling for pre-alloc groups, and we take the
+ name of the test as source of entropy to get a deterministic address when
+ generating the pre-alloc grouping.
- Removing the fixture format and the fork name from the node id before hashing results in the
- contracts and senders addresses being the same across fixture types and forks for the same
- test.
+ Removing the fixture format and the fork name from the node id before
+ hashing results in the contracts and senders addresses being the same
+ across fixture types and forks for the same test.
"""
node_id: str = request.node.nodeid
if fork is None:
- # FIXME: Static tests don't have a fork, so we need to get it from the node.
+ # FIXME: Static tests don't have a fork, so we need to get it from the
+ # node.
assert hasattr(request.node, "fork")
fork = request.node.fork
for fixture_format_name in ALL_FIXTURE_FORMAT_NAMES:
@@ -358,7 +364,8 @@ def contract_address_iterator(
) -> Iterator[Address]:
"""Return iterator over contract addresses with dynamic scoping."""
if request.config.getoption(
- # TODO: Ideally, we should check the fixture format instead of checking parameters.
+ # TODO: Ideally, we should check the fixture format instead of checking
+ # parameters.
"generate_pre_alloc_groups",
default=False,
) or request.config.getoption("use_pre_alloc_groups", default=False):
@@ -383,7 +390,8 @@ def eoa_iterator(
) -> Iterator[EOA]:
"""Return iterator over EOAs copies with dynamic scoping."""
if request.config.getoption(
- # TODO: Ideally, we should check the fixture format instead of checking parameters.
+ # TODO: Ideally, we should check the fixture format instead of checking
+ # parameters.
"generate_pre_alloc_groups",
default=False,
) or request.config.getoption("use_pre_alloc_groups", default=False):
diff --git a/src/pytest_plugins/filler/static_filler.py b/src/pytest_plugins/filler/static_filler.py
index 4fbe412f096..70feff2023d 100644
--- a/src/pytest_plugins/filler/static_filler.py
+++ b/src/pytest_plugins/filler/static_filler.py
@@ -1,6 +1,6 @@
"""
-Static filler pytest plugin that reads test cases from static files and fills them into test
-fixtures.
+Static filler pytest plugin that reads test cases from static files and fills
+them into test fixtures.
"""
import inspect
@@ -111,7 +111,10 @@ def get_all_combinations_from_parametrize_marks(
def pytest_collect_file(file_path: Path, parent) -> pytest.Collector | None:
- """Pytest hook that collects test cases from static files and fills them into test fixtures."""
+ """
+ Pytest hook that collects test cases from static files and fills them into
+ test fixtures.
+ """
fill_static_tests_enabled = parent.config.getoption("fill_static_tests_enabled")
if not fill_static_tests_enabled:
return None
@@ -147,8 +150,8 @@ class NoIntResolver(yaml.SafeLoader):
class FillerFile(pytest.File):
"""
- Filler file that reads test cases from static files and fills them into test
- fixtures.
+ Filler file that reads test cases from static files and fills them into
+ test fixtures.
"""
def collect(self: "FillerFile") -> Generator["FillerTestItem", None, None]:
@@ -241,7 +244,8 @@ def collect(self: "FillerFile") -> Generator["FillerTestItem", None, None]:
get_all_combinations_from_parametrize_marks(parametrize_marks)
)
for parameter_set in parameter_set_list:
- # Copy and extend the params with the parameter set
+ # Copy and extend the params with the
+ # parameter set
case_marks = (
marks[:]
+ [
@@ -349,10 +353,10 @@ def yul(fork: Fork, request: pytest.FixtureRequest):
"""
Fixture that allows contract code to be defined with Yul code.
- This fixture defines a class that wraps the ::ethereum_test_tools.Yul
- class so that upon instantiation within the test case, it provides the
- test case's current fork parameter. The forks is then available for use
- in solc's arguments for the Yul code compilation.
+ This fixture defines a class that wraps the ::ethereum_test_tools.Yul class
+ so that upon instantiation within the test case, it provides the test
+ case's current fork parameter. The forks is then available for use in
+ solc's arguments for the Yul code compilation.
Test cases can override the default value by specifying a fixed version
with the @pytest.mark.compile_yul_with(FORK) marker.
diff --git a/src/pytest_plugins/filler/tests/conftest.py b/src/pytest_plugins/filler/tests/conftest.py
index 903d6e8019c..570f9e3d0d2 100644
--- a/src/pytest_plugins/filler/tests/conftest.py
+++ b/src/pytest_plugins/filler/tests/conftest.py
@@ -9,10 +9,11 @@
@pytest.fixture(autouse=True)
def monkeypatch_path_for_entry_points(monkeypatch):
"""
- Monkeypatch the PATH to add the "bin" directory where entrypoints are installed.
+ Monkeypatch the PATH to add the "bin" directory where entrypoints are
+ installed.
- This would typically be in the venv in which pytest is running these tests and fill,
- which, with uv, is `./.venv/bin`.
+ This would typically be in the venv in which pytest is running these tests
+ and fill, which, with uv, is `./.venv/bin`.
This is required in order for fill to locate the ethereum-spec-evm-resolver
"binary" (entrypoint) when being executed using pytester.
diff --git a/src/pytest_plugins/filler/tests/test_benchmarking.py b/src/pytest_plugins/filler/tests/test_benchmarking.py
index acceecda6be..42c8f80874e 100644
--- a/src/pytest_plugins/filler/tests/test_benchmarking.py
+++ b/src/pytest_plugins/filler/tests/test_benchmarking.py
@@ -37,12 +37,11 @@ def setup_test_directory_structure(
Set up the common test directory structure used across multiple tests.
Args:
- pytester: The pytest Pytester fixture
- test_content: The content to write to the test file
- test_filename: The name of the test file to create
+ pytester: The pytest Pytester fixture
+ test_content: The content to write to the test file
+ test_filename: The name of the test file to create
- Returns:
- The path to the created test module file
+ Returns: The path to the created test module file
"""
tests_dir = pytester.mkdir("tests")
@@ -71,7 +70,10 @@ def test_gas_benchmark_option_added(pytester: pytest.Pytester):
def test_benchmarking_mode_configured_with_option(pytester: pytest.Pytester):
- """Test that fill_mode is set to BENCHMARKING when --gas-benchmark-values is used."""
+ """
+ Test that fill_mode is set to BENCHMARKING when --gas-benchmark-values is
+ used.
+ """
setup_test_directory_structure(pytester, test_module_dummy, "test_dummy_benchmark.py")
# Test with gas benchmark values
@@ -96,7 +98,10 @@ def test_benchmarking_mode_configured_with_option(pytester: pytest.Pytester):
def test_benchmarking_mode_not_configured_without_option(pytester: pytest.Pytester):
- """Test that fill_mode is not set to BENCHMARKING when --gas-benchmark-values is not used."""
+ """
+ Test that fill_mode is not set to BENCHMARKING when --gas-benchmark-values
+ is not used.
+ """
setup_test_directory_structure(pytester, test_module_dummy, "test_dummy_benchmark.py")
# Test without gas benchmark values
diff --git a/src/pytest_plugins/filler/tests/test_collect_only.py b/src/pytest_plugins/filler/tests/test_collect_only.py
index 8fee4e198f0..b9b09974cbc 100644
--- a/src/pytest_plugins/filler/tests/test_collect_only.py
+++ b/src/pytest_plugins/filler/tests/test_collect_only.py
@@ -51,6 +51,6 @@ def test_collect_only_output(pytester: pytest.Pytester):
in line
for line in result.outlines
), f"Expected test output: {result.outlines}"
- # fill generates 3 test variants: state_test, blockchain_test_from_state_test,
- # blockchain_test_engine_from_state_test
+ # fill generates 3 test variants: state_test,
+ # blockchain_test_from_state_test, blockchain_test_engine_from_state_test
assert any("3 tests collected" in line for line in result.outlines)
diff --git a/src/pytest_plugins/filler/tests/test_format_selector.py b/src/pytest_plugins/filler/tests/test_format_selector.py
index 16c34c5939b..0346bc1ce5a 100644
--- a/src/pytest_plugins/filler/tests/test_format_selector.py
+++ b/src/pytest_plugins/filler/tests/test_format_selector.py
@@ -77,11 +77,14 @@ def test_should_generate_single_phase_pre_alloc_format(self):
},
)
- # Should not generate because it needs pre-alloc but we're in single phase
+ # Should not generate because it needs pre-alloc but we're in single
+ # phase
assert not format_selector.should_generate(format_with_pre_alloc)
def test_should_generate_phase2_with_pre_alloc_format(self):
- """Test phase 2 (after pre-alloc) with format that supports pre-alloc."""
+ """
+ Test phase 2 (after pre-alloc) with format that supports pre-alloc.
+ """
phase_manager = PhaseManager(
current_phase=FixtureFillingPhase.FILL,
previous_phases={FixtureFillingPhase.PRE_ALLOC_GENERATION},
@@ -166,8 +169,12 @@ def test_should_generate_labeled_format(self):
assert format_selector.should_generate(labeled_format)
def test_comprehensive_scenarios(self):
- """Test comprehensive scenarios covering all phase and format combinations."""
- # Test matrix: (current_phase, previous_phases, format_phases, generate_all) -> expected
+ """
+ Test comprehensive scenarios covering all phase and format
+ combinations.
+ """
+ # Test matrix: (current_phase, previous_phases, format_phases,
+ # generate_all) -> expected
test_cases: List[ # type: ignore[annotation-unchecked]
Tuple[
FixtureFillingPhase, Set[FixtureFillingPhase], Set[FixtureFillingPhase], bool, bool
diff --git a/src/pytest_plugins/filler/tests/test_generate_all_formats.py b/src/pytest_plugins/filler/tests/test_generate_all_formats.py
index 8c39d017084..3f7a3543339 100644
--- a/src/pytest_plugins/filler/tests/test_generate_all_formats.py
+++ b/src/pytest_plugins/filler/tests/test_generate_all_formats.py
@@ -4,7 +4,10 @@
def test_fixture_output_with_generate_all_formats():
- """Test that FixtureOutput properly handles the should_generate_all_formats parameter."""
+ """
+ Test that FixtureOutput properly handles the should_generate_all_formats
+ parameter.
+ """
# Test with should_generate_all_formats=True
fixture_output = FixtureOutput(
output_path="/tmp/test",
@@ -20,7 +23,10 @@ def test_fixture_output_with_generate_all_formats():
def test_fixture_output_from_config_includes_generate_all_formats():
- """Test that FixtureOutput.from_config includes the should_generate_all_formats option."""
+ """
+ Test that FixtureOutput.from_config includes the
+ should_generate_all_formats option.
+ """
# Mock pytest config object
class MockConfig:
@@ -43,7 +49,10 @@ def getoption(self, option):
def test_tarball_output_auto_enables_generate_all_formats():
- """Test that tarball output (.tar.gz) automatically enables should_generate_all_formats."""
+ """
+ Test that tarball output (.tar.gz) automatically enables
+ should_generate_all_formats.
+ """
# Mock pytest config object with tarball output
class MockConfig:
@@ -67,7 +76,10 @@ def getoption(self, option):
def test_regular_output_does_not_auto_enable_generate_all_formats():
- """Test that regular directory output doesn't auto-enable should_generate_all_formats."""
+ """
+ Test that regular directory output doesn't auto-enable
+ should_generate_all_formats.
+ """
# Mock pytest config object with regular output
class MockConfig:
@@ -91,7 +103,10 @@ def getoption(self, option):
def test_explicit_generate_all_formats_overrides_tarball_auto_enable():
- """Test that explicitly setting should_generate_all_formats=True works with tarball output."""
+ """
+ Test that explicitly setting should_generate_all_formats=True works with
+ tarball output.
+ """
# Mock pytest config object with tarball output and explicit flag
class MockConfig:
diff --git a/src/pytest_plugins/filler/tests/test_output_directory.py b/src/pytest_plugins/filler/tests/test_output_directory.py
index 4eba64d88f2..eca2cb1015e 100644
--- a/src/pytest_plugins/filler/tests/test_output_directory.py
+++ b/src/pytest_plugins/filler/tests/test_output_directory.py
@@ -21,7 +21,10 @@ def test_function(state_test, pre):
@pytest.fixture
def minimal_test_path(pytester: pytest.Pytester) -> Path:
- """Minimal test file that's written to a file using pytester and ready to fill."""
+ """
+ Minimal test file that's written to a file using pytester and ready to
+ fill.
+ """
tests_dir = pytester.mkdir("tests")
test_file = tests_dir / MINIMAL_TEST_FILE_NAME
test_file.write_text(MINIMAL_TEST_CONTENTS)
@@ -48,7 +51,10 @@ def run_fill(
fill_fork_until: str,
default_t8n: TransitionTool,
):
- """Create a function to run the fill command with various output directory scenarios."""
+ """
+ Create a function to run the fill command with various output directory
+ scenarios.
+ """
def _run_fill(
output_dir: Path,
@@ -56,7 +62,10 @@ def _run_fill(
expect_failure: bool = False,
disable_capture_output: bool = False,
) -> pytest.RunResult:
- """Run the fill command with the specified output directory and clean flag."""
+ """
+ Run the fill command with the specified output directory and clean
+ flag.
+ """
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
args = [
"-c",
@@ -141,7 +150,9 @@ def test_fill_to_nonempty_directory_with_clean(tmp_path_factory: TempPathFactory
def test_fill_to_directory_with_meta_fails(tmp_path_factory: TempPathFactory, run_fill):
- """Test filling to a directory with .meta subdirectory fails without --clean."""
+ """
+ Test filling to a directory with .meta subdirectory fails without --clean.
+ """
# Create a directory with .meta
output_dir = tmp_path_factory.mktemp("directory_with_meta")
meta_dir = output_dir / ".meta"
@@ -204,7 +215,9 @@ def test_fill_to_tarball_directory(tmp_path_factory: TempPathFactory, run_fill):
# New tests for the is_master functionality
def test_create_directories_skips_when_not_master():
- """Test that create_directories skips operations when not the master process."""
+ """
+ Test that create_directories skips operations when not the master process.
+ """
fixture_output = FixtureOutput(
output_path=Path("/fake/path"),
clean=True,
@@ -227,7 +240,10 @@ def test_create_directories_skips_when_not_master():
def test_create_directories_operates_when_master():
- """Test that create_directories performs operations when is the master process."""
+ """
+ Test that create_directories performs operations when is the master
+ process.
+ """
fixture_output = FixtureOutput(
output_path=Path("/fake/path"),
clean=True,
@@ -264,7 +280,8 @@ def test_create_directories_checks_empty_when_master():
patch.object(Path, "exists", return_value=True),
patch.object(Path, "mkdir"),
):
- # Call with is_master=True and expect an error about non-empty directory
+ # Call with is_master=True and expect an error about non-empty
+ # directory
with pytest.raises(ValueError, match="not empty"):
fixture_output.create_directories(is_master=True)
@@ -274,7 +291,10 @@ def test_create_directories_checks_empty_when_master():
def test_stdout_skips_directory_operations_regardless_of_master():
- """Test that stdout output skips directory operations regardless of is_master value."""
+ """
+ Test that stdout output skips directory operations regardless of is_master
+ value.
+ """
fixture_output = FixtureOutput(
output_path=Path("stdout"),
clean=True,
diff --git a/src/pytest_plugins/filler/tests/test_phase_manager.py b/src/pytest_plugins/filler/tests/test_phase_manager.py
index e727b641543..6aa6e5fa7b4 100644
--- a/src/pytest_plugins/filler/tests/test_phase_manager.py
+++ b/src/pytest_plugins/filler/tests/test_phase_manager.py
@@ -74,7 +74,7 @@ def test_from_config_use_pre_alloc(self):
assert not phase_manager.is_single_phase_fill
def test_from_config_generate_all_formats(self):
- """Test that generate_all_formats triggers PRE_ALLOC_GENERATION phase."""
+        """Test that generate_all_formats triggers PRE_ALLOC_GENERATION."""
config = MockConfig(generate_all_formats=True)
phase_manager = PhaseManager.from_config(config)
@@ -94,7 +94,7 @@ def test_from_config_generate_all_and_pre_alloc(self):
assert phase_manager.is_pre_alloc_generation
def test_from_config_use_pre_alloc_with_generate_all(self):
- """Test phase 2 with generate_all_formats (passed by CLI to phase 2)."""
+ """Test phase 2 with generate_all_formats (passed by CLI)."""
config = MockConfig(use_pre_alloc_groups=True, generate_all_formats=True)
phase_manager = PhaseManager.from_config(config)
@@ -104,19 +104,27 @@ def test_from_config_use_pre_alloc_with_generate_all(self):
assert phase_manager.is_fill_after_pre_alloc
def test_all_flag_combinations(self):
- """Test all 8 possible flag combinations to ensure correct phase determination."""
+ """
+ Test all 8 possible flag combinations to ensure correct phase
+ determination.
+ """
test_cases = [
- # (generate_pre_alloc, use_pre_alloc, generate_all) -> (current_phase, has_previous)
- (False, False, False, FixtureFillingPhase.FILL, False), # Normal fill
+ # (generate_pre_alloc, use_pre_alloc, generate_all) ->
+ # (current_phase, has_previous)
+ # Normal fill
+ (False, False, False, FixtureFillingPhase.FILL, False),
# Generate all triggers phase 1
(False, False, True, FixtureFillingPhase.PRE_ALLOC_GENERATION, False),
(False, True, False, FixtureFillingPhase.FILL, True), # Phase 2
- (False, True, True, FixtureFillingPhase.FILL, True), # Phase 2 with generate all
+ # Phase 2 with generate all
+ (False, True, True, FixtureFillingPhase.FILL, True),
(True, False, False, FixtureFillingPhase.PRE_ALLOC_GENERATION, False), # Phase 1
# Phase 1 with generate all
(True, False, True, FixtureFillingPhase.PRE_ALLOC_GENERATION, False),
- (True, True, False, FixtureFillingPhase.FILL, True), # Invalid but use_pre_alloc wins
- (True, True, True, FixtureFillingPhase.FILL, True), # Invalid but use_pre_alloc wins
+ # Invalid but use_pre_alloc wins
+ (True, True, False, FixtureFillingPhase.FILL, True),
+ # Invalid but use_pre_alloc wins
+ (True, True, True, FixtureFillingPhase.FILL, True),
]
for gen_pre, use_pre, gen_all, expected_phase, has_previous in test_cases:
diff --git a/src/pytest_plugins/filler/tests/test_prealloc_group.py b/src/pytest_plugins/filler/tests/test_prealloc_group.py
index 86ccc9c3b61..29e9da7c62b 100644
--- a/src/pytest_plugins/filler/tests/test_prealloc_group.py
+++ b/src/pytest_plugins/filler/tests/test_prealloc_group.py
@@ -412,7 +412,10 @@ def test_pre_alloc_grouping_by_test_type(
test_definitions: List[FormattedTest],
expected_different_pre_alloc_groups: int,
):
- """Test pre-alloc grouping when filling state tests, and the effect of the `state_test.env`."""
+ """
+ Test pre-alloc grouping when filling state tests, and the effect of the
+ `state_test.env`.
+ """
tests_dir = Path(pytester.mkdir("tests"))
for i, test in enumerate(test_definitions):
test_module = tests_dir / f"test_{i}.py"
diff --git a/src/pytest_plugins/filler/tests/test_prealloc_group_usage_example.py b/src/pytest_plugins/filler/tests/test_prealloc_group_usage_example.py
index 508db25a68f..6ffd67994ae 100644
--- a/src/pytest_plugins/filler/tests/test_prealloc_group_usage_example.py
+++ b/src/pytest_plugins/filler/tests/test_prealloc_group_usage_example.py
@@ -1,8 +1,8 @@
"""
Example usage of the pre_alloc_group marker.
-This file demonstrates how tests would use the marker in practice.
-Note: This is just documentation, not executable tests.
+This file demonstrates how tests would use the marker in practice. Note: This
+is just documentation, not executable tests.
"""
import pytest
@@ -13,9 +13,12 @@
"separate", reason="Deploys beacon root contract using actual hardcoded deployer address"
)
def test_beacon_root_contract_deployment():
- """Test beacon root contract deployment with the official deployer address."""
- # This test uses the actual beacon root deployer address (e.g., 0x4242...4242)
- # which could conflict with dynamically allocated addresses in other tests
+ """
+ Test beacon root contract deployment with the official deployer address.
+ """
+ # This test uses the actual beacon root deployer address (e.g.,
+ # 0x4242...4242) which could conflict with dynamically allocated addresses
+ # in other tests
pass
@@ -25,8 +28,9 @@ def test_beacon_root_contract_deployment():
)
def test_custom_consolidation_contract():
"""Test that deploys a modified consolidation contract."""
- # This test deploys a consolidation contract with custom bytecode that differs
- # from the standard implementation, requiring isolation from other consolidation tests
+ # This test deploys a consolidation contract with custom bytecode that
+ # differs from the standard implementation, requiring isolation from other
+ # consolidation tests
pass
@@ -36,8 +40,9 @@ def test_custom_consolidation_contract():
)
def test_custom_consolidation_edge_cases():
"""Test edge cases with the custom consolidation contract."""
- # This test can share the pre-allocation with test_custom_consolidation_contract
- # since they both use the same custom contract setup
+ # This test can share the pre-allocation with
+ # test_custom_consolidation_contract since they both use the same custom
+ # contract setup
pass
diff --git a/src/pytest_plugins/filler/tests/test_slow_marker_pre_alloc.py b/src/pytest_plugins/filler/tests/test_slow_marker_pre_alloc.py
index c5f3d012c15..67b0d43c5f1 100644
--- a/src/pytest_plugins/filler/tests/test_slow_marker_pre_alloc.py
+++ b/src/pytest_plugins/filler/tests/test_slow_marker_pre_alloc.py
@@ -6,7 +6,10 @@
def test_slow_marker_gets_pre_alloc_group(pytester, default_t8n: TransitionTool):
- """Test that slow tests without benchmark marker get pre_alloc_group automatically."""
+ """
+ Test that slow tests without benchmark marker get pre_alloc_group
+ automatically.
+ """
test_module = textwrap.dedent(
"""\
import pytest
@@ -49,7 +52,9 @@ def test_slow_without_benchmark(state_test: StateTestFiller, pre: Alloc):
def test_slow_with_benchmark_no_pre_alloc(pytester, default_t8n: TransitionTool):
- """Test that slow tests WITH benchmark marker do NOT get pre_alloc_group."""
+ """
+ Test that slow tests WITH benchmark marker do NOT get pre_alloc_group.
+ """
test_module = textwrap.dedent(
"""\
import pytest
@@ -92,7 +97,9 @@ def test_slow_with_benchmark(state_test: StateTestFiller, pre: Alloc):
def test_slow_with_existing_pre_alloc_unchanged(pytester, default_t8n: TransitionTool):
- """Test that slow tests with existing pre_alloc_group marker are unchanged."""
+ """
+ Test that slow tests with existing pre_alloc_group marker are unchanged.
+ """
test_module = textwrap.dedent(
"""\
import pytest
@@ -176,7 +183,9 @@ def test_normal_speed(state_test: StateTestFiller, pre: Alloc):
def test_integration_with_fill(pytester, default_t8n: TransitionTool):
- """Integration test using actual fill command to verify marker application."""
+ """
+ Integration test using actual fill command to verify marker application.
+ """
test_module = textwrap.dedent(
"""\
import pytest
@@ -221,10 +230,15 @@ def test_slow_for_integration(state_test: StateTestFiller, pre: Alloc):
"tests/cancun/slow_test_module/",
]
- # The test generates 3 formats (state_test, blockchain_test, blockchain_test_engine)
- # But it also runs on multiple forks (Cancun and Prague), so expect more tests
- # This is fine - the important thing is that they all pass
+ # The test generates 3 formats (state_test, blockchain_test,
+ # blockchain_test_engine).
+
+ # But it also runs on multiple forks (Cancun and
+ # Prague), so expect more tests.
+
+ # This is fine - the important thing is that they all pass.
result = pytester.runpytest(*args)
- # Verify that tests passed (don't care about exact count due to fork variations)
+ # Verify that tests passed (don't care about exact count due to fork
+ # variations)
assert result.ret == 0, "Fill command should succeed"
diff --git a/src/pytest_plugins/filler/tests/test_verify_sync_marker.py b/src/pytest_plugins/filler/tests/test_verify_sync_marker.py
index 2ecc167d621..83e7450dfb8 100644
--- a/src/pytest_plugins/filler/tests/test_verify_sync_marker.py
+++ b/src/pytest_plugins/filler/tests/test_verify_sync_marker.py
@@ -72,9 +72,12 @@ def test_verify_sync_marker(
Test blockchain sync fixture generation with verify_sync marker.
The test module has 3 test functions (4 test cases with parametrization):
- - test_verify_sync_default: generates all formats except sync (no verify_sync marker)
- - test_verify_sync_with_marker: generates all formats including sync (has verify_sync marker)
- - test_verify_sync_with_param_marks: tests parametrized marks with verify_sync (2 cases)
+ - test_verify_sync_default: generates all formats except sync
+ (no verify_sync marker)
+ - test_verify_sync_with_marker: generates all formats including sync
+ (has verify_sync marker)
+ - test_verify_sync_with_param_marks: tests parametrized marks with
+ verify_sync (2 cases)
Each test generates fixture formats:
- BlockchainFixture (always)
@@ -83,13 +86,15 @@ def test_verify_sync_marker(
Expected outcomes:
- 4 test cases total
- - Each generates BlockchainFixture (4) and BlockchainEngineFixture (4) = 8 fixtures
+ - Each generates BlockchainFixture (4) and BlockchainEngineFixture (4) =
+ 8 fixtures
+
- Sync fixtures:
- - test_verify_sync_with_marker: 1 sync fixture ✓
- - test_verify_sync_with_param_marks[no_exception]: 1 sync fixture ✓
- - Total sync fixtures: 2
- - Not generated (due to exception_test marker):
- - test_verify_sync_with_param_marks[with_exception]: sync fixture not generated
+ - test_verify_sync_with_marker: 1 sync fixture ✓
+ - test_verify_sync_with_param_marks[no_exception]: 1 sync fixture ✓
+    - Total sync fixtures: 2. Not generated (due to exception_test marker):
+ - test_verify_sync_with_param_marks[with_exception]: sync fixture
+ not generated
Final counts:
- Passed: 8 (base fixtures) + 2 (sync fixtures) = 10 passed
diff --git a/src/pytest_plugins/filler/witness.py b/src/pytest_plugins/filler/witness.py
index fc21e3dceeb..fe88ab282c0 100644
--- a/src/pytest_plugins/filler/witness.py
+++ b/src/pytest_plugins/filler/witness.py
@@ -1,8 +1,9 @@
"""
Pytest plugin for witness functionality.
-Provides --witness command-line option that checks for the witness-filler tool in PATH
-and generates execution witness data for blockchain test fixtures when enabled.
+Provides --witness command-line option that checks for the witness-filler tool
+in PATH and generates execution witness data for blockchain test fixtures when
+enabled.
"""
import shutil
@@ -17,7 +18,9 @@
class WitnessFillerResult(EthereumTestRootModel[List[WitnessChunk]]):
- """Model that defines the expected result from the `witness-filler` command."""
+ """
+ Model that defines the expected result from the `witness-filler` command.
+ """
root: List[WitnessChunk]
@@ -26,9 +29,9 @@ class Merge(Paris):
"""
Paris fork that serializes as 'Merge' for witness-filler compatibility.
- IMPORTANT: This class MUST be named 'Merge' (not 'MergeForWitness' or similar)
- because the class name is used directly in Pydantic serialization, and
- witness-filler expects exactly 'Merge' for this fork.
+ IMPORTANT: This class MUST be named 'Merge' (not 'MergeForWitness' or
+ similar) because the class name is used directly in Pydantic serialization,
+ and witness-filler expects exactly 'Merge' for this fork.
"""
pass
@@ -54,7 +57,8 @@ def pytest_configure(config):
"""
Pytest hook called after command line options have been parsed.
- If --witness is enabled, checks that the witness-filler tool is available in PATH.
+ If --witness is enabled, checks that the witness-filler tool is available
+ in PATH.
"""
if config.getoption("witness"):
# Check if witness-filler binary is available in PATH
@@ -75,20 +79,22 @@ def witness_generator(
"""
Provide a witness generator function if --witness is enabled.
- Returns:
- None if witness functionality is disabled.
- Callable that generates witness data for a BlockchainFixture if enabled.
-
+    Returns: None if witness functionality is disabled; otherwise a callable
+    that generates witness data for a BlockchainFixture.
"""
if not request.config.getoption("witness"):
return None
def generate_witness(fixture: BlockchainFixture) -> None:
- """Generate witness data for a blockchain fixture using the witness-filler tool."""
+ """
+ Generate witness data for a blockchain fixture using the witness-filler
+ tool.
+ """
if not isinstance(fixture, BlockchainFixture):
return None
- # Hotfix: witness-filler expects "Merge" but execution-spec-tests uses "Paris"
+ # Hotfix: witness-filler expects "Merge" but execution-spec-tests uses
+ # "Paris"
original_fork = None
if fixture.fork is Paris:
original_fork = fixture.fork
diff --git a/src/pytest_plugins/fix_package_test_path.py b/src/pytest_plugins/fix_package_test_path.py
index a8503deeaa1..c54580ee2f2 100644
--- a/src/pytest_plugins/fix_package_test_path.py
+++ b/src/pytest_plugins/fix_package_test_path.py
@@ -1,6 +1,6 @@
"""
-Pytest plugin to fix the test IDs for all pytest command that use a command-logic test
-file.
+Pytest plugin to fix the test IDs for all pytest command that use a
+command-logic test file.
"""
from typing import List
@@ -9,7 +9,10 @@
def pytest_collection_modifyitems(items: List[pytest.Item]):
- """Modify collected item names to remove the test runner function from the name."""
+ """
+ Modify collected item names to remove the test runner function from the
+ name.
+ """
for item in items:
original_name = item.originalname # type: ignore
remove = f"{original_name}["
diff --git a/src/pytest_plugins/forks/forks.py b/src/pytest_plugins/forks/forks.py
index 6147b981de3..52346f366f9 100644
--- a/src/pytest_plugins/forks/forks.py
+++ b/src/pytest_plugins/forks/forks.py
@@ -87,10 +87,12 @@ def __init__(
Initialize a new fork parametrizer object for a given fork.
Args:
- fork: The fork for which the test cases will be parametrized.
- marks: A list of pytest marks to apply to all the test cases parametrized by the fork.
- fork_covariant_parameters: A list of fork covariant parameters for the test case, for
- unit testing purposes only.
+ fork: The fork for which the test cases will be parametrized.
+ marks: A list of pytest marks to apply to all the test cases
+ parametrized by the fork.
+ fork_covariant_parameters: A list of fork covariant parameters
+ for the test case, for unit testing
+ purposes only.
"""
if marks is None:
@@ -122,7 +124,8 @@ def argnames(self) -> List[str]:
def argvalues(self) -> List[ParameterSet]:
"""Return the parameter values for the test case."""
parameter_set_combinations = itertools.product(
- # Add the values for each parameter, all of them are lists of at least one element.
+ # Add the values for each parameter, all of them are lists of at
+ # least one element.
*[p.values for p in self.fork_covariant_parameters],
)
@@ -148,8 +151,8 @@ def argvalues(self) -> List[ParameterSet]:
class CovariantDescriptor:
"""
- A descriptor for a parameter that is covariant with the fork:
- the parametrized values change depending on the fork.
+ A descriptor for a parameter that is covariant with the fork: the
+ parametrized values change depending on the fork.
"""
argnames: List[str] = []
@@ -175,11 +178,13 @@ def __init__(
Initialize a new covariant descriptor.
Args:
- argnames: The names of the parameters that are covariant with the fork.
- fn: A function that takes the fork as the single parameter and returns the values for
- the parameter for each fork.
- selector: A function that filters the values for the parameter.
- marks: A list of pytest marks to apply to the test cases parametrized by the parameter.
+ argnames: The names of the parameters that are covariant with the
+ fork.
+ fn: A function that takes the fork as the single parameter and
+ returns the values for the parameter for each fork.
+ selector: A function that filters the values for the parameter.
+ marks: A list of pytest marks to apply to the test cases
+ parametrized by the parameter.
"""
self.argnames = (
@@ -226,8 +231,8 @@ def process_values(self, values: Iterable[Any]) -> List[ParameterSet]:
"""
Filter the values for the covariant parameter.
- I.e. if the marker has an argument, the argument is interpreted as a lambda function
- that filters the values.
+ I.e. if the marker has an argument, the argument is interpreted as a
+ lambda function that filters the values.
"""
processed_values: List[ParameterSet] = []
for value in values:
@@ -251,18 +256,21 @@ def add_values(self, fork_parametrizer: ForkParametrizer) -> None:
class CovariantDecorator(CovariantDescriptor):
"""
- A marker used to parametrize a function by a covariant parameter with the values
- returned by a fork method.
+ A marker used to parametrize a function by a covariant parameter with the
+ values returned by a fork method.
- The decorator must be subclassed with the appropriate class variables before initialization.
+ The decorator must be subclassed with the appropriate class variables
+ before initialization.
Attributes:
- marker_name: Name of the marker.
- description: Description of the marker.
- fork_attribute_name: Name of the method to call on the fork to get the values.
- marker_parameter_names: Names of the parameters to be parametrized in the test function.
- indirect: Whether the parameters should be passed through fixtures (indirect
- parametrization).
+ marker_name: Name of the marker.
+ description: Description of the marker.
+ fork_attribute_name: Name of the method to call on the fork to
+ get the values.
+ marker_parameter_names: Names of the parameters to be parametrized
+ in the test function.
+ indirect: Whether the parameters should be passed through fixtures
+ (indirect parametrization).
"""
@@ -276,11 +284,12 @@ def __init__(self, metafunc: Metafunc):
"""
Initialize the covariant decorator.
- The decorator must already be subclassed with the appropriate class variables before
- initialization.
+ The decorator must already be subclassed with the appropriate class
+ variables before initialization.
Args:
- metafunc: The metafunc object that pytest uses when generating tests.
+ metafunc: The metafunc object that pytest uses when generating
+ tests.
"""
self.metafunc = metafunc
@@ -408,7 +417,8 @@ def pytest_configure(config: pytest.Config):
Register the plugin's custom markers and process command-line options.
Custom marker registration:
- https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
+    https://docs.pytest.org/en/7.1.x/how-to/
+    writing_plugins.html#registering-custom-markers
"""
config.addinivalue_line(
"markers",
@@ -565,7 +575,10 @@ def fork(request):
@pytest.fixture(scope="session")
def session_fork(request: pytest.FixtureRequest) -> Fork | None:
- """Session-wide fork object used if the plugin is configured in single-fork mode."""
+ """
+ Session-wide fork object used if the plugin is configured in single-fork
+ mode.
+ """
if hasattr(request.config, "single_fork_mode") and request.config.single_fork_mode:
return list(request.config.selected_fork_set)[0] # type: ignore
raise AssertionError(
@@ -584,12 +597,14 @@ class ValidityMarker(ABC):
Subclassing this class allows for the creation of new validity markers.
- Instantiation must be done per test function, and the `process` method must be called to
- process the fork arguments.
+ Instantiation must be done per test function, and the `process` method must
+ be called to process the fork arguments.
When subclassing, the following optional parameters can be set:
- - marker_name: Name of the marker, if not set, the class name is converted to underscore.
- - mutually_exclusive: List of other marker types incompatible with this one.
+ - marker_name: Name of the marker, if not set, the class name is
+ converted to underscore.
+ - mutually_exclusive: List of other marker types incompatible
+ with this one.
- flag: Whether the marker is a flag and should always be included.
"""
@@ -609,7 +624,8 @@ def __init_subclass__(
"""Register the validity marker subclass."""
super().__init_subclass__(**kwargs)
if marker_name is None:
- # Use the class name converted to underscore: https://stackoverflow.com/a/1176023
+ # Use the class name converted to underscore:
+ # https://stackoverflow.com/a/1176023
marker_name = MARKER_NAME_REGEX.sub("_", cls.__name__).lower()
cls.marker_name = marker_name
cls.mutually_exclusive = mutually_exclusive if mutually_exclusive else []
@@ -661,14 +677,18 @@ def get_all_validity_markers(markers: Iterator[pytest.Mark]) -> List["ValidityMa
@staticmethod
def get_test_fork_set(validity_markers: List["ValidityMarker"]) -> Set[Fork]:
- """Get the set of forks where a test is valid from the validity markers and filters."""
+ """
+ Get the set of forks where a test is valid from the validity markers
+ and filters.
+ """
if not len(
[validity_marker for validity_marker in validity_markers if not validity_marker.flag]
):
# Limit to non-transition forks if no validity markers were applied
test_fork_set = set(ALL_FORKS)
else:
- # Start with all forks and transitions if any validity markers were applied
+ # Start with all forks and transitions if any validity markers were
+ # applied
test_fork_set = set(ALL_FORKS_WITH_TRANSITIONS)
for v in validity_markers:
@@ -679,14 +699,20 @@ def get_test_fork_set(validity_markers: List["ValidityMarker"]) -> Set[Fork]:
@staticmethod
def get_test_fork_set_from_markers(markers: Iterator[pytest.Mark]) -> Set[Fork]:
- """Get the set of forks where a test is valid using the markers applied to the test."""
+ """
+ Get the set of forks where a test is valid using the markers applied to
+ the test.
+ """
return ValidityMarker.get_test_fork_set(ValidityMarker.get_all_validity_markers(markers))
@staticmethod
def get_test_fork_set_from_metafunc(
metafunc: Metafunc,
) -> Set[Fork]:
- """Get the set of forks where a test is valid using its pytest meta-function."""
+ """
+ Get the set of forks where a test is valid using its pytest
+ meta-function.
+ """
return ValidityMarker.get_test_fork_set_from_markers(metafunc.definition.iter_markers())
@staticmethod
@@ -711,16 +737,17 @@ def _process_with_marker_args(self, *args, **kwargs) -> Set[Fork]:
Method must be implemented by the subclass.
- If the validity marker is of flag type, the returned forks will be subtracted from the
- fork set, otherwise the returned forks will be intersected with the current set.
+ If the validity marker is of flag type, the returned forks will be
+ subtracted from the fork set, otherwise the returned forks will be
+ intersected with the current set.
"""
pass
class ValidFrom(ValidityMarker):
"""
- Marker used to specify the fork from which the test is valid. The test will not be filled for
- forks before the specified fork.
+ Marker used to specify the fork from which the test is valid. The test will
+ not be filled for forks before the specified fork.
```python
import pytest
@@ -735,8 +762,8 @@ def test_something_only_valid_after_london(
pass
```
- In this example, the test will only be filled for the London fork and after, e.g. London,
- Paris, Shanghai, Cancun, etc.
+ In this example, the test will only be filled for the London fork and
+ after, e.g. London, Paris, Shanghai, Cancun, etc.
"""
def _process_with_marker_args(self, *fork_args) -> Set[Fork]:
@@ -750,8 +777,8 @@ def _process_with_marker_args(self, *fork_args) -> Set[Fork]:
class ValidUntil(ValidityMarker):
"""
- Marker to specify the fork until which the test is valid. The test will not be filled for
- forks after the specified fork.
+ Marker to specify the fork until which the test is valid. The test will not
+ be filled for forks after the specified fork.
```python
import pytest
@@ -766,8 +793,8 @@ def test_something_only_valid_until_london(
pass
```
- In this example, the test will only be filled for the London fork and before, e.g. London,
- Berlin, Istanbul, etc.
+ In this example, the test will only be filled for the London fork and
+ before, e.g. London, Berlin, Istanbul, etc.
"""
def _process_with_marker_args(self, *fork_args) -> Set[Fork]:
@@ -796,7 +823,8 @@ def test_something_only_valid_at_london_and_cancun(
pass
```
- In this example, the test will only be filled for the London and Cancun forks.
+ In this example, the test will only be filled for the London and Cancun
+ forks.
"""
def _process_with_marker_args(self, *fork_args) -> Set[Fork]:
@@ -806,11 +834,12 @@ def _process_with_marker_args(self, *fork_args) -> Set[Fork]:
class ValidAtTransitionTo(ValidityMarker, mutually_exclusive=[ValidAt, ValidFrom, ValidUntil]):
"""
- Marker to specify that a test is only meant to be filled at the transition to the specified
- fork.
+ Marker to specify that a test is only meant to be filled at the transition
+ to the specified fork.
- The test usually starts at the fork prior to the specified fork at genesis and at block 5 (for
- pre-merge forks) or at timestamp 15,000 (for post-merge forks) the fork transition occurs.
+ The test usually starts at the fork prior to the specified fork at genesis
+ and at block 5 (for pre-merge forks) or at timestamp 15,000 (for post-merge
+ forks) the fork transition occurs.
```python
import pytest
@@ -825,36 +854,40 @@ def test_something_that_happens_during_the_fork_transition_to_london(
pass
```
- In this example, the test will only be filled for the fork that transitions to London at block
- number 5, `BerlinToLondonAt5`, and no other forks.
+ In this example, the test will only be filled for the fork that transitions
+ to London at block number 5, `BerlinToLondonAt5`, and no other forks.
- To see or add a new transition fork, see the `ethereum_test_forks.forks.transition` module.
+ To see or add a new transition fork, see the
+ `ethereum_test_forks.forks.transition` module.
- Note that the test uses a `BlockchainTestFiller` fixture instead of a `StateTestFiller`,
- as the transition forks are used to test changes throughout the blockchain progression, and
- not just the state change of a single transaction.
+ Note that the test uses a `BlockchainTestFiller` fixture instead of a
+ `StateTestFiller`, as the transition forks are used to test changes
+ throughout the blockchain progression, and not just the state change of a
+ single transaction.
This marker also accepts the following keyword arguments:
- - `subsequent_transitions`: Force the test to also fill for subsequent fork transitions.
- - `until`: Implies `subsequent_transitions` and puts a limit on which transition fork will the
- test filling will be limited to.
+ - `subsequent_transitions`: Force the test to also fill for subsequent fork
+ transitions.
+    - `until`: Implies `subsequent_transitions` and puts a limit on which
+    transition fork the test filling will be limited to.
For example:
```python
@pytest.mark.valid_at_transition_to("Cancun", subsequent_transitions=True)
```
- produces tests on `ShanghaiToCancunAtTime15k` and `CancunToPragueAtTime15k`, and any transition
- fork after that.
+ produces tests on `ShanghaiToCancunAtTime15k` and
+ `CancunToPragueAtTime15k`, and any transition fork after that.
And:
```python
- @pytest.mark.valid_at_transition_to("Cancun", subsequent_transitions=True, until="Prague")
+ @pytest.mark.valid_at_transition_to("Cancun",
+ subsequent_transitions=True, until="Prague")
```
- produces tests on `ShanghaiToCancunAtTime15k` and `CancunToPragueAtTime15k`, but no forks after
- Prague.
+ produces tests on `ShanghaiToCancunAtTime15k` and
+ `CancunToPragueAtTime15k`, but no forks after Prague.
"""
def _process_with_marker_args(
@@ -984,7 +1017,10 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
def add_fork_covariant_parameters(
metafunc: Metafunc, fork_parametrizers: List[ForkParametrizer]
) -> None:
- """Iterate over the fork covariant descriptors and add their values to the test function."""
+ """
+ Iterate over the fork covariant descriptors and add their values to the
+ test function.
+ """
# Process all covariant decorators uniformly
for covariant_descriptor in fork_covariant_decorators:
if list(metafunc.definition.iter_markers(covariant_descriptor.marker_name)):
diff --git a/src/pytest_plugins/forks/tests/test_bad_command_line_options.py b/src/pytest_plugins/forks/tests/test_bad_command_line_options.py
index 3799dbb570a..5d45dd2427b 100644
--- a/src/pytest_plugins/forks/tests/test_bad_command_line_options.py
+++ b/src/pytest_plugins/forks/tests/test_bad_command_line_options.py
@@ -1,6 +1,5 @@
"""
-Test that the correct error is produced if bad/invalid command-line
-arguments are used.
+Test the correct error is produced with bad/invalid command-line arguments.
"""
import pytest
@@ -59,11 +58,11 @@
def test_bad_options(pytester, options, error_string):
"""
Test that a test with an invalid command-line options:
- - Creates an outcome with exactly one error.
- - Triggers the expected error string in pytest's console output.
+ - Creates an outcome with exactly one error.
+ - Triggers the expected error string in pytest's console output.
- Each invalid marker/marker combination is tested with one test in its own test
- session.
+ Each invalid marker/marker combination is tested with one test in its own
+ test session.
"""
pytester.makepyfile(
"""
diff --git a/src/pytest_plugins/forks/tests/test_bad_validity_markers.py b/src/pytest_plugins/forks/tests/test_bad_validity_markers.py
index 7aea95220c2..1e33c30aedd 100644
--- a/src/pytest_plugins/forks/tests/test_bad_validity_markers.py
+++ b/src/pytest_plugins/forks/tests/test_bad_validity_markers.py
@@ -1,4 +1,6 @@
-"""Test that the correct error is produced if bad/invalid validity markers are specified."""
+"""
+Test the correct error is produced with bad/invalid validity markers.
+"""
import pytest
@@ -214,11 +216,11 @@ def test_case(state_test):
def test_invalid_validity_markers(pytester, error_string, test_function):
"""
Test that a test with an invalid marker cases:
- - Creates an outcome with exactly one error.
- - Triggers the expected error string in pytest's console output.
+ - Creates an outcome with exactly one error.
+ - Triggers the expected error string in pytest's console output.
- Each invalid marker/marker combination is tested with one test in its own test
- session.
+ Each invalid marker/marker combination is tested with one test in its own
+ test session.
"""
pytester.makepyfile(test_function)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
diff --git a/src/pytest_plugins/forks/tests/test_fork_parametrizer_types.py b/src/pytest_plugins/forks/tests/test_fork_parametrizer_types.py
index e655b46605a..94d6eb5eb1a 100644
--- a/src/pytest_plugins/forks/tests/test_fork_parametrizer_types.py
+++ b/src/pytest_plugins/forks/tests/test_fork_parametrizer_types.py
@@ -188,7 +188,9 @@ def test_fork_parametrizer(
expected_names: List[str],
expected_parameter_sets: List[ParameterSet],
):
- """Test that the fork parametrizer correctly parametrizes tests based on the fork name."""
+ """
+ Test the fork parametrizer correctly parametrizes using the fork name.
+ """
argnames, values = parameters_from_fork_parametrizer_list(fork_parametrizers)
assert argnames == expected_names
assert len(values) == len(expected_parameter_sets)
diff --git a/src/pytest_plugins/help/__init__.py b/src/pytest_plugins/help/__init__.py
index c1a6507e991..2e861b4d85e 100644
--- a/src/pytest_plugins/help/__init__.py
+++ b/src/pytest_plugins/help/__init__.py
@@ -1 +1,3 @@
-"""Pytest plugin that prints help defined in other execution-spec-tests plugins."""
+"""
+Pytest plugin that prints help defined in other execution-spec-tests plugins.
+"""
diff --git a/src/pytest_plugins/help/help.py b/src/pytest_plugins/help/help.py
index b70c0357a55..808584e3149 100644
--- a/src/pytest_plugins/help/help.py
+++ b/src/pytest_plugins/help/help.py
@@ -65,7 +65,9 @@ def pytest_addoption(parser):
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
- """Handle specific help flags by displaying the corresponding help message."""
+ """
+ Handle specific help flags by displaying the corresponding help message.
+ """
if config.getoption("show_check_eip_versions_help"):
show_specific_help(
config,
@@ -143,7 +145,10 @@ def pytest_configure(config):
def show_specific_help(config, expected_ini, substrings):
- """Print help options filtered by specific substrings from the given configuration."""
+ """
+ Print help options filtered by specific substrings from the given
+ configuration.
+ """
pytest_ini = Path(config.inifile)
if pytest_ini.name != expected_ini:
raise ValueError(
diff --git a/src/pytest_plugins/help/tests/test_help.py b/src/pytest_plugins/help/tests/test_help.py
index 9b7b45ae6c8..3ed648720b2 100644
--- a/src/pytest_plugins/help/tests/test_help.py
+++ b/src/pytest_plugins/help/tests/test_help.py
@@ -18,8 +18,8 @@
@pytest.mark.parametrize("help_flag", ["--fill-help"])
def test_local_arguments_present_in_fill_help(pytester, help_flag):
"""
- Test that locally defined command-line flags appear in the help if
- our custom help flag is used.
+ Test that locally defined command-line flags appear in the help if our
+ custom help flag is used.
"""
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest("-c", "pytest-fill.ini", help_flag)
@@ -43,7 +43,10 @@ def test_local_arguments_present_in_fill_help(pytester, help_flag):
],
)
def test_local_arguments_present_in_base_consume_help(pytester, help_flag, command):
- """Test that locally defined command-line flags appear in the help for consume subcommands."""
+ """
+ Test that locally defined command-line flags appear in the help for consume
+ subcommands.
+ """
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-consume.ini")
result = pytester.runpytest("-c", "pytest-consume.ini", command, help_flag)
for test_arg in CONSUME_TEST_ARGS:
diff --git a/src/pytest_plugins/pytest_hive/pytest_hive.py b/src/pytest_plugins/pytest_hive/pytest_hive.py
index 34dfd788095..b1624391ef4 100644
--- a/src/pytest_plugins/pytest_hive/pytest_hive.py
+++ b/src/pytest_plugins/pytest_hive/pytest_hive.py
@@ -10,23 +10,28 @@
Log Capture Architecture:
-------------------------
-This module implements a log capture approach that ensures all logs, including those
-generated during fixture teardown, are properly captured and included in the test results.
-
-The key insight is that we need to ensure that test finalization happens *before* the
-test suite is finalized, but *after* all fixtures have been torn down so we can capture
-their logs. This is accomplished through the fixture teardown mechanism in pytest:
-
-1. Since the `hive_test` fixture depends on the `test_suite` fixture, pytest guarantees
- that the teardown of `hive_test` runs before the teardown of `test_suite`
-2. All logs are processed and the test is finalized in the teardown phase of the
- `hive_test` fixture using the pytest test report data
-3. This sequencing ensures that all logs are captured and the test is properly finalized
- before its parent test suite is finalized
-
-This approach relies on the pytest fixture dependency graph and teardown ordering to
-ensure proper sequencing, which is more reliable than using hooks which might run in
-an unpredictable order relative to fixture teardown.
+This module implements a log capture approach that ensures all logs,
+including those generated during fixture teardown, are properly
+captured and included in the test results.
+
+The key insight is that we need to ensure that test finalization happens
+*before* the test suite is finalized, but *after* all fixtures have been torn
+down so we can capture their logs. This is accomplished through the fixture
+teardown mechanism in pytest:
+
+1. Since the `hive_test` fixture depends on the `test_suite` fixture, pytest
+guarantees that the teardown of `hive_test` runs before the teardown of
+`test_suite`
+
+2. All logs are processed and the test is finalized in the
+teardown phase of the `hive_test` fixture using the pytest test report data
+
+3. This sequencing ensures that all logs are captured and the test is properly
+finalized before its parent test suite is finalized
+
+This approach relies on the pytest fixture dependency graph and teardown
+ordering to ensure proper sequencing, which is more reliable than using hooks
+which might run in an unpredictable order relative to fixture teardown.
"""
import json
@@ -59,8 +64,8 @@ def pytest_configure(config): # noqa: D103
"or in fish:\n"
"set -x HIVE_SIMULATOR http://127.0.0.1:3000"
)
- # TODO: Try and get these into fixtures; this is only here due to the "dynamic" parametrization
- # of client_type with hive_execution_clients.
+ # TODO: Try and get these into fixtures; this is only here due to the
+ # "dynamic" parametrization of client_type with hive_execution_clients.
config.hive_simulator_url = hive_simulator_url
config.hive_simulator = Simulation(url=hive_simulator_url)
try:
@@ -133,8 +138,8 @@ def pytest_report_header(config, start_path):
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""
- Make the setup, call, and teardown results available in the teardown phase of
- a test fixture (i.e., after yield has been called).
+ Make the setup, call, and teardown results available in the teardown phase
+ of a test fixture (i.e., after yield has been called).
This is used to get the test result and pass it to the hive test suite.
@@ -238,10 +243,11 @@ def hive_test(request, test_suite: HiveTestSuite):
"""
Propagate the pytest test case and its result to the hive server.
- This fixture handles both starting the test and ending it with all logs, including
- those generated during teardown of other fixtures. The approach of processing teardown
- logs directly in the teardown phase of this fixture ensures that the test gets properly
- finalized before the test suite is torn down.
+ This fixture handles both starting the test and ending it with all logs,
+ including those generated during teardown of other fixtures. The approach
+ of processing teardown logs directly in the teardown phase of this fixture
+ ensures that the test gets properly finalized before the test suite is torn
+ down.
"""
try:
test_case_description = request.getfixturevalue("test_case_description")
diff --git a/src/pytest_plugins/shared/benchmarking.py b/src/pytest_plugins/shared/benchmarking.py
index 6f198776655..c5688476b39 100644
--- a/src/pytest_plugins/shared/benchmarking.py
+++ b/src/pytest_plugins/shared/benchmarking.py
@@ -55,7 +55,10 @@ def gas_benchmark_value(request: pytest.FixtureRequest) -> int:
@pytest.fixture
def genesis_environment(request: pytest.FixtureRequest) -> Environment: # noqa: D103
- """Return an Environment instance with appropriate gas limit based on test type."""
+ """
+ Return an Environment instance with appropriate gas limit based on test
+ type.
+ """
if request.node.get_closest_marker("benchmark") is not None:
return Environment(gas_limit=BENCHMARKING_MAX_GAS)
return Environment()
@@ -63,7 +66,10 @@ def genesis_environment(request: pytest.FixtureRequest) -> Environment: # noqa:
@pytest.fixture
def env(request: pytest.FixtureRequest) -> Environment: # noqa: D103
- """Return an Environment instance with appropriate gas limit based on test type."""
+ """
+ Return an Environment instance with appropriate gas limit based on test
+ type.
+ """
if request.node.get_closest_marker("benchmark") is not None:
return Environment(gas_limit=BENCHMARKING_MAX_GAS)
return Environment()
diff --git a/src/pytest_plugins/shared/execute_fill.py b/src/pytest_plugins/shared/execute_fill.py
index 21b9b7ea5ba..0df88c2ef38 100644
--- a/src/pytest_plugins/shared/execute_fill.py
+++ b/src/pytest_plugins/shared/execute_fill.py
@@ -1,4 +1,6 @@
-"""Shared pytest fixtures and hooks for EEST generation modes (fill and execute)."""
+"""
+Shared pytest fixtures and hooks for EEST generation modes (fill and execute).
+"""
from typing import List
@@ -18,11 +20,12 @@
"env",
}
"""
-List of test parameters that have a default fixture value which can be retrieved and used
-for the test instance if it was not explicitly specified when calling from the test
-function.
+List of test parameters that have a default fixture value which can be
+retrieved and used for the test instance if it was not explicitly specified
+when calling from the test function.
-All parameter names included in this list must define a fixture in one of the plugins.
+All parameter names included in this list must define a fixture in one of the
+plugins.
"""
@@ -35,12 +38,12 @@ def pytest_configure(config: pytest.Config):
Couple of notes:
1. Register the plugin's custom markers and process command-line options.
- Custom marker registration:
- https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
+ Custom marker registration:
+ https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
- called before the pytest-html plugin's pytest_configure to ensure that
- it uses the modified `htmlpath` option.
+ called before the pytest-html plugin's pytest_configure to ensure that
+ it uses the modified `htmlpath` option.
"""
if config.pluginmanager.has_plugin("pytest_plugins.filler.filler"):
for fixture_format in BaseFixture.formats.values():
@@ -154,7 +157,10 @@ def pytest_configure(config: pytest.Config):
@pytest.fixture(scope="function")
def test_case_description(request: pytest.FixtureRequest) -> str:
- """Fixture to extract and combine docstrings from the test class and the test function."""
+ """
+ Fixture to extract and combine docstrings from the test class and the test
+ function.
+ """
description_unavailable = (
"No description available - add a docstring to the python test class or function."
)
@@ -172,8 +178,8 @@ def test_case_description(request: pytest.FixtureRequest) -> str:
def pytest_make_parametrize_id(config: pytest.Config, val: str, argname: str):
"""
- Pytest hook called when generating test ids. We use this to generate
- more readable test ids for the generated tests.
+ Pytest hook called when generating test ids. We use this to generate more
+ readable test ids for the generated tests.
"""
del config
return f"{argname}_{val}"
diff --git a/src/pytest_plugins/shared/helpers.py b/src/pytest_plugins/shared/helpers.py
index bbe720b652f..3c55d59376f 100644
--- a/src/pytest_plugins/shared/helpers.py
+++ b/src/pytest_plugins/shared/helpers.py
@@ -30,11 +30,11 @@ def labeled_format_parameter_set(
| FixtureFormat,
) -> ParameterSet:
"""
- Return a parameter set from a fixture/execute format and parses a label if there's
- any.
+    Return a parameter set from a fixture/execute format and parse a label if
+    there is one.
- The label will be used in the test id and also will be added as a marker to the
- generated test case when filling/executing the test.
+ The label will be used in the test id and also will be added as a marker to
+ the generated test case when filling/executing the test.
"""
if isinstance(format_with_or_without_label, LabeledExecuteFormat) or isinstance(
format_with_or_without_label, LabeledFixtureFormat
diff --git a/src/pytest_plugins/shared/transaction_fixtures.py b/src/pytest_plugins/shared/transaction_fixtures.py
index be3b16a6d6d..cfe0cd6ef52 100644
--- a/src/pytest_plugins/shared/transaction_fixtures.py
+++ b/src/pytest_plugins/shared/transaction_fixtures.py
@@ -125,10 +125,12 @@ def type_4_default_transaction(sender, pre):
@pytest.fixture
def typed_transaction(request, fork):
"""
- Fixture that provides a Transaction object based on the parametrized tx type.
+ Fixture that provides a Transaction object based on the parametrized tx
+ type.
- This fixture works with the @pytest.mark.with_all_typed_transactions marker,
- which parametrizes the test with all transaction types supported by the fork.
+ This fixture works with the @pytest.mark.with_all_typed_transactions
+ marker, which parametrizes the test with all transaction types supported by
+ the fork.
The actual transaction type value comes from the marker's parametrization.
"""
diff --git a/src/pytest_plugins/solc/solc.py b/src/pytest_plugins/solc/solc.py
index e46d195478f..692c5237c9a 100644
--- a/src/pytest_plugins/solc/solc.py
+++ b/src/pytest_plugins/solc/solc.py
@@ -85,7 +85,8 @@ def pytest_configure(config: pytest.Config):
# Extract version number
try:
- # --version format is typically something like "0.8.24+commit.e11b9ed9.Linux.g++"
+ # --version format is typically something like
+ # "0.8.24+commit.e11b9ed9.Linux.g++"
version_str = version_line.split()[1].split("+")[0]
solc_version_semver = Version.parse(version_str)
except (IndexError, ValueError) as e:
diff --git a/src/pytest_plugins/spec_version_checker/spec_version_checker.py b/src/pytest_plugins/spec_version_checker/spec_version_checker.py
index 4091e88229e..b5fb41c93d7 100644
--- a/src/pytest_plugins/spec_version_checker/spec_version_checker.py
+++ b/src/pytest_plugins/spec_version_checker/spec_version_checker.py
@@ -70,18 +70,17 @@ def get_ref_spec_from_module(
Return the reference spec object defined in a module.
Args:
- module: The module to extract reference spec from
- github_token: Optional GitHub token for API authentication
+ module: The module to extract reference spec from
+ github_token: Optional GitHub token for API authentication
Raises:
- Exception: If the module path contains "eip" and the module
- does not define a reference spec.
+ Exception: If the module path contains "eip" and the module does
+ not define a reference spec.
Returns:
- spec_obj: Return None if the module path does not contain "eip",
- i.e., the module is not required to define a reference spec,
- otherwise, return the ReferenceSpec object as defined by the
- module.
+ spec_obj: Return None if the module path does not contain "eip",
+ i.e., the module is not required to define a reference spec, otherwise,
+ return the ReferenceSpec object as defined by the module.
"""
if not is_test_for_an_eip(str(module.__file__)):
@@ -115,13 +114,12 @@ def is_test_for_an_eip(input_string: str) -> bool:
def test_eip_spec_version(module: ModuleType, github_token: Optional[str] = None):
"""
- Test that the ReferenceSpec object as defined in the test module
- is not outdated when compared to the remote hash from
- ethereum/EIPs.
+ Test that the ReferenceSpec object as defined in the test module is not
+ outdated when compared to the remote hash from ethereum/EIPs.
Args:
- module: Module to test
- github_token: Optional GitHub token for API authentication
+ module: Module to test
+ github_token: Optional GitHub token for API authentication
"""
ref_spec = get_ref_spec_from_module(module, github_token=github_token)
@@ -157,9 +155,9 @@ def __init__(self, name: str, parent: Node, **kwargs: Any):
Initialize the test item.
Args:
- name: Name of the test
- parent: Parent node
- **kwargs: Additional keyword arguments
+ name: Name of the test
+ parent: Parent node
+ **kwargs: Additional keyword arguments
"""
super().__init__(name, parent)
@@ -202,12 +200,11 @@ def reportinfo(self) -> tuple[str, int, str]:
return "spec_version_checker", 0, f"{self.name}"
-def pytest_collection_modifyitems(
- session: pytest.Session, config: pytest.Config, items: List[Item]
-):
- """Insert a new test EIPSpecTestItem for every test module with 'eip' in its path."""
- del session
-
+def pytest_collection_modifyitems(config: pytest.Config, items: List[Item]):
+ """
+ Insert a new test EIPSpecTestItem for every test module with 'eip' in its
+ path.
+ """
github_token = config.github_token if hasattr(config, "github_token") else None
modules: Set[Module] = {item.parent for item in items if isinstance(item.parent, Module)}
diff --git a/tests/amsterdam/__init__.py b/tests/amsterdam/__init__.py
index b4d638e152d..89848292b52 100644
--- a/tests/amsterdam/__init__.py
+++ b/tests/amsterdam/__init__.py
@@ -1 +1,4 @@
-"""Test cases for EVM functionality introduced in Amsterdam, [EIP-7773: Hardfork Meta - Glamsterdam](https://eip.directory/eips/eip-7773).""" # noqa: E501
+"""
+Test cases for EVM functionality introduced in Amsterdam, [EIP-7773: Hardfork
+Meta - Glamsterdam](https://eip.directory/eips/eip-7773).
+"""
diff --git a/tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py b/tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py
index 7041a3bbcfc..34ae52f9036 100644
--- a/tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py
+++ b/tests/amsterdam/eip7928_block_level_access_lists/test_block_access_lists_invalid.py
@@ -1,7 +1,7 @@
"""
Test cases for invalid Block Access Lists.
-These tests verify that clients properly reject blocks with corrupted BALs
+These tests verify that clients properly reject blocks with corrupted BALs.
"""
import pytest
@@ -52,7 +52,10 @@ def test_bal_invalid_missing_nonce(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL is missing required nonce changes."""
+ """
+ Test that clients reject blocks where BAL is missing required nonce
+ changes.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
@@ -91,7 +94,9 @@ def test_bal_invalid_nonce_value(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL contains incorrect nonce value."""
+ """
+ Test that clients reject blocks where BAL contains incorrect nonce value.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
@@ -130,7 +135,10 @@ def test_bal_invalid_storage_value(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL contains incorrect storage values."""
+ """
+ Test that clients reject blocks where BAL contains incorrect storage
+ values.
+ """
sender = pre.fund_eoa(amount=10**18)
# Simple storage contract with canary values
@@ -190,7 +198,10 @@ def test_bal_invalid_tx_order(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL has incorrect transaction ordering."""
+ """
+ Test that clients reject blocks where BAL has incorrect transaction
+ ordering.
+ """
sender1 = pre.fund_eoa(amount=10**18)
sender2 = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
@@ -247,7 +258,10 @@ def test_bal_invalid_account(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL contains accounts that don't exist."""
+ """
+ Test that clients reject blocks where BAL contains accounts that don't
+ exist.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
phantom = pre.fund_eoa(amount=0)
@@ -295,7 +309,10 @@ def test_bal_invalid_duplicate_account(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL contains duplicate account entries."""
+ """
+ Test that clients reject blocks where BAL contains duplicate account
+ entries.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
@@ -337,7 +354,9 @@ def test_bal_invalid_account_order(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL has incorrect account ordering."""
+ """
+ Test that clients reject blocks where BAL has incorrect account ordering.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
@@ -454,7 +473,9 @@ def test_bal_invalid_missing_account(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL is missing an entire account."""
+ """
+ Test that clients reject blocks where BAL is missing an entire account.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
@@ -496,7 +517,9 @@ def test_bal_invalid_balance_value(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test that clients reject blocks where BAL contains incorrect balance value."""
+ """
+ Test that clients reject blocks where BAL contains incorrect balance value.
+ """
sender = pre.fund_eoa(amount=10**18)
receiver = pre.fund_eoa(amount=0)
diff --git a/tests/benchmark/__init__.py b/tests/benchmark/__init__.py
index 8c37b7d6e0f..ce3a8eda042 100644
--- a/tests/benchmark/__init__.py
+++ b/tests/benchmark/__init__.py
@@ -1,8 +1,9 @@
"""
-abstract: Benchmark tests for EVMs.
- Benchmark tests aim to maximize the usage of a specific opcode,
- precompile, or operation within a transaction or block. These can
- be executed against EVM implementations to ensure they handle
- pathological cases efficiently and correctly, allowing Ethereum to
- safely [Scale the L1](https://protocol.ethereum.foundation/).
+Benchmark tests for EVMs.
+
+Benchmark tests aim to maximize the usage of a specific opcode, precompile,
+or operation within a transaction or block. These can be executed against
+EVM implementations to ensure they handle pathological cases efficiently
+and correctly, allowing Ethereum to safely
+[Scale the L1](https://protocol.ethereum.foundation/).
"""
diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py
index 1e2e7813817..d51b47dda83 100644
--- a/tests/benchmark/conftest.py
+++ b/tests/benchmark/conftest.py
@@ -10,7 +10,10 @@
def pytest_generate_tests(metafunc):
- """Modify test generation to enforce default benchmark fork for benchmark tests."""
+ """
+ Modify test generation to enforce default benchmark fork for benchmark
+ tests.
+ """
benchmark_dir = Path(__file__).parent
test_file_path = Path(metafunc.definition.fspath)
diff --git a/tests/benchmark/helpers.py b/tests/benchmark/helpers.py
index ca3e0705727..c0e7f80ac63 100644
--- a/tests/benchmark/helpers.py
+++ b/tests/benchmark/helpers.py
@@ -9,7 +9,8 @@ def code_loop_precompile_call(calldata: Bytecode, attack_block: Bytecode, fork:
"""Create a code loop that calls a precompile with the given calldata."""
max_code_size = fork.max_code_size()
- # The attack contract is: CALLDATA_PREP + #JUMPDEST + [attack_block]* + JUMP(#)
+ # The attack contract is: CALLDATA_PREP + #JUMPDEST + [attack_block]* +
+ # JUMP(#)
jumpdest = Op.JUMPDEST
jump_back = Op.JUMP(len(calldata))
max_iters_loop = (max_code_size - len(calldata) - len(jumpdest) - len(jump_back)) // len(
diff --git a/tests/benchmark/test_worst_blocks.py b/tests/benchmark/test_worst_blocks.py
index b71f3035067..6d95ef657b0 100644
--- a/tests/benchmark/test_worst_blocks.py
+++ b/tests/benchmark/test_worst_blocks.py
@@ -1,8 +1,5 @@
"""
-abstract: Tests that benchmark EVMs in worst-case block scenarios.
- Tests that benchmark EVMs in worst-case block scenarios.
-
-Tests running worst-case block scenarios for EVMs.
+Tests that benchmark EVMs in worst-case block scenarios.
"""
import random
@@ -26,7 +23,10 @@
@pytest.fixture
def iteration_count(intrinsic_cost: int, gas_benchmark_value: int):
- """Calculate the number of iterations based on the gas limit and intrinsic cost."""
+ """
+ Calculate the number of iterations based on the gas limit and intrinsic
+ cost.
+ """
return gas_benchmark_value // intrinsic_cost
@@ -186,12 +186,14 @@ def test_block_full_data(
gas_benchmark_value: int,
):
"""Test a block with empty payload."""
- # Gas cost calculation based on EIP-7683: (https://eips.ethereum.org/EIPS/eip-7683)
+ # Gas cost calculation based on EIP-7683:
+ # (https://eips.ethereum.org/EIPS/eip-7683)
#
# tx.gasUsed = 21000 + max(
# STANDARD_TOKEN_COST * tokens_in_calldata
# + execution_gas_used
- # + isContractCreation * (32000 + INITCODE_WORD_COST * words(calldata)),
+ # + isContractCreation * (32000 +
+ # INITCODE_WORD_COST * words(calldata)),
# TOTAL_COST_FLOOR_PER_TOKEN * tokens_in_calldata)
#
# Simplified in this test case:
@@ -208,7 +210,8 @@ def test_block_full_data(
# Token accounting:
# tokens_in_calldata = zero_bytes + 4 * non_zero_bytes
#
- # So we calculate how many bytes we can fit into calldata based on available gas.
+ # So we calculate how many bytes we can fit into calldata based on
+ # available gas.
gas_available = gas_benchmark_value - intrinsic_cost
@@ -240,7 +243,10 @@ def test_block_full_access_list_and_data(
fork: Fork,
gas_benchmark_value: int,
):
- """Test a block with access lists (60% gas) and calldata (40% gas) using random mixed bytes."""
+ """
+ Test a block with access lists (60% gas) and calldata (40% gas) using
+ random mixed bytes.
+ """
attack_gas_limit = gas_benchmark_value
gas_available = attack_gas_limit - intrinsic_cost
@@ -271,7 +277,8 @@ def test_block_full_access_list_and_data(
)
]
- # Calculate calldata with 29% of gas for zero bytes and 71% for non-zero bytes
+ # Calculate calldata with 29% of gas for zero bytes and 71% for non-zero
+ # bytes
# Token accounting: tokens_in_calldata = zero_bytes + 4 * non_zero_bytes
# We want to split the gas budget:
# - 29% of gas_for_calldata for zero bytes
@@ -287,7 +294,8 @@ def test_block_full_access_list_and_data(
# Zero bytes: 1 token per byte
# Non-zero bytes: 4 tokens per byte
num_zero_bytes = tokens_for_zero_bytes # 1 token = 1 zero byte
- num_non_zero_bytes = tokens_for_non_zero_bytes // 4 # 4 tokens = 1 non-zero byte
+ # 4 tokens = 1 non-zero byte
+ num_non_zero_bytes = tokens_for_non_zero_bytes // 4
# Create calldata with mixed bytes
calldata = bytearray()
diff --git a/tests/benchmark/test_worst_bytecode.py b/tests/benchmark/test_worst_bytecode.py
index 9c38e010bb2..14227394040 100644
--- a/tests/benchmark/test_worst_bytecode.py
+++ b/tests/benchmark/test_worst_bytecode.py
@@ -1,7 +1,4 @@
"""
-abstract: Tests that benchmark EVMs in worst-case opcode scenarios.
- Tests that benchmark EVMs in worst-case opcode scenarios.
-
Tests that benchmark EVMs in worst-case opcode scenarios.
"""
@@ -56,30 +53,30 @@ def test_worst_bytecode_single_opcode(
gas_benchmark_value: int,
):
"""
- Test a block execution where a single opcode execution maxes out the gas limit,
- and the opcodes access a huge amount of contract code.
+ Test a block execution where a single opcode execution maxes out the gas
+ limit, and the opcodes access a huge amount of contract code.
- We first use a single block to deploy a factory contract that will be used to deploy
- a large number of contracts.
+ We first use a single block to deploy a factory contract that will be used
+ to deploy a large number of contracts.
This is done to avoid having a big pre-allocation size for the test.
- The test is performed in the last block of the test, and the entire block gas limit is
- consumed by repeated opcode executions.
+ The test is performed in the last block of the test, and the entire block
+ gas limit is consumed by repeated opcode executions.
"""
- # The attack gas limit is the gas limit which the target tx will use
- # The test will scale the block gas limit to setup the contracts accordingly to be
- # able to pay for the contract deposit. This has to take into account the 200 gas per byte,
- # but also the quadratic memory expansion costs which have to be paid each time the
- # memory is being setup
+    # The attack gas limit is the gas limit which the target tx will use. The
+ # test will scale the block gas limit to setup the contracts accordingly to
+ # be able to pay for the contract deposit. This has to take into account
+ # the 200 gas per byte, but also the quadratic memory expansion costs which
+ # have to be paid each time the memory is being setup
attack_gas_limit = gas_benchmark_value
max_contract_size = fork.max_code_size()
gas_costs = fork.gas_costs()
- # Calculate the absolute minimum gas costs to deploy the contract
- # This does not take into account setting up the actual memory (using KECCAK256 and XOR)
- # so the actual costs of deploying the contract is higher
+    # Calculate the absolute minimum gas costs to deploy the contract. This
+    # does not take into account setting up the actual memory (using KECCAK256
+    # and XOR) so the actual cost of deploying the contract is higher
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
memory_gas_minimum = memory_expansion_gas_calculator(new_bytes=len(bytes(max_contract_size)))
code_deposit_gas_minimum = (
@@ -90,7 +87,8 @@ def test_worst_bytecode_single_opcode(
# Calculate the loop cost of the attacker to query one address
loop_cost = (
gas_costs.G_KECCAK_256 # KECCAK static cost
- + math.ceil(85 / 32) * gas_costs.G_KECCAK_256_WORD # KECCAK dynamic cost for CREATE2
+ + math.ceil(85 / 32) * gas_costs.G_KECCAK_256_WORD # KECCAK dynamic
+ # cost for CREATE2
+ gas_costs.G_VERY_LOW * 3 # ~MSTOREs+ADDs
+ gas_costs.G_COLD_ACCOUNT_ACCESS # Opcode cost
+ 30 # ~Gluing opcodes
@@ -101,8 +99,9 @@ def test_worst_bytecode_single_opcode(
attack_gas_limit - intrinsic_gas_cost_calc() - gas_costs.G_VERY_LOW * 4
) // loop_cost
- # Set the block gas limit to a relative high value to ensure the code deposit tx
- # fits in the block (there is enough gas available in the block to execute this)
+ # Set the block gas limit to a relative high value to ensure the code
+ # deposit tx fits in the block (there is enough gas available in the block
+ # to execute this)
minimum_gas_limit = code_deposit_gas_minimum * 2 * num_contracts
if env.gas_limit < minimum_gas_limit:
raise Exception(
@@ -111,16 +110,17 @@ def test_worst_bytecode_single_opcode(
"optimizing gas usage during the setup phase of this test."
)
- # The initcode will take its address as a starting point to the input to the keccak
- # hash function.
- # It will reuse the output of the hash function in a loop to create a large amount of
- # seemingly random code, until it reaches the maximum contract size.
+ # The initcode will take its address as a starting point to the input to
+ # the keccak hash function. It will reuse the output of the hash function
+ # in a loop to create a large amount of seemingly random code, until it
+ # reaches the maximum contract size.
initcode = (
Op.MSTORE(0, Op.ADDRESS)
+ While(
body=(
Op.SHA3(Op.SUB(Op.MSIZE, 32), 32)
- # Use a xor table to avoid having to call the "expensive" sha3 opcode as much
+ # Use a xor table to avoid having to call the "expensive" sha3
+ # opcode as much
+ sum(
(Op.PUSH32[xor_value] + Op.XOR + Op.DUP1 + Op.MSIZE + Op.MSTORE)
for xor_value in XOR_TABLE
@@ -129,16 +129,16 @@ def test_worst_bytecode_single_opcode(
),
condition=Op.LT(Op.MSIZE, max_contract_size),
)
- # Despite the whole contract has random bytecode, we make the first opcode be a STOP
- # so CALL-like attacks return as soon as possible, while EXTCODE(HASH|SIZE) work as
- # intended.
+        # Although the whole contract has random bytecode, we make the first
+ # opcode be a STOP so CALL-like attacks return as soon as possible,
+ # while EXTCODE(HASH|SIZE) work as intended.
+ Op.MSTORE8(0, 0x00)
+ Op.RETURN(0, max_contract_size)
)
initcode_address = pre.deploy_contract(code=initcode)
- # The factory contract will simply use the initcode that is already deployed,
- # and create a new contract and return its address if successful.
+ # The factory contract will simply use the initcode that is already
+ # deployed, and create a new contract and return its address if successful.
factory_code = (
Op.EXTCODECOPY(
address=initcode_address,
@@ -160,8 +160,8 @@ def test_worst_bytecode_single_opcode(
)
factory_address = pre.deploy_contract(code=factory_code)
- # The factory caller will call the factory contract N times, creating N new contracts.
- # Calldata should contain the N value.
+ # The factory caller will call the factory contract N times, creating N new
+ # contracts. Calldata should contain the N value.
factory_caller_code = Op.CALLDATALOAD(0) + While(
body=Op.POP(Op.CALL(address=factory_address)),
condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
@@ -208,8 +208,8 @@ def test_worst_bytecode_single_opcode(
)
if len(attack_code) > max_contract_size:
- # TODO: A workaround could be to split the opcode code into multiple contracts
- # and call them in sequence.
+ # TODO: A workaround could be to split the opcode code into multiple
+ # contracts and call them in sequence.
raise ValueError(
f"Code size {len(attack_code)} exceeds maximum code size {max_contract_size}"
)
@@ -254,17 +254,18 @@ def test_worst_initcode_jumpdest_analysis(
"""
Test the jumpdest analysis performance of the initcode.
- This benchmark places a very long initcode in the memory and then invoke CREATE instructions
- with this initcode up to the block gas limit. The initcode itself has minimal execution time
- but forces the EVM to perform the full jumpdest analysis on the parametrized byte pattern.
- The initicode is modified by mixing-in the returned create address between CREATE invocations
- to prevent caching.
+    This benchmark places a very long initcode in the memory and then invokes
+ CREATE instructions with this initcode up to the block gas limit. The
+ initcode itself has minimal execution time but forces the EVM to perform
+    the full jumpdest analysis on the parametrized byte pattern. The initcode
+ is modified by mixing-in the returned create address between CREATE
+ invocations to prevent caching.
"""
max_code_size = fork.max_code_size()
initcode_size = fork.max_initcode_size()
- # Expand the initcode pattern to the transaction data so it can be used in CALLDATACOPY
- # in the main contract. TODO: tune the tx_data_len param.
+ # Expand the initcode pattern to the transaction data so it can be used in
+ # CALLDATACOPY in the main contract. TODO: tune the tx_data_len param.
tx_data_len = 1024
tx_data = pattern * (tx_data_len // len(pattern))
tx_data += (tx_data_len - len(tx_data)) * bytes(Op.JUMPDEST)
@@ -332,8 +333,9 @@ def test_worst_initcode_jumpdest_analysis(
@pytest.mark.parametrize(
"max_code_size_ratio, non_zero_data, value",
[
- # To avoid a blowup of combinations, the value dimension is only explored for
- # the non-zero data case, so isn't affected by code size influence.
+ # To avoid a blowup of combinations, the value dimension is only
+ # explored for the non-zero data case, so isn't affected by code size
+ # influence.
pytest.param(0, False, 0, id="0 bytes without value"),
pytest.param(0, False, 1, id="0 bytes with value"),
pytest.param(0.25, True, 0, id="0.25x max code size with non-zero data"),
@@ -356,7 +358,9 @@ def test_worst_create(
value: int,
gas_benchmark_value: int,
):
- """Test the CREATE and CREATE2 performance with different configurations."""
+ """
+ Test the CREATE and CREATE2 performance with different configurations.
+ """
max_code_size = fork.max_code_size()
code_size = int(max_code_size * max_code_size_ratio)
@@ -381,7 +385,9 @@ def test_worst_create(
# Create the benchmark contract which has the following design:
# ```
# PUSH(value)
- # [EXTCODECOPY(full initcode_template_contract) -- Conditional that non_zero_data is True]`
+ # [EXTCODECOPY(full initcode_template_contract)
+ # -> Conditional that non_zero_data is True]
+ #
# JUMPDEST (#)
# (CREATE|CREATE2)
# (CREATE|CREATE2)
@@ -407,10 +413,11 @@ def test_worst_create(
# - DUP3 refers to PUSH1(value) above.
Op.POP(Op.CREATE(value=Op.DUP3, offset=0, size=Op.DUP2))
if opcode == Op.CREATE
- # For CREATE2: we manually push the arguments because we leverage the return value of
- # previous CREATE2 calls as salt for the next CREATE2 call.
- # - DUP4 is targeting the PUSH1(value) from the code_prefix.
- # - DUP3 is targeting the EXTCODESIZE value pushed in code_prefix.
+ # For CREATE2: we manually push the arguments because we leverage the
+ # return value of previous CREATE2 calls as salt for the next CREATE2
+ # call.
+ # - DUP4 is targeting the PUSH1(value) from the code_prefix.
+ # - DUP3 is targeting the EXTCODESIZE value pushed in code_prefix.
else Op.DUP3 + Op.PUSH0 + Op.DUP4 + Op.CREATE2
)
code = code_loop_precompile_call(code_prefix, attack_block, fork)
@@ -444,20 +451,20 @@ def test_worst_creates_collisions(
gas_benchmark_value: int,
):
"""Test the CREATE and CREATE2 collisions performance."""
- # We deploy a "proxy contract" which is the contract that will be called in a loop
- # using all the gas in the block. This "proxy contract" is the one executing CREATE2
- # failing with a collision.
- # The reason why we need a "proxy contract" is that CREATE(2) failing with a collision will
- # consume all the available gas. If we try to execute the CREATE(2) directly without being
- # wrapped **and capped in gas** in a previous CALL, we would run out of gas very fast!
- #
- # The proxy contract calls CREATE(2) with empty initcode. The current call frame gas will
- # be exhausted because of the collision. For this reason the caller will carefully give us
- # the minimal gas necessary to execute the CREATE(2) and not waste any extra gas in the
- # CREATE(2)-failure.
- #
- # Note that these CREATE(2) calls will fail because in (**) below we pre-alloc contracts
- # with the same address as the ones that CREATE(2) will try to create.
+ # We deploy a "proxy contract" which is the contract that will be called in
+ # a loop using all the gas in the block. This "proxy contract" is the one
+ # executing CREATE2 failing with a collision. The reason why we need a
+ # "proxy contract" is that CREATE(2) failing with a collision will consume
+ # all the available gas. If we try to execute the CREATE(2) directly
+ # without being wrapped **and capped in gas** in a previous CALL, we would
+ # run out of gas very fast!
+ # The proxy contract calls CREATE(2) with empty initcode. The current call
+ # frame gas will be exhausted because of the collision. For this reason the
+ # caller will carefully give us the minimal gas necessary to execute the
+ # CREATE(2) and not waste any extra gas in the CREATE(2)-failure.
+    # Note that these CREATE(2) calls will fail because in (**) below we
+    # pre-alloc contracts with the same address as the ones that CREATE(2)
+    # will try to create.
proxy_contract = pre.deploy_contract(
code=Op.CREATE2(value=Op.PUSH0, salt=Op.PUSH0, offset=Op.PUSH0, size=Op.PUSH0)
if opcode == Op.CREATE2
@@ -465,8 +472,8 @@ def test_worst_creates_collisions(
)
gas_costs = fork.gas_costs()
- # The CALL to the proxy contract needs at a minimum gas corresponding to the CREATE(2)
- # plus extra required PUSH0s for arguments.
+ # The CALL to the proxy contract needs at a minimum gas corresponding to
+ # the CREATE(2) plus extra required PUSH0s for arguments.
min_gas_required = gas_costs.G_CREATE + gas_costs.G_BASE * (3 if opcode == Op.CREATE else 4)
code_prefix = Op.PUSH20(proxy_contract) + Op.PUSH3(min_gas_required)
attack_block = Op.POP(
@@ -477,7 +484,8 @@ def test_worst_creates_collisions(
code = code_loop_precompile_call(code_prefix, attack_block, fork)
tx_target = pre.deploy_contract(code=code)
- # (**) We deploy the contract that CREATE(2) will attempt to create so any attempt will fail.
+ # (**) We deploy the contract that CREATE(2) will attempt to create so any
+ # attempt will fail.
if opcode == Op.CREATE2:
addr = compute_create2_address(address=proxy_contract, salt=0, initcode=[])
pre.deploy_contract(address=addr, code=Op.INVALID)
diff --git a/tests/benchmark/test_worst_compute.py b/tests/benchmark/test_worst_compute.py
index d7437a77e4c..504b2310510 100644
--- a/tests/benchmark/test_worst_compute.py
+++ b/tests/benchmark/test_worst_compute.py
@@ -1,8 +1,5 @@
"""
-abstract: Tests that benchmark EVMs in worst-case compute scenarios.
- Tests that benchmark EVMs in worst-case compute scenarios.
-
-Tests that benchmark EVMs when running worst-case compute opcodes and precompile scenarios.
+Tests that benchmark EVMs in worst-case compute scenarios.
"""
import math
@@ -57,8 +54,8 @@ def neg(x: int) -> int:
def make_dup(index: int) -> Opcode:
"""
- Create a DUP instruction which duplicates the index-th (counting from 0) element
- from the top of the stack. E.g. make_dup(0) → DUP1.
+ Create a DUP instruction which duplicates the index-th (counting from 0)
+ element from the top of the stack. E.g. make_dup(0) → DUP1.
"""
assert 0 <= index < 16
return Opcode(0x80 + index, pushed_stack_items=1, min_stack_height=index + 1)
@@ -158,9 +155,9 @@ def test_worst_callvalue(
"""
Test running a block with as many CALLVALUE opcodes as possible.
- The `non_zero_value` parameter controls whether opcode must return non-zero value.
- The `from_origin` parameter controls whether the call frame is the immediate from the
- transaction or a previous CALL.
+ The `non_zero_value` parameter controls whether opcode must return non-zero
+ value. The `from_origin` parameter controls whether the call frame is the
+ immediate from the transaction or a previous CALL.
"""
max_code_size = fork.max_code_size()
@@ -222,11 +219,12 @@ def test_worst_returndatasize_nonzero(
gas_benchmark_value: int,
):
"""
- Test running a block which execute as many RETURNDATASIZE opcodes which return a non-zero
- buffer as possible.
+ Test running a block which execute as many RETURNDATASIZE opcodes which
+ return a non-zero buffer as possible.
- The `returned_size` parameter indicates the size of the returned data buffer.
- The `return_data_style` indicates how returned data is produced for the opcode caller.
+ The `returned_size` parameter indicates the size of the returned data
+ buffer. The `return_data_style` indicates how returned data is produced for
+ the opcode caller.
"""
max_code_size = fork.max_code_size()
@@ -271,7 +269,10 @@ def test_worst_returndatasize_zero(
fork: Fork,
gas_benchmark_value: int,
):
- """Test running a block with as many RETURNDATASIZE opcodes as possible with a zero buffer."""
+ """
+ Test running a block with as many RETURNDATASIZE opcodes as possible with a
+ zero buffer.
+ """
max_code_size = fork.max_code_size()
dummy_contract_call = Bytecode()
@@ -351,40 +352,44 @@ def test_worst_keccak(
max_code_size = fork.max_code_size()
- # Discover the optimal input size to maximize keccak-permutations, not keccak calls.
- # The complication of the discovery arises from the non-linear gas cost of memory expansion.
+ # Discover the optimal input size to maximize keccak-permutations, not
+    # keccak calls. The complication of the discovery arises from the
+    # non-linear gas cost of memory expansion.
max_keccak_perm_per_block = 0
optimal_input_length = 0
for i in range(1, 1_000_000, 32):
iteration_gas_cost = (
2 * gsc.G_VERY_LOW # PUSHN + PUSH1
+ gsc.G_KECCAK_256 # KECCAK256 static cost
- + math.ceil(i / 32) * gsc.G_KECCAK_256_WORD # KECCAK256 dynamic cost
+ + math.ceil(i / 32) * gsc.G_KECCAK_256_WORD # KECCAK256 dynamic
+ # cost
+ gsc.G_BASE # POP
)
- # From the available gas, we subtract the mem expansion costs considering we know the
- # current input size length i.
+ # From the available gas, we subtract the mem expansion costs
+ # considering we know the current input size length i.
available_gas_after_expansion = max(0, available_gas - mem_exp_gas_calculator(new_bytes=i))
# Calculate how many calls we can do.
num_keccak_calls = available_gas_after_expansion // iteration_gas_cost
# KECCAK does 1 permutation every 136 bytes.
num_keccak_permutations = num_keccak_calls * math.ceil(i / KECCAK_RATE)
- # If we found an input size that is better (reg permutations/gas), then save it.
+ # If we found an input size that is better (reg permutations/gas), then
+ # save it.
if num_keccak_permutations > max_keccak_perm_per_block:
max_keccak_perm_per_block = num_keccak_permutations
optimal_input_length = i
- # max_iters_loop contains how many keccak calls can be done per loop.
- # The loop is as big as possible bounded by the maximum code size.
+ # max_iters_loop contains how many keccak calls can be done per loop. The
+ # loop is as big as possible bounded by the maximum code size.
#
# The loop structure is: JUMPDEST + [attack iteration] + PUSH0 + JUMP
#
# Now calculate available gas for [attack iteration]:
- # Numerator = max_code_size-3. The -3 is for the JUMPDEST, PUSH0 and JUMP.
- # Denominator = (PUSHN + PUSH1 + KECCAK256 + POP) + PUSH1_DATA + PUSHN_DATA
- # TODO: the testing framework uses PUSH1(0) instead of PUSH0 which is suboptimal for the
- # attack, whenever this is fixed adjust accordingly.
+    # Numerator = max_code_size-3. The -3 is for the JUMPDEST, PUSH0 and JUMP.
+ # Denominator = (PUSHN + PUSH1 + KECCAK256 + POP) + PUSH1_DATA + PUSHN_DATA
+ #
+ # TODO: the testing framework uses PUSH1(0) instead of PUSH0 which is
+ # suboptimal for the attack, whenever this is fixed adjust accordingly.
start_code = Op.JUMPDEST + Op.PUSH20[optimal_input_length]
loop_code = Op.POP(Op.SHA3(Op.PUSH0, Op.DUP1))
end_code = Op.POP + Op.JUMP(Op.PUSH0)
@@ -427,7 +432,10 @@ def test_worst_precompile_only_data_input(
bytes_per_unit_of_work: int,
gas_benchmark_value: int,
):
- """Test running a block with as many precompile calls which have a single `data` input."""
+ """
+ Test running a block with as many precompile calls which have a single
+ `data` input.
+ """
# Intrinsic gas cost is paid once.
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
available_gas = gas_benchmark_value - intrinsic_gas_calculator()
@@ -435,7 +443,8 @@ def test_worst_precompile_only_data_input(
gsc = fork.gas_costs()
mem_exp_gas_calculator = fork.memory_expansion_gas_calculator()
- # Discover the optimal input size to maximize precompile work, not precompile calls.
+ # Discover the optimal input size to maximize precompile work, not
+ # precompile calls.
max_work = 0
optimal_input_length = 0
for input_length in range(1, 1_000_000, 32):
@@ -450,11 +459,12 @@ def test_worst_precompile_only_data_input(
iteration_gas_cost = (
parameters_gas
+ +static_cost # Precompile static cost
- + math.ceil(input_length / 32) * per_word_dynamic_cost # Precompile dynamic cost
+ + math.ceil(input_length / 32) * per_word_dynamic_cost
+ # Precompile dynamic cost
+ gsc.G_BASE # POP
)
- # From the available gas, we subtract the mem expansion costs considering we know the
- # current input size length.
+ # From the available gas, we subtract the mem expansion costs
+ # considering we know the current input size length.
available_gas_after_expansion = max(
0, available_gas - mem_exp_gas_calculator(new_bytes=input_length)
)
@@ -462,7 +472,8 @@ def test_worst_precompile_only_data_input(
num_calls = available_gas_after_expansion // iteration_gas_cost
total_work = num_calls * math.ceil(input_length / bytes_per_unit_of_work)
- # If we found an input size that is better (reg permutations/gas), then save it.
+ # If we found an input size that is better (reg permutations/gas), then
+ # save it.
if total_work > max_work:
max_work = total_work
optimal_input_length = input_length
@@ -641,7 +652,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_odd_32b_exp_cover_windows",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L38
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L38
pytest.param(
ModExpInput(
base=192 * "FF",
@@ -650,7 +662,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_min_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L40
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L40
pytest.param(
ModExpInput(
base=8 * "FF",
@@ -659,7 +672,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_min_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L42
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L42
pytest.param(
ModExpInput(
base=40 * "FF",
@@ -668,7 +682,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_min_gas_balanced",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L44
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L44
pytest.param(
ModExpInput(
base=32 * "FF",
@@ -677,7 +692,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_exp_208_gas_balanced",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L46
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L46
pytest.param(
ModExpInput(
base=8 * "FF",
@@ -686,7 +702,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_exp_215_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L48
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L48
pytest.param(
ModExpInput(
base=8 * "FF",
@@ -695,7 +712,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_exp_298_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L50
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L50
pytest.param(
ModExpInput(
base=16 * "FF",
@@ -704,7 +722,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_pawel_2",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L52
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L52
pytest.param(
ModExpInput(
base=24 * "FF",
@@ -713,7 +732,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_pawel_3",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L54
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L54
pytest.param(
ModExpInput(
base=32 * "FF",
@@ -722,7 +742,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_pawel_4",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L56
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L56
pytest.param(
ModExpInput(
base=280 * "FF",
@@ -731,7 +752,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_408_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L58
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L58
pytest.param(
ModExpInput(
base=16 * "FF",
@@ -740,7 +762,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_400_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L60
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L60
pytest.param(
ModExpInput(
base=48 * "FF",
@@ -749,7 +772,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_408_gas_balanced",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L62
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L62
pytest.param(
ModExpInput(
base=344 * "FF",
@@ -758,7 +782,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_616_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L64
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L64
pytest.param(
ModExpInput(
base=16 * "FF",
@@ -767,7 +792,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_600_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L66
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L66
pytest.param(
ModExpInput(
base=48 * "FF",
@@ -776,7 +802,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_600_gas_balanced",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L68
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L68
pytest.param(
ModExpInput(
base=392 * "FF",
@@ -785,7 +812,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_800_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L70
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L70
pytest.param(
ModExpInput(
base=16 * "FF",
@@ -794,7 +822,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_800_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L72
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L72
pytest.param(
ModExpInput(
base=56 * "FF",
@@ -803,7 +832,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_767_gas_balanced",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L74
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L74
pytest.param(
ModExpInput(
base=16 * "FF",
@@ -812,7 +842,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_852_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L76
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L76
pytest.param(
ModExpInput(
base=408 * "FF",
@@ -821,7 +852,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_867_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L78
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L78
pytest.param(
ModExpInput(
base=56 * "FF",
@@ -830,7 +862,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_996_gas_balanced",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L80
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L80
pytest.param(
ModExpInput(
base=448 * "FF",
@@ -839,7 +872,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_1045_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L82
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L82
pytest.param(
ModExpInput(
base=32 * "FF",
@@ -848,7 +882,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_677_gas_base_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L84
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L84
pytest.param(
ModExpInput(
base=24 * "FF",
@@ -857,7 +892,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_765_gas_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L86
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L86
pytest.param(
ModExpInput(
base=32 * "FF",
@@ -954,7 +990,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_1024_exp_2",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L122
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L122
pytest.param(
ModExpInput(
base="03",
@@ -963,7 +1000,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_example_1",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L124
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L124
pytest.param(
ModExpInput(
base="",
@@ -972,7 +1010,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_example_2",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L126
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L126
pytest.param(
ModExpInput(
base="e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5",
@@ -981,7 +1020,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_1_square",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L128
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L128
pytest.param(
ModExpInput(
base="e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5",
@@ -990,7 +1030,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_1_qube",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L130
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L130
pytest.param(
ModExpInput(
base="e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5",
@@ -999,7 +1040,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_1_pow_0x10001",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L132
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L132
pytest.param(
ModExpInput(
base="cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51",
@@ -1008,7 +1050,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_2_square",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L134
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L134
pytest.param(
ModExpInput(
base="cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51",
@@ -1017,7 +1060,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_2_qube",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L136
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L136
pytest.param(
ModExpInput(
base="cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51",
@@ -1026,7 +1070,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_2_pow_0x10001",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L138
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L138
pytest.param(
ModExpInput(
base="c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb",
@@ -1035,7 +1080,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_3_square",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L140
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L140
pytest.param(
ModExpInput(
base="c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb",
@@ -1044,7 +1090,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_3_qube",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L142
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L142
pytest.param(
ModExpInput(
base="c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb",
@@ -1053,7 +1100,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_3_pow_0x10001",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L144
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L144
pytest.param(
ModExpInput(
base="db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81",
@@ -1062,7 +1110,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_4_square",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L146
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L146
pytest.param(
ModExpInput(
base="db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81",
@@ -1071,7 +1120,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_4_qube",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L148
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L148
pytest.param(
ModExpInput(
base="db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81",
@@ -1080,7 +1130,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_4_pow_0x10001",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L150
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L150
pytest.param(
ModExpInput(
base="c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b
40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf",
@@ -1089,7 +1140,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_5_square",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L152
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L152
pytest.param(
ModExpInput(
base="c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b
40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf",
@@ -1098,7 +1150,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_5_qube",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L154
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L154
pytest.param(
ModExpInput(
base="c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b
40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf",
@@ -1107,7 +1160,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_nagydani_5_pow_0x10001",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L156
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L156
pytest.param(
ModExpInput(
base="ffffff",
@@ -1116,7 +1170,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_marius_1_even",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L158
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L158
pytest.param(
ModExpInput(
base="ffffffffffffffff76ffffffffffffff",
@@ -1125,7 +1180,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_guido_1_even",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L160
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L160
pytest.param(
ModExpInput(
base="e0060000a921212121212121ff000021",
@@ -1134,7 +1190,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_guido_2_even",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L162
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L162
pytest.param(
ModExpInput(
base="0193585a48e18aad777e9c1b54221a0f58140392e4f091cd5f42b2e8644a9384fbd58ae1edec2477ebf7edbf7c0a3f8bd21d1890ee87646feab3c47be716f842cc3da9b940af312dc54450a960e3fc0b86e56abddd154068e10571a96fff6259431632bc15695c6c8679057e66c2c25c127e97e64ee5de6ea1fc0a4a0e431343fed1daafa072c238a45841da86a9806680bc9f298411173210790359209cd454b5af7b4d5688b4403924e5f863d97e2c5349e1a04b54fcf385b1e9d7714bab8fbf5835f6ff9ed575e77dff7af5cbb641db5d537933bae1fa6555d6c12d6fb31ca27b57771f4aebfbe0bf95e8990c0108ffe7cbdaf370be52cf3ade594543af75ad9329d2d11a402270b5b9a6bf4b83307506e118fca4862749d04e916fc7a039f0d13f2a02e0eedb800199ec95df15b4ccd8669b52586879624d51219e72102fad810b5909b1e372ddf33888fb9beb09b416e4164966edbabd89e4a286be36277fc576ed519a15643dac602e92b63d0b9121f0491da5b16ef793a967f096d80b6c81ecaaffad7e3f06a4a5ac2796f1ed9f68e6a0fd5cf191f0c5c2eec338952ff8d31abc68bf760febeb57e088995ba1d7726a2fdd6d8ca28a181378b8b4ab699bfd4b696739bbf17a9eb2df6251143046137fdbbfacac312ebf67a67da9741b59600000000000",
@@ -1143,7 +1200,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_guido_3_even",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L166
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L166
pytest.param(
ModExpInput(
base="ffffffffffffffff",
@@ -1152,7 +1210,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_pawel_1_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L168
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L168
pytest.param(
ModExpInput(
base="ffffffffffffffffffffffffffffffff",
@@ -1161,7 +1220,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_pawel_2_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L170
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L170
pytest.param(
ModExpInput(
base="ffffffffffffffffffffffffffffffffffffffffffffffff",
@@ -1176,7 +1236,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_pawel_3_exp_8",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L172
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L172
pytest.param(
ModExpInput(
base="ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
@@ -1185,7 +1246,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_pawel_4_exp_heavy",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L174
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L174
pytest.param(
ModExpInput(
base="29356abadad68ad986c416de6f620bda0e1818b589e84f853a97391694d35496",
@@ -1194,7 +1256,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_common_1360n1",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L176
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L176
pytest.param(
ModExpInput(
base="d41afaeaea32f7409827761b68c41b6e535da4ede1f0800bfb4a6aed18394f6b",
@@ -1203,7 +1266,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_common_1360n2",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L178
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L178
pytest.param(
ModExpInput(
base="1a5be8fae3b3fda9ea329494ae8689c04fae4978ecccfa6a6bfb9f04b25846c0",
@@ -1212,7 +1276,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_common_1349n1",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L182
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L182
pytest.param(
ModExpInput(
base="0000000000000000000000000000000000000000000000000000000000000003",
@@ -1221,7 +1286,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_common_1152n1",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L184
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L184
pytest.param(
ModExpInput(
base="1fb473dd1171cf88116aa77ab3612c2c7d2cf466cc2386cc456130e2727c70b4",
@@ -1230,7 +1296,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_common_200n1",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L186
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L186
pytest.param(
ModExpInput(
base="1951441010b2b95a6e47a6075066a50a036f5ba978c050f2821df86636c0facb",
@@ -1239,7 +1306,8 @@ def test_worst_precompile_only_data_input(
),
id="mod_vul_common_200n2",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L188
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L188
pytest.param(
ModExpInput(
base="288254ba43e713afbe36c9f03b54c00fae4c0a82df1cf165eb46a21c20a48ca2",
@@ -1266,14 +1334,15 @@ def test_worst_modexp(
gas_benchmark_value: int,
):
"""
- Test running a block with as many calls to the MODEXP (5) precompile as possible.
- All the calls have the same parametrized input.
+ Test running a block with as many calls to the MODEXP (5) precompile as
+ possible. All the calls have the same parametrized input.
"""
# Skip the trailing zeros from the input to make EVM work even harder.
calldata = bytes(mod_exp_input).rstrip(b"\x00")
code = code_loop_precompile_call(
- Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE), # Copy the input to the memory.
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE), # Copy the input to the
+ # memory.
Op.POP(Op.STATICCALL(Op.GAS, 0x5, Op.PUSH0, Op.CALLDATASIZE, Op.PUSH0, Op.PUSH0)),
fork,
)
@@ -1298,8 +1367,8 @@ def test_worst_modexp(
pytest.param(
0x01,
[
- # The inputs below are a valid signature, thus ECRECOVER call won't
- # be short-circuited by validations and do actual work.
+ # The inputs below are a valid signature, thus ECRECOVER call
+ # won't be short-circuited by validations and do actual work.
"38D18ACB67D25C8BB9942764B62F18E17054F66A817BD4295423ADF9ED98873E",
"000000000000000000000000000000000000000000000000000000000000001B",
"38D18ACB67D25C8BB9942764B62F18E17054F66A817BD4295423ADF9ED98873E",
@@ -1317,7 +1386,8 @@ def test_worst_modexp(
],
id="bn128_add",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L326
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L326
pytest.param(
0x06,
[
@@ -1328,7 +1398,8 @@ def test_worst_modexp(
],
id="bn128_add_infinities",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L329
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L329
pytest.param(
0x06,
[
@@ -1348,7 +1419,8 @@ def test_worst_modexp(
],
id="bn128_mul",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L335
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L335
pytest.param(
0x07,
[
@@ -1358,7 +1430,8 @@ def test_worst_modexp(
],
id="bn128_mul_infinities_2_scalar",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L338
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L338
pytest.param(
0x07,
[
@@ -1368,7 +1441,8 @@ def test_worst_modexp(
],
id="bn128_mul_infinities_32_byte_scalar",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L341
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L341
pytest.param(
0x07,
[
@@ -1378,7 +1452,8 @@ def test_worst_modexp(
],
id="bn128_mul_1_2_2_scalar",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L344
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L344
pytest.param(
0x07,
[
@@ -1388,7 +1463,8 @@ def test_worst_modexp(
],
id="bn128_mul_1_2_32_byte_scalar",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L347
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L347
pytest.param(
0x07,
[
@@ -1398,7 +1474,8 @@ def test_worst_modexp(
],
id="bn128_mul_32_byte_coord_and_2_scalar",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L350
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L350
pytest.param(
0x07,
[
@@ -1441,7 +1518,8 @@ def test_worst_modexp(
],
id="bn128_one_pairing",
),
- # Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L353
+ # Ported from
+ # https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCase.cs#L353
pytest.param(0x08, [], id="ec_pairing_zero_input"),
pytest.param(
0x08,
@@ -1640,9 +1718,10 @@ def test_worst_modexp(
pytest.param(
bls12381_spec.Spec.G2MSM,
[
- # TODO: the //2 is required due to a limitation of the max contract size limit.
- # In a further iteration we can insert the inputs as calldata or storage and avoid
- # having to do PUSHes which has this limitation. This also applies to G1MSM.
+ # TODO: the //2 is required due to a limitation of the max
+ # contract size limit. In a further iteration we can insert the
+ # inputs as calldata or storage and avoid having to do PUSHes
+ # which has this limitation. This also applies to G1MSM.
(bls12381_spec.Spec.P2 + bls12381_spec.Scalar(bls12381_spec.Spec.Q))
* (len(bls12381_spec.Spec.G2MSM_DISCOUNT_TABLE) // 2),
],
@@ -1873,7 +1952,8 @@ def test_worst_jumpdests(
DEFAULT_BINOP_ARGS,
),
(
- # This has the cycle of 2, after two SUBs values are back to initials.
+ # This has the cycle of 2, after two SUBs values are back to
+ # initials.
Op.SUB,
DEFAULT_BINOP_ARGS,
),
@@ -1886,8 +1966,8 @@ def test_worst_jumpdests(
(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
# We want the first divisor to be slightly bigger than 2**128:
- # this is the worst case for the division algorithm with optimized paths
- # for division by 1 and 2 words.
+ # this is the worst case for the division algorithm with
+ # optimized paths for division by 1 and 2 words.
0x100000000000000000000000000000033,
),
),
@@ -1897,13 +1977,14 @@ def test_worst_jumpdests(
(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
# We want the first divisor to be slightly bigger than 2**64:
- # this is the worst case for the division algorithm with an optimized path
- # for division by 1 word.
+ # this is the worst case for the division algorithm with an
+ # optimized path for division by 1 word.
0x10000000000000033,
),
),
(
- # Same as DIV-0, but the numerator made positive, and the divisor made negative.
+ # Same as DIV-0, but the numerator made positive, and the divisor
+ # made negative.
Op.SDIV,
(
0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
@@ -1911,7 +1992,8 @@ def test_worst_jumpdests(
),
),
(
- # Same as DIV-1, but the numerator made positive, and the divisor made negative.
+ # Same as DIV-1, but the numerator made positive, and the divisor
+ # made negative.
Op.SDIV,
(
0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F,
@@ -1919,17 +2001,20 @@ def test_worst_jumpdests(
),
),
(
- # This scenario is not suitable for MOD because the values quickly become 0.
+ # This scenario is not suitable for MOD because the values quickly
+ # become 0.
Op.MOD,
DEFAULT_BINOP_ARGS,
),
(
- # This scenario is not suitable for SMOD because the values quickly become 0.
+ # This scenario is not suitable for SMOD because the values quickly
+ # become 0.
Op.SMOD,
DEFAULT_BINOP_ARGS,
),
(
- # This keeps the values unchanged, pow(2**256-1, 2**256-1, 2**256) == 2**256-1.
+ # This keeps the values unchanged, pow(2**256-1, 2**256-1, 2**256)
+ # == 2**256-1.
Op.EXP,
(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
@@ -2006,9 +2091,9 @@ def test_worst_binop_simple(
gas_benchmark_value: int,
):
"""
- Test running a block with as many binary instructions (takes two args, produces one value)
- as possible. The execution starts with two initial values on the stack, and the stack is
- balanced by the DUP2 instruction.
+ Test running a block with as many binary instructions (takes two args,
+ produces one value) as possible. The execution starts with two initial
+ values on the stack, and the stack is balanced by the DUP2 instruction.
"""
max_code_size = fork.max_code_size()
@@ -2044,8 +2129,8 @@ def test_worst_unop(
gas_benchmark_value: int,
):
"""
- Test running a block with as many unary instructions (takes one arg, produces one value)
- as possible.
+ Test running a block with as many unary instructions (takes one arg,
+ produces one value) as possible.
"""
max_code_size = fork.max_code_size()
@@ -2071,7 +2156,8 @@ def test_worst_unop(
# `key_mut` indicates the key isn't fixed.
@pytest.mark.parametrize("key_mut", [True, False])
-# `val_mut` indicates that at the end of each big-loop, the value of the target key changes.
+# `val_mut` indicates that at the end of each big-loop, the value of the target
+# key changes.
@pytest.mark.parametrize("val_mut", [True, False])
def test_worst_tload(
state_test: StateTestFiller,
@@ -2098,7 +2184,8 @@ def test_worst_tload(
if not key_mut and val_mut:
code_prefix = Op.JUMPDEST
loop_iter = Op.POP(Op.TLOAD(Op.CALLVALUE))
- code_val_mut = Op.TSTORE(Op.CALLVALUE, Op.GAS) # CALLVALUE configured in the tx
+ code_val_mut = Op.TSTORE(Op.CALLVALUE, Op.GAS) # CALLVALUE configured
+ # in the tx
if not key_mut and not val_mut:
code_prefix = Op.JUMPDEST
loop_iter = Op.POP(Op.TLOAD(Op.CALLVALUE))
@@ -2140,12 +2227,13 @@ def test_worst_tstore(
init_key = 42
code_prefix = Op.PUSH1(init_key) + Op.JUMPDEST
- # If `key_mut` is True, we mutate the key on every iteration of the big loop.
+ # If `key_mut` is True, we mutate the key on every iteration of the big
+ # loop.
code_key_mut = Op.POP + Op.GAS if key_mut else Bytecode()
code_suffix = code_key_mut + Op.JUMP(len(code_prefix) - 1)
- # If `dense_val_mut` is set, we use GAS as a cheap way of always storing a different value than
- # the previous one.
+ # If `dense_val_mut` is set, we use GAS as a cheap way of always storing a
+ # different value than the previous one.
loop_iter = Op.TSTORE(Op.DUP2, Op.GAS if dense_val_mut else Op.DUP1)
code_body_len = (max_code_size - len(code_prefix) - len(code_suffix)) // len(loop_iter)
@@ -2175,9 +2263,10 @@ def test_worst_shifts(
gas_benchmark_value: int,
):
"""
- Test running a block with as many shift instructions with non-trivial arguments.
- This test generates left-right pairs of shifts to avoid zeroing the argument.
- The shift amounts are randomly pre-selected from the constant pool of 15 values on the stack.
+ Test running a block with as many shift instructions with non-trivial
+ arguments. This test generates left-right pairs of shifts to avoid zeroing
+ the argument. The shift amounts are randomly pre-selected from the constant
+ pool of 15 values on the stack.
"""
max_code_size = fork.max_code_size()
@@ -2205,10 +2294,12 @@ def sar(x, s):
raise ValueError(f"Unexpected shift op: {shift_right}")
rng = random.Random(1) # Use random with a fixed seed.
- initial_value = 2**256 - 1 # The initial value to be shifted; should be negative for SAR.
+ initial_value = 2**256 - 1 # The initial value to be shifted; should be
+ # negative for SAR.
- # Create the list of shift amounts with 15 elements (max reachable by DUPs instructions).
- # For the worst case keep the values small and omit values divisible by 8.
+ # Create the list of shift amounts with 15 elements (max reachable by DUPs
+ # instructions). For the worst case keep the values small and omit values
+ # divisible by 8.
shift_amounts = [x + (x >= 8) + (x >= 15) for x in range(1, 16)]
code_prefix = sum(Op.PUSH1[sh] for sh in shift_amounts) + Op.JUMPDEST + Op.CALLDATALOAD(0)
@@ -2323,20 +2414,25 @@ def test_worst_mod(
gas_benchmark_value: int,
):
"""
- Test running a block with as many MOD instructions with arguments of the parametrized range.
+ Test running a block with as many MOD instructions with arguments of the
+ parametrized range.
+
The test program consists of code segments evaluating the "MOD chain":
mod[0] = calldataload(0)
mod[1] = numerators[indexes[0]] % mod[0]
mod[2] = numerators[indexes[1]] % mod[1] ...
- The "numerators" is a pool of 15 constants pushed to the EVM stack at the program start.
- The order of accessing the numerators is selected in a way the mod value remains in the range
- as long as possible.
+
+ The "numerators" is a pool of 15 constants pushed to the EVM stack at the
+ program start.
+
+ The order of accessing the numerators is selected in a way the mod value
+ remains in the range as long as possible.
"""
max_code_size = fork.max_code_size()
- # For SMOD we negate both numerator and modulus. The underlying computation is the same,
- # just the SMOD implementation will have to additionally handle the sign bits.
- # The result stays negative.
+ # For SMOD we negate both numerator and modulus. The underlying computation
+ # is the same, just the SMOD implementation will have to additionally
+ # handle the sign bits. The result stays negative.
should_negate = op == Op.SMOD
num_numerators = 15
@@ -2344,12 +2440,13 @@ def test_worst_mod(
numerator_max = 2**numerator_bits - 1
numerator_min = 2 ** (numerator_bits - 1)
- # Pick the modulus min value so that it is _unlikely_ to drop to the lower word count.
+ # Pick the modulus min value so that it is _unlikely_ to drop to the lower
+ # word count.
assert mod_bits >= 63
mod_min = 2 ** (mod_bits - 63)
- # Select the random seed giving the longest found MOD chain.
- # You can look for a longer one by increasing the numerators_min_len. This will activate
+ # Select the random seed giving the longest found MOD chain. You can look
+ # for a longer one by increasing the numerators_min_len. This will activate
# the while loop below.
match op, mod_bits:
case Op.MOD, 255:
@@ -2392,18 +2489,22 @@ def test_worst_mod(
mod = initial_mod
indexes = []
while mod >= mod_min:
- results = [n % mod for n in numerators] # Compute results for each numerator.
- i = max(range(len(results)), key=results.__getitem__) # And pick the best one.
+ # Compute results for each numerator.
+ results = [n % mod for n in numerators]
+ # And pick the best one.
+ i = max(range(len(results)), key=results.__getitem__)
mod = results[i]
indexes.append(i)
- assert len(indexes) > numerators_min_len # Disable if you want to find longer MOD chains.
+ # Disable if you want to find longer MOD chains.
+ assert len(indexes) > numerators_min_len
if len(indexes) > numerators_min_len:
break
seed += 1
print(f"{seed=}")
- # TODO: Don't use fixed PUSH32. Let Bytecode helpers to select optimal push opcode.
+ # TODO: Don't use fixed PUSH32. Let Bytecode helpers to select optimal push
+ # opcode.
code_constant_pool = sum((Op.PUSH32[n] for n in numerators), Bytecode())
code_prefix = code_constant_pool + Op.JUMPDEST
code_suffix = Op.JUMP(len(code_constant_pool))
@@ -2448,7 +2549,9 @@ def test_worst_memory_access(
big_memory_expansion: bool,
gas_benchmark_value: int,
):
- """Test running a block with as many memory access instructions as possible."""
+ """
+ Test running a block with as many memory access instructions as possible.
+ """
max_code_size = fork.max_code_size()
mem_exp_code = Op.MSTORE8(10 * 1024, 1) if big_memory_expansion else Bytecode()
@@ -2488,29 +2591,32 @@ def test_worst_modarith(
gas_benchmark_value: int,
):
"""
- Test running a block with as many "op" instructions with arguments of the parametrized range.
+ Test running a block with as many "op" instructions with arguments of the
+ parametrized range.
The test program consists of code segments evaluating the "op chain":
mod[0] = calldataload(0)
mod[1] = (fixed_arg op args[indexes[0]]) % mod[0]
mod[2] = (fixed_arg op args[indexes[1]]) % mod[1]
- The "args" is a pool of 15 constants pushed to the EVM stack at the program start.
+ The "args" is a pool of 15 constants pushed to the EVM stack at the program
+ start.
The "fixed_arg" is the 0xFF...FF constant added to the EVM stack by PUSH32
just before executing the "op".
- The order of accessing the numerators is selected in a way the mod value remains in the range
- as long as possible.
+ The order of accessing the numerators is selected in a way the mod value
+ remains in the range as long as possible.
"""
fixed_arg = 2**256 - 1
num_args = 15
max_code_size = fork.max_code_size()
- # Pick the modulus min value so that it is _unlikely_ to drop to the lower word count.
+ # Pick the modulus min value so that it is _unlikely_ to drop to the lower
+ # word count.
assert mod_bits >= 63
mod_min = 2 ** (mod_bits - 63)
- # Select the random seed giving the longest found op chain.
- # You can look for a longer one by increasing the op_chain_len. This will activate
- # the while loop below.
+ # Select the random seed giving the longest found op chain. You can look
+ # for a longer one by increasing the op_chain_len. This will activate the
+ # while loop below.
op_chain_len = 666
match op, mod_bits:
case Op.ADDMOD, 255:
@@ -2528,7 +2634,8 @@ def test_worst_modarith(
case Op.MULMOD, 127:
seed = 5
case Op.MULMOD, 63:
- # For this setup we were not able to find an op-chain longer than 600.
+ # For this setup we were not able to find an op-chain longer than
+ # 600.
seed = 4193
op_chain_len = 600
case _:
@@ -2545,11 +2652,13 @@ def test_worst_modarith(
indexes: list[int] = []
while mod >= mod_min and len(indexes) < op_chain_len:
results = [op_fn(a, fixed_arg) % mod for a in args]
- i = max(range(len(results)), key=results.__getitem__) # And pick the best one.
+ # And pick the best one.
+ i = max(range(len(results)), key=results.__getitem__)
mod = results[i]
indexes.append(i)
- assert len(indexes) == op_chain_len # Disable if you want to find longer op chains.
+ # Disable if you want to find longer op chains.
+ assert len(indexes) == op_chain_len
if len(indexes) == op_chain_len:
break
seed += 1
@@ -2561,8 +2670,8 @@ def test_worst_modarith(
+ sum(make_dup(len(args) - i) + Op.PUSH32[fixed_arg] + op for i in indexes)
+ Op.POP
)
- # Construct the final code. Because of the usage of PUSH32 the code segment is very long,
- # so don't try to include multiple of these.
+ # Construct the final code. Because of the usage of PUSH32 the code segment
+ # is very long, so don't try to include multiple of these.
code = code_constant_pool + Op.JUMPDEST + code_segment + Op.JUMP(len(code_constant_pool))
assert (max_code_size - len(code_segment)) < len(code) <= max_code_size
@@ -2608,8 +2717,9 @@ def test_amortized_bn128_pairings(
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
mem_exp_gas_calculator = fork.memory_expansion_gas_calculator()
- # This is a theoretical maximum number of pairings that can be done in a block.
- # It is only used for an upper bound for calculating the optimal number of pairings below.
+ # This is a theoretical maximum number of pairings that can be done in a
+ # block. It is only used for an upper bound for calculating the optimal
+ # number of pairings below.
maximum_number_of_pairings = (gas_benchmark_value - base_cost) // pairing_cost
# Discover the optimal number of pairings balancing two dimensions:
@@ -2620,19 +2730,21 @@ def test_amortized_bn128_pairings(
for i in range(1, maximum_number_of_pairings + 1):
# We'll pass all pairing arguments via calldata.
available_gas_after_intrinsic = gas_benchmark_value - intrinsic_gas_calculator(
- calldata=[0xFF] * size_per_pairing * i # 0xFF is to indicate non-zero bytes.
+ calldata=[0xFF] * size_per_pairing * i # 0xFF is to indicate non-
+ # zero bytes.
)
available_gas_after_expansion = max(
0,
available_gas_after_intrinsic - mem_exp_gas_calculator(new_bytes=i * size_per_pairing),
)
- # This is ignoring "glue" opcodes, but helps to have a rough idea of the right
- # cutting point.
+ # This is ignoring "glue" opcodes, but helps to have a rough idea of
+ # the right cutting point.
approx_gas_cost_per_call = gsc.G_WARM_ACCOUNT_ACCESS + base_cost + i * pairing_cost
num_precompile_calls = available_gas_after_expansion // approx_gas_cost_per_call
- num_pairings_done = num_precompile_calls * i # Each precompile call does i pairings.
+ num_pairings_done = num_precompile_calls * i # Each precompile call
+ # does i pairings.
if num_pairings_done > max_pairings:
max_pairings = num_pairings_done
@@ -2921,8 +3033,9 @@ def test_worst_return_revert(
# opcode(returned_size)
#
# ```
- # Filling the contract up to the max size is a cheap way of leveraging CODECOPY to return
- # non-zero bytes if requested. Note that since this is a pre-deploy this cost isn't
+ # Filling the contract up to the max size is a cheap way of leveraging
+ # CODECOPY to return non-zero bytes if requested. Note that since this
+ # is a pre-deploy this cost isn't
# relevant for the benchmark.
mem_preparation = Op.CODECOPY(size=return_size) if return_non_zero_data else Bytecode()
executable_code = mem_preparation + opcode(size=return_size)
@@ -2999,7 +3112,9 @@ def test_worst_clz_diff_input(
gas_benchmark_value: int,
env: Environment,
):
- """Test running a block with as many CLZ with different input as possible."""
+ """
+ Test running a block with as many CLZ with different input as possible.
+ """
tx_gas_limit = fork.transaction_gas_limit_cap() or env.gas_limit
max_code_size = fork.max_code_size()
diff --git a/tests/benchmark/test_worst_memory.py b/tests/benchmark/test_worst_memory.py
index 8952c05a455..689dcf06325 100644
--- a/tests/benchmark/test_worst_memory.py
+++ b/tests/benchmark/test_worst_memory.py
@@ -1,7 +1,4 @@
"""
-abstract: Tests that benchmark EVMs in the worst-case memory opcodes.
- Tests that benchmark EVMs in the worst-case memory opcodes.
-
Tests that benchmark EVMs in the worst-case memory opcodes.
"""
@@ -74,8 +71,9 @@ def test_worst_calldatacopy(
if size == 0 and non_zero_data:
pytest.skip("Non-zero data with size 0 is not applicable.")
- # If `non_zero_data` is True, we fill the calldata with deterministic random data.
- # Note that if `size == 0` and `non_zero_data` is a skipped case.
+ # If `non_zero_data` is True, we fill the calldata with deterministic
+ # random data. Note that if `size == 0` and `non_zero_data` is a skipped
+ # case.
data = Bytes([i % 256 for i in range(size)]) if non_zero_data else Bytes()
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
@@ -83,10 +81,11 @@ def test_worst_calldatacopy(
if min_gas > gas_benchmark_value:
pytest.skip("Minimum gas required for calldata ({min_gas}) is greater than the gas limit")
- # We create the contract that will be doing the CALLDATACOPY multiple times.
- #
- # If `non_zero_data` is True, we leverage CALLDATASIZE for the copy length. Otherwise, since we
- # don't send zero data explicitly via calldata, PUSH the target size and use DUP1 to copy it.
+ # We create the contract that will be doing the CALLDATACOPY multiple
+ # times.
+ # If `non_zero_data` is True, we leverage CALLDATASIZE for the copy length.
+ # Otherwise, since we don't send zero data explicitly via calldata, PUSH
+ # the target size and use DUP1 to copy it.
prefix = Bytecode() if non_zero_data or size == 0 else Op.PUSH3(size)
src_dst = 0 if fixed_src_dst else Op.MOD(Op.GAS, 7)
attack_block = Op.CALLDATACOPY(
@@ -97,11 +96,11 @@ def test_worst_calldatacopy(
tx_target = code_address
- # If the origin is CALL, we need to create a contract that will call the target contract with
- # the calldata.
+ # If the origin is CALL, we need to create a contract that will call the
+ # target contract with the calldata.
if origin == CallDataOrigin.CALL:
- # If `non_zero_data` is False we leverage just using zeroed memory. Otherwise, we
- # copy the calldata received from the transaction.
+ # If `non_zero_data` is False we leverage just using zeroed memory.
+ # Otherwise, we copy the calldata received from the transaction.
prefix = (
Op.CALLDATACOPY(Op.PUSH0, Op.PUSH0, Op.CALLDATASIZE) if non_zero_data else Bytecode()
) + Op.JUMPDEST
@@ -161,9 +160,9 @@ def test_worst_codecopy(
attack_block = Op.CODECOPY(src_dst, src_dst, Op.DUP1) # DUP1 copies size.
code = code_loop_precompile_call(code_prefix, attack_block, fork)
- # The code generated above is not guaranteed to be of max_code_size, so we pad it since
- # a test parameter targets CODECOPYing a contract with max code size. Padded bytecode values
- # are not relevant.
+ # The code generated above is not guaranteed to be of max_code_size, so we
+ # pad it since a test parameter targets CODECOPYing a contract with max
+ # code size. Padded bytecode values are not relevant.
code = code + Op.INVALID * (max_code_size - len(code))
assert len(code) == max_code_size, (
f"Code size {len(code)} is not equal to max code size {max_code_size}."
@@ -209,9 +208,10 @@ def test_worst_returndatacopy(
"""Test running a block filled with RETURNDATACOPY executions."""
max_code_size = fork.max_code_size()
- # Create the contract that will RETURN the data that will be used for RETURNDATACOPY.
- # Random-ish data is injected at different points in memory to avoid making the content
- # predictable. If `size` is 0, this helper contract won't be used.
+ # Create the contract that will RETURN the data that will be used for
+ # RETURNDATACOPY. Random-ish data is injected at different points in memory
+ # to avoid making the content predictable. If `size` is 0, this helper
+ # contract won't be used.
code = (
Op.MSTORE8(0, Op.GAS)
+ Op.MSTORE8(size // 2, Op.GAS)
@@ -220,7 +220,8 @@ def test_worst_returndatacopy(
)
helper_contract = pre.deploy_contract(code=code)
- # We create the contract that will be doing the RETURNDATACOPY multiple times.
+ # We create the contract that will be doing the RETURNDATACOPY multiple
+ # times.
returndata_gen = Op.STATICCALL(address=helper_contract) if size > 0 else Bytecode()
dst = 0 if fixed_dst else Op.MOD(Op.GAS, 7)
attack_iter = Op.RETURNDATACOPY(dst, Op.PUSH0, Op.RETURNDATASIZE)
@@ -236,8 +237,8 @@ def test_worst_returndatacopy(
# STATICCALL(address=helper_contract)
# JUMP(#)
# ```
- # The goal is that once per (big) loop iteration, the helper contract is called to
- # generate fresh returndata to continue calling RETURNDATACOPY.
+ # The goal is that once per (big) loop iteration, the helper contract is
+ # called to generate fresh returndata to continue calling RETURNDATACOPY.
max_iters_loop = (
max_code_size - 2 * len(returndata_gen) - len(jumpdest) - len(jump_back)
) // len(attack_iter)
diff --git a/tests/benchmark/test_worst_opcode.py b/tests/benchmark/test_worst_opcode.py
index e4e6e3bcf1b..9ee805a628b 100644
--- a/tests/benchmark/test_worst_opcode.py
+++ b/tests/benchmark/test_worst_opcode.py
@@ -1,8 +1,5 @@
"""
-abstract: Tests benchmark worst-case opcode scenarios.
- Tests benchmark worst-case opcode scenarios.
-
-Tests running worst-case opcodes scenarios for benchmarking purposes.
+Tests benchmark worst-case opcode scenarios.
"""
import pytest
diff --git a/tests/benchmark/test_worst_stateful_opcodes.py b/tests/benchmark/test_worst_stateful_opcodes.py
index f68783e61c3..f5cd6c0289f 100644
--- a/tests/benchmark/test_worst_stateful_opcodes.py
+++ b/tests/benchmark/test_worst_stateful_opcodes.py
@@ -1,7 +1,4 @@
"""
-abstract: Tests that benchmark EVMs for worst-case stateful opcodes.
- Tests that benchmark EVMs for worst-case stateful opcodes.
-
Tests that benchmark EVMs for worst-case stateful opcodes.
"""
@@ -55,14 +52,17 @@ def test_worst_address_state_cold(
env: Environment,
gas_benchmark_value: int,
):
- """Test running a block with as many stateful opcodes accessing cold accounts."""
+ """
+ Test running a block with as many stateful opcodes accessing cold accounts.
+ """
attack_gas_limit = gas_benchmark_value
gas_costs = fork.gas_costs()
intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
- # For calculation robustness, the calculation below ignores "glue" opcodes like PUSH and POP.
- # It should be considered a worst-case number of accounts, and a few of them might not be
- # targeted before the attacking transaction runs out of gas.
+ # For calculation robustness, the calculation below ignores "glue" opcodes
+ # like PUSH and POP. It should be considered a worst-case number of
+ # accounts, and a few of them might not be targeted before the attacking
+ # transaction runs out of gas.
num_target_accounts = (
attack_gas_limit - intrinsic_gas_cost_calc()
) // gas_costs.G_COLD_ACCOUNT_ACCESS
@@ -70,10 +70,10 @@ def test_worst_address_state_cold(
blocks = []
post = {}
- # Setup
- # The target addresses are going to be constructed (in the case of absent=False) and called
- # as addr_offset + i, where i is the index of the account. This is to avoid
- # collisions with the addresses indirectly created by the testing framework.
+ # Setup: the target addresses are going to be constructed (in the case of
+ # absent=False) and called as addr_offset + i, where i is the index of the
+ # account. This is to avoid collisions with the addresses indirectly
+ # created by the testing framework.
addr_offset = int.from_bytes(pre.fund_eoa(amount=0))
if not absent_accounts:
@@ -142,7 +142,10 @@ def test_worst_address_state_warm(
absent_target: bool,
gas_benchmark_value: int,
):
- """Test running a block with as many stateful opcodes doing warm access for an account."""
+ """
+ Test running a block with as many stateful opcodes doing warm access for an
+ account.
+ """
max_code_size = fork.max_code_size()
attack_gas_limit = gas_benchmark_value
@@ -253,7 +256,9 @@ def test_worst_storage_access_cold(
gas_benchmark_value: int,
tx_result: TransactionResult,
):
- """Test running a block with as many cold storage slot accesses as possible."""
+ """
+ Test running a block with as many cold storage slot accesses as possible.
+ """
gas_costs = fork.gas_costs()
intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
attack_gas_limit = gas_benchmark_value
@@ -280,7 +285,8 @@ def test_worst_storage_access_cold(
execution_code_body = Op.SSTORE(Op.DUP1, Op.DUP1)
loop_cost += gas_costs.G_VERY_LOW * 2
elif storage_action == StorageAction.WRITE_NEW_VALUE:
- # The new value 2^256-1 is guaranteed to be different from the initial value.
+ # The new value 2^256-1 is guaranteed to be different from the initial
+ # value.
execution_code_body = Op.SSTORE(Op.DUP2, Op.NOT(0))
loop_cost += gas_costs.G_VERY_LOW * 3
elif storage_action == StorageAction.READ:
@@ -336,8 +342,9 @@ def test_worst_storage_access_cold(
condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
)
- # To create the contract, we apply the slots_init code to initialize the storage slots
- # (int the case of absent_slots=False) and then copy the execution code to the contract.
+ # To create the contract, we apply the slots_init code to initialize the
+ # storage slots (in the case of absent_slots=False) and then copy the
+ # execution code to the contract.
creation_code = (
slots_init
+ Op.EXTCODECOPY(
@@ -393,7 +400,9 @@ def test_worst_storage_access_warm(
env: Environment,
gas_benchmark_value: int,
):
- """Test running a block with as many warm storage slot accesses as possible."""
+ """
+ Test running a block with as many warm storage slot accesses as possible.
+ """
attack_gas_limit = gas_benchmark_value
blocks = []
@@ -455,7 +464,10 @@ def test_worst_blockhash(
pre: Alloc,
gas_benchmark_value: int,
):
- """Test running a block with as many blockhash accessing oldest allowed block as possible."""
+ """
+ Test running a block with as many blockhash accessing oldest allowed block
+ as possible.
+ """
# Create 256 dummy blocks to fill the blockhash window.
blocks = [Block()] * 256
@@ -560,7 +572,10 @@ def test_worst_selfdestruct_existing(
env: Environment,
gas_benchmark_value: int,
):
- """Test running a block with as many SELFDESTRUCTs as possible for existing contracts."""
+ """
+ Test running a block with as many SELFDESTRUCTs as possible for existing
+ contracts.
+ """
attack_gas_limit = gas_benchmark_value
fee_recipient = pre.fund_eoa(amount=1)
@@ -574,12 +589,14 @@ def test_worst_selfdestruct_existing(
) + Op.RETURN(0, Op.EXTCODESIZE(selfdestructable_contract_addr))
initcode_address = pre.deploy_contract(code=initcode)
- # Calculate the number of contracts that can be deployed with the available gas.
+ # Calculate the number of contracts that can be deployed with the available
+ # gas.
gas_costs = fork.gas_costs()
intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
loop_cost = (
gas_costs.G_KECCAK_256 # KECCAK static cost
- + math.ceil(85 / 32) * gas_costs.G_KECCAK_256_WORD # KECCAK dynamic cost for CREATE2
+ + math.ceil(85 / 32) * gas_costs.G_KECCAK_256_WORD # KECCAK dynamic
+ # cost for CREATE2
+ gas_costs.G_VERY_LOW * 3 # ~MSTOREs+ADDs
+ gas_costs.G_COLD_ACCOUNT_ACCESS # CALL to self-destructing contract
+ gas_costs.G_SELF_DESTRUCT
@@ -598,9 +615,9 @@ def test_worst_selfdestruct_existing(
num_contracts = (attack_gas_limit - base_costs) // loop_cost
expected_benchmark_gas_used = num_contracts * loop_cost + base_costs
- # Create a factory that deployes a new SELFDESTRUCT contract instance pre-funded depending on
- # the value_bearing parameter. We use CREATE2 so the caller contract can easily reproduce
- # the addresses in a loop for CALLs.
+ # Create a factory that deploys a new SELFDESTRUCT contract instance pre-
+ # funded depending on the value_bearing parameter. We use CREATE2 so the
+ # caller contract can easily reproduce the addresses in a loop for CALLs.
factory_code = (
Op.EXTCODECOPY(
address=initcode_address,
@@ -621,7 +638,8 @@ def test_worst_selfdestruct_existing(
+ Op.RETURN(0, 32)
)
- required_balance = num_contracts if value_bearing else 0 # 1 wei per contract
+ # 1 wei per contract.
+ required_balance = num_contracts if value_bearing else 0
factory_address = pre.deploy_contract(code=factory_code, balance=required_balance)
factory_caller_code = Op.CALLDATALOAD(0) + While(
@@ -648,8 +666,8 @@ def test_worst_selfdestruct_existing(
+ While(
body=Op.POP(Op.CALL(address=Op.SHA3(32 - 20 - 1, 85)))
+ Op.MSTORE(32, Op.ADD(Op.MLOAD(32), 1)),
- # Only loop if we have enough gas to cover another iteration plus the
- # final storage gas.
+ # Only loop if we have enough gas to cover another iteration plus
+ # the final storage gas.
condition=Op.GT(Op.GAS, final_storage_gas + loop_cost),
)
+ Op.SSTORE(0, 42) # Done for successful tx execution assertion below.
@@ -700,8 +718,8 @@ def test_worst_selfdestruct_created(
gas_benchmark_value: int,
):
"""
- Test running a block with as many SELFDESTRUCTs as possible for deployed contracts in
- the same transaction.
+ Test running a block with as many SELFDESTRUCTs as possible for deployed
+ contracts in the same transaction.
"""
fee_recipient = pre.fund_eoa(amount=1)
env.fee_recipient = fee_recipient
@@ -779,7 +797,8 @@ def test_worst_selfdestruct_created(
sender=pre.fund_eoa(),
)
- post = {code_addr: Account(storage={0: 42})} # Check for successful execution.
+ # Check for successful execution.
+ post = {code_addr: Account(storage={0: 42})}
state_test(
env=env,
pre=pre,
@@ -798,7 +817,10 @@ def test_worst_selfdestruct_initcode(
env: Environment,
gas_benchmark_value: int,
):
- """Test running a block with as many SELFDESTRUCTs as possible executed in initcode."""
+ """
+ Test running a block with as many SELFDESTRUCTs as possible executed in
+ initcode.
+ """
fee_recipient = pre.fund_eoa(amount=1)
env.fee_recipient = fee_recipient
@@ -861,7 +883,8 @@ def test_worst_selfdestruct_initcode(
sender=pre.fund_eoa(),
)
+ # Check for successful execution.
+ post = {code_addr: Account(storage={0: 42})}
+ # execution.
state_test(
env=env,
pre=pre,
diff --git a/tests/berlin/eip2929_gas_cost_increases/__init__.py b/tests/berlin/eip2929_gas_cost_increases/__init__.py
index 9646b2063ca..baedc1cb649 100644
--- a/tests/berlin/eip2929_gas_cost_increases/__init__.py
+++ b/tests/berlin/eip2929_gas_cost_increases/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-2929: Gas cost increases for state access opcodes](https://eips.ethereum.org/EIPS/eip-2929)
- Test cases for [EIP-2929: Gas cost increases for state access opcodes](https://eips.ethereum.org/EIPS/eip-2929).
-""" # noqa: E501
+Tests for [EIP-2929: Gas cost increases for state access opcodes](https://eips.ethereum.org/EIPS/eip-2929).
+"""
diff --git a/tests/berlin/eip2929_gas_cost_increases/test_precompile_warming.py b/tests/berlin/eip2929_gas_cost_increases/test_precompile_warming.py
index 696f68b2d57..f8ad64a3d4b 100644
--- a/tests/berlin/eip2929_gas_cost_increases/test_precompile_warming.py
+++ b/tests/berlin/eip2929_gas_cost_increases/test_precompile_warming.py
@@ -1,7 +1,10 @@
"""
-abstract: Tests [EIP-2929: Gas cost increases for state access opcodes](https://eips.ethereum.org/EIPS/eip-2929)
- Test cases for [EIP-2929: Gas cost increases for state access opcodes](https://eips.ethereum.org/EIPS/eip-2929).
-""" # noqa: E501
+Tests EIP-2929 precompile warming behavior.
+
+Tests precompile warming behavior across fork transitions from
+[EIP-2929: Gas cost increases for state access
+opcodes](https://eips.ethereum.org/EIPS/eip-2929).
+"""
from typing import Iterator, Tuple
@@ -31,14 +34,17 @@ def precompile_addresses_in_predecessor_successor(
fork: Fork,
) -> Iterator[Tuple[Address, bool, bool]]:
"""
- Yield the addresses of precompiled contracts and whether they existed in the parent fork.
+ Yield the addresses of precompiled contracts and whether they existed in
+ the parent fork.
Args:
- fork (Fork): The transition fork instance containing precompiled contract information.
+ fork (Fork): The transition fork instance containing precompiled
+ contract information.
Yields:
- Iterator[Tuple[str, bool]]: A tuple containing the address in hexadecimal format and a
- boolean indicating whether the address has existed in the predecessor.
+ Iterator[Tuple[str, bool]]: A tuple containing the address in
+ hexadecimal format and a boolean indicating whether the address
+ has existed in the predecessor.
"""
precompile_range = range(0x01, 0x100)
@@ -84,14 +90,16 @@ def test_precompile_warming(
"""
Call BALANCE of a precompile addresses before and after a fork.
- According to EIP-2929, when a transaction begins, accessed_addresses is initialized to include:
+ According to EIP-2929, when a transaction begins, accessed_addresses is
+ initialized to include:
- tx.sender, tx.to
- and the set of all precompiles
This test verifies that:
- 1. Precompiles that exist in the predecessor fork are always "warm" (lower gas cost)
- 2. New precompiles added in a fork are "cold" before the fork and become "warm" after
-
+ 1. Precompiles that exist in the predecessor fork are always "warm" (lower
+ gas cost).
+ 2. New precompiles added in a fork are "cold" before the fork and become
+ "warm" after.
"""
sender = pre.fund_eoa()
call_cost_slot = 0
diff --git a/tests/berlin/eip2930_access_list/__init__.py b/tests/berlin/eip2930_access_list/__init__.py
index 24be6dfd092..25187646f76 100644
--- a/tests/berlin/eip2930_access_list/__init__.py
+++ b/tests/berlin/eip2930_access_list/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-2930: Optional access lists](https://eips.ethereum.org/EIPS/eip-2930)
- Test cases for [EIP-2930: Optional access lists](https://eips.ethereum.org/EIPS/eip-2930).
+Tests for [EIP-2930: Optional access lists](https://eips.ethereum.org/EIPS/eip-2930).
"""
diff --git a/tests/berlin/eip2930_access_list/test_tx_intrinsic_gas.py b/tests/berlin/eip2930_access_list/test_tx_intrinsic_gas.py
index 77ec323172e..cb5fe94b96c 100644
--- a/tests/berlin/eip2930_access_list/test_tx_intrinsic_gas.py
+++ b/tests/berlin/eip2930_access_list/test_tx_intrinsic_gas.py
@@ -1,6 +1,8 @@
"""
-abstract: Tests [EIP-2930: Access list transaction](https://eips.ethereum.org/EIPS/eip-2930).
-Original test by Ori: https://github.com/ethereum/tests/blob/v15.0/src/GeneralStateTestsFiller/stEIP1559/intrinsicGen.js.
+Tests [EIP-2930: Access list transaction](https://eips.ethereum.org/EIPS/eip-2930).
+
+Original test by Ori:
+https://github.com/ethereum/tests/blob/v15.0/src/GeneralStateTestsFiller/stEIP1559/intrinsicGen.js.
"""
from typing import List
@@ -163,7 +165,8 @@ def test_tx_intrinsic_gas(
if data_floor_gas_cost > intrinsic_gas_cost:
exception = TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST
elif data_floor_gas_cost == intrinsic_gas_cost:
- # Depending on the implementation, client might raise either exception.
+ # Depending on the implementation, client might raise either
+ # exception.
exception = [
TransactionException.INTRINSIC_GAS_TOO_LOW,
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST,
diff --git a/tests/byzantium/eip198_modexp_precompile/helpers.py b/tests/byzantium/eip198_modexp_precompile/helpers.py
index 9cc44fd25a1..6439c3bfe01 100644
--- a/tests/byzantium/eip198_modexp_precompile/helpers.py
+++ b/tests/byzantium/eip198_modexp_precompile/helpers.py
@@ -1,4 +1,6 @@
-"""Helper functions for the EIP-198 ModExp precompile tests."""
+"""
+Helper functions for the EIP-198 ModExp precompile tests.
+"""
from typing import Tuple
@@ -9,15 +11,16 @@
class ModExpInput(TestParameterGroup):
"""
- Helper class that defines the MODEXP precompile inputs and creates the
- call data from them.
+ Helper class that defines the MODEXP precompile inputs and creates the call
+ data from them.
Attributes:
base (str): The base value for the MODEXP precompile.
exponent (str): The exponent value for the MODEXP precompile.
modulus (str): The modulus value for the MODEXP precompile.
- extra_data (str): Defines extra padded data to be added at the end of the calldata
- to the precompile. Defaults to an empty string.
+ extra_data (str): Defines extra padded data to be added at the end of
+ the calldata to the precompile.
+ Defaults to an empty string.
"""
@@ -134,9 +137,11 @@ class ModExpOutput(TestParameterGroup):
Expected test result.
Attributes:
- call_success (bool): The return_code from CALL, 0 indicates unsuccessful call
- (out-of-gas), 1 indicates call succeeded.
- returned_data (str): The output returnData is the expected output of the call
+ call_success (bool): The return_code from CALL, 0 indicates
+ unsuccessful call (out-of-gas), 1 indicates call
+ succeeded.
+ returned_data (str): The output returnData is the expected
+ output of the call.
"""
diff --git a/tests/byzantium/eip198_modexp_precompile/test_modexp.py b/tests/byzantium/eip198_modexp_precompile/test_modexp.py
index 859a73b8e19..978963ec6d5 100644
--- a/tests/byzantium/eip198_modexp_precompile/test_modexp.py
+++ b/tests/byzantium/eip198_modexp_precompile/test_modexp.py
@@ -1,7 +1,8 @@
"""
-abstract: Test [EIP-198: MODEXP Precompile](https://eips.ethereum.org/EIPS/eip-198)
- Tests the MODEXP precompile, located at address 0x0000..0005. Test cases from the EIP are
- labelled with `EIP-198-caseX` in the test id.
+Test [EIP-198: MODEXP Precompile](https://eips.ethereum.org/EIPS/eip-198).
+
+Tests the MODEXP precompile, located at address 0x0000..0005. Test cases
+from the EIP are labelled with `EIP-198-caseX` in the test id.
"""
import pytest
@@ -94,7 +95,8 @@
),
id="EIP-198-case2",
),
- pytest.param( # Note: This is the only test case which goes out-of-gas.
+ pytest.param( # Note: This is the only test case
+ # which goes out-of-gas.
Bytes(
"0000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000020"
@@ -135,7 +137,8 @@
id="EIP-198-case5-raw-input",
),
],
- ids=lambda param: param.__repr__(), # only required to remove parameter names (input/output)
+ ids=lambda param: param.__repr__(), # only required to remove parameter
+ # names (input/output)
)
def test_modexp(
state_test: StateTestFiller,
@@ -153,17 +156,18 @@ def test_modexp(
# Store the returned CALL status (success = 1, fail = 0) into slot 0:
+ Op.SSTORE(
0,
- # Setup stack to CALL into ModExp with the CALLDATA and CALL into it (+ pop value)
+ # Setup stack to CALL into ModExp with the CALLDATA and CALL into
+ # it (+ pop value)
Op.CALL(Op.GAS(), 0x05, 0, 0, Op.CALLDATASIZE(), 0, 0),
)
- # Store contract deployment code to deploy the returned data from ModExp as
- # contract code (16 bytes)
+ # Store contract deployment code to deploy the returned data from
+ # ModExp as contract code (16 bytes)
+ Op.MSTORE(
0,
(
- # Need to `ljust` this PUSH32 in order to ensure the code starts
- # in memory at offset 0 (memory right-aligns stack items which are not
- # 32 bytes)
+ # Need to `ljust` this PUSH32 in order to ensure the code
+ # starts in memory at offset 0 (memory right-aligns stack items
+ # which are not 32 bytes)
Op.PUSH32(
bytes(
Op.CODECOPY(0, 16, Op.SUB(Op.CODESIZE(), 16))
@@ -172,9 +176,11 @@ def test_modexp(
)
),
)
- # RETURNDATACOPY the returned data from ModExp into memory (offset 16 bytes)
+ # RETURNDATACOPY the returned data from ModExp into memory (offset 16
+ # bytes)
+ Op.RETURNDATACOPY(16, 0, Op.RETURNDATASIZE())
- # CREATE contract with the deployment code + the returned data from ModExp
+ # CREATE contract with the deployment code + the returned data from
+ # ModExp
+ Op.CREATE(0, 0, Op.ADD(16, Op.RETURNDATASIZE()))
# STOP (handy for tracing)
+ Op.STOP(),
diff --git a/tests/cancun/eip1153_tstore/__init__.py b/tests/cancun/eip1153_tstore/__init__.py
index 1ef8a2efb71..dabae5a7d57 100644
--- a/tests/cancun/eip1153_tstore/__init__.py
+++ b/tests/cancun/eip1153_tstore/__init__.py
@@ -1,4 +1,6 @@
-"""EIP-1153 Tests."""
+"""
+[EIP-1153](https://eips.ethereum.org/EIPS/eip-1153) Tests.
+"""
from enum import Enum, unique
from pprint import pprint
@@ -13,8 +15,8 @@ class PytestParameterEnum(Enum):
"""
Helper class for defining Pytest parameters used in test cases.
- This class helps define enum `value`s as `pytest.param` objects that then can
- be used to create a parametrize decorator that can be applied to tests,
+ This class helps define enum `value`s as `pytest.param` objects that then
+ can be used to create a parametrize decorator that can be applied to tests,
for example,
```python
@@ -23,25 +25,23 @@ def test_function(test_value):
pass
```
- Classes which derive from this class must define each test case as a different enum
- field with a dictionary as value.
+ Classes which derive from this class must define each test case as a
+ different enum field with a dictionary as value.
The dictionary must contain:
- i. A `description` key with a string value describing the test case.
- ii. (Optional) A `pytest_marks` key with a single mark or list of pytest
- marks to apply to the test case. For example,
-
- ```
- pytest_marks=pytest.mark.xfail
- ```
- or
-
- ```
- pytest_marks=[pytest.mark.xfail, pytest.mark.skipif]
- ```
- iii. (Optional) An `id` key with the name of the test.
-
- The rest of the keys in the dictionary are the parameters of the test case.
+ i. A `description` key with a string value describing the test case.
+ ii. (Optional) A `pytest_marks` key with a single mark or list of pytest
+ marks to apply to the test case. For example:
+ ```
+ pytest_marks=pytest.mark.xfail
+ ```
+ or
+ ```
+ pytest_marks=[pytest.mark.xfail, pytest.mark.skipif]
+ ```
+ iii. (Optional) An `id` key with the name of the test.
+
+ The rest of the keys in the dictionary are the parameters of the test case.
The test case ID is set as the enum name converted to lowercase.
"""
@@ -67,11 +67,15 @@ def param(self, names: List[str]):
@classmethod
def special_keywords(cls) -> List[str]:
- """Return the special dictionary keywords that are not test parameters."""
+ """
+ Return the special dictionary keywords that are not test parameters.
+ """
return ["description", "pytest_marks", "pytest_id"]
def names(self) -> List[str]:
- """Return the names of all the parameters included in the enum value dict."""
+ """
+ Return the names of all the parameters included in the enum value dict.
+ """
return sorted([k for k in self._value_.keys() if k not in self.special_keywords()])
@property
diff --git a/tests/cancun/eip1153_tstore/test_tload_calls.py b/tests/cancun/eip1153_tstore/test_tload_calls.py
index 27837bc797c..120f2060d65 100644
--- a/tests/cancun/eip1153_tstore/test_tload_calls.py
+++ b/tests/cancun/eip1153_tstore/test_tload_calls.py
@@ -1,6 +1,5 @@
"""
-Ethereum Transient Storage EIP Tests
-https://eips.ethereum.org/EIPS/eip-1153.
+[EIP-1153](https://eips.ethereum.org/EIPS/eip-1153) Transient Storage tests.
"""
import pytest
@@ -26,11 +25,11 @@ def test_tload_calls(state_test: StateTestFiller, pre: Alloc, call_type: Op):
"""
Ported .json vectors.
- (04_tloadAfterCallFiller.yml)
- Loading a slot after a call to another contract is 0.
+ (04_tloadAfterCallFiller.yml) Loading a slot after a call to another
+ contract is 0.
- (12_tloadDelegateCallFiller.yml)
- delegatecall reads transient storage in the context of the current address
+ (12_tloadDelegateCallFiller.yml) delegatecall reads transient storage in
+ the context of the current address
"""
# Storage variables
slot_a_tload_after_subcall_result = 0
@@ -71,10 +70,12 @@ def make_call(call_type: Op, address: Address) -> Bytecode:
post = {
address_to: Account(
storage={
- # other calls don't change context, there for tload updated in this account
+ # other calls don't change context, therefore tload updated in
+ # this account
slot_a_tload_after_subcall_result: 10 if call_type == Op.CALL else 20,
slot_a_subcall_result: 1,
- # since context unchanged the subcall works as if continued execution
+ # since context unchanged the subcall works as if continued
+ # execution
slot_b_subcall_tload_result: 0 if call_type == Op.CALL else 10,
slot_b_subcall_updated_tload_result: 0 if call_type == Op.CALL else 20,
}
diff --git a/tests/cancun/eip1153_tstore/test_tload_reentrancy.py b/tests/cancun/eip1153_tstore/test_tload_reentrancy.py
index 4d66ea5115c..e0d7cc99095 100644
--- a/tests/cancun/eip1153_tstore/test_tload_reentrancy.py
+++ b/tests/cancun/eip1153_tstore/test_tload_reentrancy.py
@@ -1,6 +1,5 @@
"""
-Ethereum Transient Storage EIP Tests
-https://eips.ethereum.org/EIPS/eip-1153.
+[EIP-1153](https://eips.ethereum.org/EIPS/eip-1153) Transient Storage tests.
"""
from enum import Enum
@@ -53,8 +52,8 @@ def test_tload_reentrancy(
"""
Ported .json vectors.
- (05_tloadReentrancyFiller.yml)
- Reentrant calls access the same transient storage
+ (05_tloadReentrancyFiller.yml) Reentrant calls access the same transient
+ storage
"""
tload_value = 44
empty_value = 0
@@ -138,7 +137,8 @@ def make_call(call_type: Op) -> Bytecode:
slot_tload_in_subcall_result: (
0xFF # if call OOG, we fail to obtain the result
if call_return == Om.OOG
- # else delegate and callcode are working in the same context so tload works
+ # else delegate and callcode are working in the same
+ # context so tload works
else (
tload_value
if call_type == Op.DELEGATECALL or call_type == Op.CALLCODE
diff --git a/tests/cancun/eip1153_tstore/test_tstorage.py b/tests/cancun/eip1153_tstore/test_tstorage.py
index 990cd804e3f..95b576bd0fd 100644
--- a/tests/cancun/eip1153_tstore/test_tstorage.py
+++ b/tests/cancun/eip1153_tstore/test_tstorage.py
@@ -1,9 +1,9 @@
"""
-abstract: Tests [EIP-1153: Transient Storage Opcodes](https://eips.ethereum.org/EIPS/eip-1153)
- Test [EIP-1153: Transient Storage Opcodes](https://eips.ethereum.org/EIPS/eip-1153). Ports
- and extends some tests from
- [ethereum/tests/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage).
-""" # noqa: E501
+EIP-1153 Transient Storage opcode tests.
+
+Ports and extends some tests from
+[ethereum/tests/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage).
+"""
from enum import unique
@@ -33,11 +33,15 @@
def test_transient_storage_unset_values(state_test: StateTestFiller, pre: Alloc):
"""
- Test that tload returns zero for unset values. Loading an arbitrary value is
- 0 at beginning of transaction: TLOAD(x) is 0.
-
- Based on [ethereum/tests/.../01_tloadBeginningTxnFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/01_tloadBeginningTxnFiller.yml)",
- """ # noqa: E501
+ Test that tload returns zero for unset values. Loading an arbitrary value
+ is 0 at beginning of transaction: TLOAD(x) is 0.
+
+ Based on
+ [ethereum/tests/.../01_tloadBeginningTxnFiller.yml]
+ (https://github.com/ethereum/tests/blob/
+ 9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/
+ stEIP1153-transientStorage/01_tloadBeginningTxnFiller.yml)
+ """
env = Environment()
slots_under_test = [0, 1, 2, 2**128, 2**256 - 1]
@@ -69,8 +73,12 @@ def test_tload_after_tstore(state_test: StateTestFiller, pre: Alloc):
Loading after storing returns the stored value: TSTORE(x, y), TLOAD(x)
returns y.
- Based on [ethereum/tests/.../02_tloadAfterTstoreFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/02_tloadAfterTstoreFiller.yml)",
- """ # noqa: E501
+ Based on
+ [ethereum/tests/.../02_tloadAfterTstoreFiller.yml]
+ (https://github.com/ethereum/tests/blob/
+ 9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/
+ stEIP1153-transientStorage/02_tloadAfterTstoreFiller.yml)
+ """
env = Environment()
slots_under_test = [0, 1, 2, 2**128, 2**256 - 1]
@@ -103,8 +111,13 @@ def test_tload_after_sstore(state_test: StateTestFiller, pre: Alloc):
Loading after storing returns the stored value: TSTORE(x, y), TLOAD(x)
returns y.
- Based on [ethereum/tests/.../18_tloadAfterStoreFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/18_tloadAfterStoreFiller.yml)",
- """ # noqa: E501
+ Based on
+ [ethereum/tests/.../18_tloadAfterStoreFiller.yml]
+ (https://github.com/ethereum/tests/blob/
+ 9b00b68593f5869eb51a6659e1cc983e875e616b/src/
+ EIPTestsFiller/StateTests/stEIP1153-transientStorage/
+ 18_tloadAfterStoreFiller.yml)
+ """
env = Environment()
slots_under_test = [1, 3, 2**128, 2**256 - 1]
@@ -143,8 +156,12 @@ def test_tload_after_tstore_is_zero(state_test: StateTestFiller, pre: Alloc):
"""
Test that tload returns zero after tstore is called with zero.
- Based on [ethereum/tests/.../03_tloadAfterStoreIs0Filler.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/03_tloadAfterStoreIs0Filler.yml)",
- """ # noqa: E501
+ Based on [ethereum/tests/.../03_tloadAfterStoreIs0Filler.yml]
+ (https://github.com/ethereum/tests/blob/
+ 9b00b68593f5869eb51a6659e1cc983e875e616b/src/
+ EIPTestsFiller/StateTests/
+ stEIP1153-transientStorage/03_tloadAfterStoreIs0Filler.yml)
+ """
env = Environment()
slots_to_write = [1, 4, 2**128, 2**256 - 2]
diff --git a/tests/cancun/eip1153_tstore/test_tstorage_clear_after_tx.py b/tests/cancun/eip1153_tstore/test_tstorage_clear_after_tx.py
index 8cf988bd763..fc7e75fd486 100644
--- a/tests/cancun/eip1153_tstore/test_tstorage_clear_after_tx.py
+++ b/tests/cancun/eip1153_tstore/test_tstorage_clear_after_tx.py
@@ -1,7 +1,4 @@
-"""
-Ethereum Transient Storage EIP Tests
-https://eips.ethereum.org/EIPS/eip-1153.
-"""
+"""EIP-1153 Transient Storage tests."""
from typing import Optional
@@ -34,10 +31,10 @@ def test_tstore_clear_after_deployment_tx(
evm_code_type: EVMCodeType,
):
"""
- First creates a contract, which TSTOREs a value 1 in slot 1.
- After creating the contract, a new tx will call this contract, storing TLOAD(1) into slot 1.
- The transient storage should be cleared after creating the contract (at tx-level), so
- the storage should stay empty.
+ First creates a contract, which TSTOREs a value 1 in slot 1. After creating
+ the contract, a new tx will call this contract, storing TLOAD(1) into slot
+ 1. The transient storage should be cleared after creating the contract (at
+ tx-level), so the storage should stay empty.
"""
env = Environment()
@@ -81,9 +78,9 @@ def test_tstore_clear_after_tx(
pre: Alloc,
):
"""
- First SSTOREs the TLOAD value of key 1 in slot 1. Then, it TSTOREs 1 in slot 1.
- The second tx will re-call the contract. The storage should stay empty,
- because the transient storage is cleared after the transaction.
+ First SSTOREs the TLOAD value of key 1 in slot 1. Then, it TSTOREs 1 in
+ slot 1. The second tx will re-call the contract. The storage should stay
+ empty, because the transient storage is cleared after the transaction.
"""
env = Environment()
diff --git a/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py b/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py
index 5ce6c65c686..efe83bcd13d 100644
--- a/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py
+++ b/tests/cancun/eip1153_tstore/test_tstorage_create_contexts.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153)
- Test cases for `TSTORE` and `TLOAD` opcode calls in contract initcode.
-""" # noqa: E501
+Test transient storage in contract creation contexts.
+"""
from enum import unique
@@ -32,8 +31,8 @@
@unique
class InitcodeTestCases(PytestParameterEnum):
"""
- Defines test cases for transient storage opcode usage in contract constructor
- and deployed code.
+ Defines test cases for transient storage opcode usage in contract
+ constructor and deployed code.
"""
ONLY_CONSTRUCTOR_CODE = {
@@ -41,9 +40,11 @@ class InitcodeTestCases(PytestParameterEnum):
"Test TLOAD and TSTORE behavior in contract constructor without deployed code"
),
"constructor_code": (
- # test creator's transient storage inaccessible from constructor code
+ # test creator's transient storage inaccessible from constructor
+ # code
Op.SSTORE(0, Op.TLOAD(0))
- # test constructor code can use its own transient storage & creator storage unaffected
+ # test constructor code can use its own transient storage & creator
+ # storage unaffected
+ Op.TSTORE(0, 1)
+ Op.SSTORE(1, Op.TLOAD(0))
),
@@ -53,13 +54,15 @@ class InitcodeTestCases(PytestParameterEnum):
IN_CONSTRUCTOR_AND_DEPLOYED_CODE = {
"description": "Test TLOAD and TSTORE behavior in contract constructor and deployed code",
"constructor_code": (
- # test creator's transient storage inaccessible from constructor code
+ # test creator's transient storage inaccessible from constructor
+ # code
Op.SSTORE(0, Op.TLOAD(0))
),
"deploy_code": (
# test creator's transient storage inaccessible from deployed code
Op.SSTORE(1, Op.TLOAD(0))
- # test deploy code can use its own transient storage & creator storage unaffected
+ # test deploy code can use its own transient storage & creator
+ # storage unaffected
+ Op.TSTORE(1, 1)
+ Op.SSTORE(2, Op.TLOAD(1))
),
@@ -68,15 +71,18 @@ class InitcodeTestCases(PytestParameterEnum):
ACROSS_CONSTRUCTOR_AND_DEPLOYED_CODE_V0 = {
"description": ("Test TSTORE behavior across contract constructor and deploy code. "),
"constructor_code": (
- # constructor code should be able to store its own transient storage
+ # constructor code should be able to store its own transient
+ # storage
Op.TSTORE(1, 1)
),
"deploy_code": (
# test creator's transient storage inaccessible from deployed code
Op.SSTORE(0, Op.TLOAD(0))
- # test deploy code can use its own transient storage stored from constructor code
+ # test deploy code can use its own transient storage stored from
+ # constructor code
+ Op.SSTORE(1, Op.TLOAD(1))
- # test deploy code can use its own transient storage stored from deployed code
+ # test deploy code can use its own transient storage stored from
+ # deployed code
+ Op.TSTORE(2, 1)
+ Op.SSTORE(2, Op.TLOAD(2))
),
@@ -89,17 +95,19 @@ class InitcodeTestCases(PytestParameterEnum):
"constructor_code": (
# test creator's transient storage inaccessible from constructor
Op.SSTORE(0, Op.TLOAD(0))
- # constructor code should be able to use its own transient storage / creator storage
- # unaffected
+ # constructor code should be able to use its own transient storage
+ # / creator storage unaffected
+ Op.TSTORE(1, 1)
+ Op.SSTORE(1, Op.TLOAD(1))
),
"deploy_code": (
# test creator's transient storage inaccessible from deployed code
Op.SSTORE(2, Op.TLOAD(0))
- # test deploy code can use its own transient storage stored from constructor code
+ # test deploy code can use its own transient storage stored from
+ # constructor code
+ Op.SSTORE(3, Op.TLOAD(1))
- # test deploy code can use its own transient storage stored from deployed code
+ # test deploy code can use its own transient storage stored from
+ # deployed code
+ Op.TSTORE(2, 1)
+ Op.SSTORE(4, Op.TLOAD(2))
),
@@ -113,7 +121,8 @@ class InitcodeTestCases(PytestParameterEnum):
"deploy_code": (
# test creator's transient storage inaccessible from deployed code
Op.SSTORE(0, Op.TLOAD(0))
- # test deployed code can use its own transient storage & creator storage unaffected
+ # test deployed code can use its own transient storage & creator
+ # storage unaffected
+ Op.TSTORE(0, 1)
+ Op.SSTORE(1, Op.TLOAD(0))
),
@@ -126,10 +135,12 @@ class InitcodeTestCases(PytestParameterEnum):
class TestTransientStorageInContractCreation:
"""
Test transient storage in contract creation contexts.
- - TSTORE/TLOAD in initcode should not be able to access the creator's transient storage.
- - TSTORE/TLOAD in initcode should be able to access the created contract's transient
- storage.
- - TSTORE/TLOAD in creator contract should be able to use its own transient storage.
+ - TSTORE/TLOAD in initcode should not be able to access the creator's
+ transient storage.
+ - TSTORE/TLOAD in initcode should be able to access the created contract's
+ transient storage.
+ - TSTORE/TLOAD in creator contract should be able to use its own
+ transient storage.
"""
@pytest.fixture()
@@ -165,8 +176,8 @@ def creator_contract_code( # noqa: D102
)
),
)
- # Save the state of transient storage following call to storage; the transient
- # storage should not have been overwritten
+ # Save the state of transient storage following call to storage;
+ # the transient storage should not have been overwritten
+ Op.SSTORE(0, Op.TLOAD(0))
+ Op.SSTORE(1, Op.TLOAD(1))
+ Op.SSTORE(2, Op.TLOAD(2))
diff --git a/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py b/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py
index 99c83f05566..b710ff493e7 100644
--- a/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py
+++ b/tests/cancun/eip1153_tstore/test_tstorage_execution_contexts.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153)
- Test cases for `TSTORE` and `TLOAD` opcode calls in different execution contexts.
-""" # noqa: E501
+Test EIP-1153 Transient Storage in execution contexts.
+"""
from enum import EnumMeta, unique
from typing import Dict, Mapping
@@ -33,9 +32,9 @@
class DynamicCallContextTestCases(EnumMeta):
"""
- Create dynamic transient storage test cases for contract sub-calls
- using CALLCODE and DELEGATECALL (these opcodes share the same
- signatures and test cases).
+ Create dynamic transient storage test cases for contract sub-calls using
+ CALLCODE and DELEGATECALL (these opcodes share the same signatures and test
+ cases).
"""
def __new__(cls, name, bases, classdict): # noqa: D102
@@ -258,14 +257,14 @@ class CallContextTestCases(PytestParameterEnum, metaclass=DynamicCallContextTest
+ Op.SSTORE(1, Op.TLOAD(0))
+ Op.STOP
),
- "callee_bytecode": Op.TSTORE(0, unchecked=True) # calling with stack underflow still fails
- + Op.STOP,
+ # calling with stack underflow still fails
+ "callee_bytecode": Op.TSTORE(0, unchecked=True) + Op.STOP,
"expected_caller_storage": {0: 0, 1: 420},
"expected_callee_storage": {},
}
STATICCALL_CAN_CALL_TLOAD = {
- # TODO: Not a very useful test; consider removing after implementing ethereum/tests
- # staticcall tests
+ # TODO: Not a very useful test; consider removing after implementing
+ # ethereum/tests staticcall tests
"pytest_id": "staticcalled_context_can_call_tload",
"description": ("A STATICCALL callee can not use transient storage."),
"caller_bytecode": (
@@ -274,7 +273,8 @@ class CallContextTestCases(PytestParameterEnum, metaclass=DynamicCallContextTest
+ Op.SSTORE(1, Op.TLOAD(0))
+ Op.STOP
),
- "callee_bytecode": Op.TLOAD(0) + Op.STOP, # calling tload does not cause the call to fail
+ # calling tload does not cause the call to fail
+ "callee_bytecode": Op.TLOAD(0) + Op.STOP,
"expected_caller_storage": {0: 1, 1: 420},
"expected_callee_storage": {},
}
diff --git a/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py b/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py
index 18fed8cea93..05044ef8ac8 100644
--- a/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py
+++ b/tests/cancun/eip1153_tstore/test_tstorage_reentrancy_contexts.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153)
- Test cases for `TSTORE` and `TLOAD` opcode calls in reentrancy contexts.
-""" # noqa: E501
+Tests transient storage in reentrancy contexts.
+"""
from enum import EnumMeta, unique
from typing import Dict
@@ -59,11 +58,15 @@ def __new__(cls, name, bases, classdict): # noqa: D102
classdict[f"TSTORE_BEFORE_{opcode._name_}_HAS_NO_EFFECT"] = {
"description": (
- f"{opcode._name_} undoes the transient storage write from the failed call: "
- f"TSTORE(x, y), CALL(self, ...), TSTORE(x, z), {opcode._name_}, TLOAD(x) "
- "returns y."
- "",
- "Based on [ethereum/tests/.../08_revertUndoesTransientStoreFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/08_revertUndoesTransientStoreFiller.yml)", # noqa: E501
+ f"{opcode._name_} undoes the transient storage write "
+ "from the failed call: "
+ "TSTORE(x, y), CALL(self, ...), TSTORE(x, z), "
+ f"{opcode._name_}, TLOAD(x) returns y.",
+ "Based on [ethereum/tests/.../08_revertUndoes"
+ "TransientStoreFiller.yml](https://github.com/ethereum/"
+ "tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src"
+ "/EIPTestsFiller/StateTests/stEIP1153-transientStorage/"
+ "08_revertUndoesTransientStoreFiller.yml)",
),
"bytecode": Conditional(
condition=SETUP_CONDITION,
@@ -84,11 +87,13 @@ def __new__(cls, name, bases, classdict): # noqa: D102
classdict[f"{opcode._name_}_UNDOES_ALL"] = {
"description": (
- f"{opcode._name_} undoes all the transient storage writes to the same key ",
- "from a failed call. TSTORE(x, y), CALL(self, ...), TSTORE(x, z), ",
- f"TSTORE(x, z + 1) {opcode._name_}, TLOAD(x) returns y.",
+ f"{opcode._name_} undoes all the transient storage writes "
+ "to the same key from a failed call. "
+ "TSTORE(x, y), CALL(self, ...), TSTORE(x, z), "
+ f"TSTORE(x, z + 1) {opcode._name_}, TLOAD(x) returns y."
"",
- "Based on [ethereum/tests/.../09_revertUndoesAllFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/09_revertUndoesAllFiller.yml).", # noqa: E501
+ "Based on "
+ "[ethereum/tests/.../09_revertUndoesAllFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/09_revertUndoesAllFiller.yml).",
),
"bytecode": Conditional(
condition=SETUP_CONDITION,
@@ -106,7 +111,8 @@ def __new__(cls, name, bases, classdict): # noqa: D102
),
# reenter
if_false=(
- # store twice and revert/invalid; none of the stores should take effect
+ # store twice and revert/invalid; none of the stores
+ # should take effect
Op.TSTORE(0xFE, 0x201)
+ Op.TSTORE(0xFE, 0x202)
+ Op.TSTORE(0xFF, 0x201)
@@ -128,11 +134,17 @@ def __new__(cls, name, bases, classdict): # noqa: D102
classdict[f"{opcode._name_}_UNDOES_TSTORAGE_AFTER_SUCCESSFUL_CALL"] = {
"description": (
- f"{opcode._name_} undoes transient storage writes from inner calls that "
- "successfully returned. TSTORE(x, y), CALL(self, ...), CALL(self, ...), "
- f"TSTORE(x, y + 1), RETURN, {opcode._name_}, TLOAD(x) returns y."
- "",
- "Based on [ethereum/tests/.../10_revertUndoesStoreAfterReturnFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/10_revertUndoesStoreAfterReturnFiller.yml).", # noqa: E501
+ f"{opcode._name_} undoes transient storage writes from "
+ "inner calls that successfully returned. "
+ "TSTORE(x, y), CALL(self, ...), CALL(self, ...), "
+ f"TSTORE(x, y + 1), RETURN, {opcode._name_}, TLOAD(x) "
+ "returns y.",
+ "Based on [ethereum/tests/.../"
+ "10_revertUndoesStoreAfterReturnFiller.yml]"
+ "(https://github.com/ethereum/tests/blob/"
+ "9b00b68593f5869eb51a6659e1cc983e875e616b/src/"
+ "EIPTestsFiller/StateTests/stEIP1153-transientStorage/"
+ "10_revertUndoesStoreAfterReturnFiller.yml).",
),
"bytecode": Switch(
default_action=( # setup; make first reentrant sub-call
@@ -149,11 +161,13 @@ def __new__(cls, name, bases, classdict): # noqa: D102
ret_size=32,
),
)
- + Op.SSTORE(1, Op.MLOAD(32)) # should be 1 (successful call)
+ + Op.SSTORE(1, Op.MLOAD(32)) # should be 1 (successful
+ # call)
+ Op.SSTORE(3, Op.TLOAD(0xFF))
),
cases=[
- # the first, reentrant call, which reverts/receives invalid
+ # the first, reentrant call, which reverts/receives
+ # invalid
CalldataCase(
value=2,
action=(
@@ -162,7 +176,8 @@ def __new__(cls, name, bases, classdict): # noqa: D102
+ opcode_call
),
),
- # the second, reentrant call, which returns successfully
+ # the second, reentrant call, which returns
+ # successfully
CalldataCase(
value=3,
action=Op.TSTORE(0xFF, 0x101),
@@ -184,7 +199,10 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
"Reentrant calls access the same transient storage: "
"TSTORE(x, y), CALL(self, ...), TLOAD(x) returns y."
""
- "Based on [ethereum/tests/.../05_tloadReentrancyFiller.yml](https://github.com/ethereum/tests/tree/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage).", # noqa: E501
+ "Based on [ethereum/tests/.../05_tloadReentrancyFiller.yml]"
+ "(https://github.com/ethereum/tests/tree/"
+ "9b00b68593f5869eb51a6659e1cc983e875e616b/src/"
+ "EIPTestsFiller/StateTests/stEIP1153-transientStorage).",
),
"bytecode": Conditional(
condition=SETUP_CONDITION,
@@ -197,10 +215,16 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
}
TLOAD_AFTER_REENTRANT_TSTORE = {
"description": (
- "Successfully returned calls do not revert transient storage writes: "
- "TSTORE(x, y), CALL(self, ...), TSTORE(x, z), RETURN, TLOAD(x) returns z."
- ""
- "Based on [ethereum/tests/.../07_tloadAfterReentrancyStoreFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/07_tloadAfterReentrancyStoreFiller.yml).", # noqa: E501
+ "Successfully returned calls do not revert transient "
+ "storage writes: "
+ "TSTORE(x, y), CALL(self, ...), TSTORE(x, z), RETURN, TLOAD(x) "
+ "returns z."
+ "Based on [ethereum/tests/.../"
+ "07_tloadAfterReentrancyStoreFiller.yml](https://github.com/"
+ "ethereum/tests/blob/"
+ "9b00b68593f5869eb51a6659e1cc983e875e616b/src/"
+ "EIPTestsFiller/StateTests/stEIP1153-transientStorage/"
+ "07_tloadAfterReentrancyStoreFiller.yml).",
),
"bytecode": Conditional(
condition=SETUP_CONDITION,
@@ -209,7 +233,8 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
Op.TSTORE(0xFF, 0x100)
+ Op.SSTORE(1, Op.TLOAD(0xFF))
+ REENTRANT_CALL
- + Op.SSTORE(2, Op.TLOAD(0xFF)) # test value updated during reentrant call
+ + Op.SSTORE(2, Op.TLOAD(0xFF)) # test value updated during
+ # reentrant call
),
# reenter
if_false=Op.TSTORE(0xFF, 0x101),
@@ -221,7 +246,11 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
"Reentrant calls can manipulate the same transient storage: "
"TSTORE(x, y), CALL(self, ...), TSTORE(x, z), TLOAD(x) returns z."
""
- "Based on [ethereum/tests/.../06_tstoreInReentrancyCallFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/06_tstoreInReentrancyCallFiller.yml).", # noqa: E501
+ "Based on [ethereum/tests/.../06_tstoreInReentrancyCallFiller.yml]"
+ "(https://github.com/ethereum/tests/blob/"
+ "9b00b68593f5869eb51a6659e1cc983e875e616b/src/"
+ "EIPTestsFiller/StateTests/stEIP1153-transientStorage/"
+ "06_tstoreInReentrancyCallFiller.yml).",
),
"bytecode": Conditional(
condition=SETUP_CONDITION,
@@ -230,7 +259,8 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
Op.TSTORE(0xFF, 0x100)
+ Op.SSTORE(1, Op.TLOAD(0xFF))
+ REENTRANT_CALL
- + Op.SSTORE(3, Op.TLOAD(0xFF)) # test value updated during reentrant call
+ + Op.SSTORE(3, Op.TLOAD(0xFF)) # test value updated during
+ # reentrant call
),
# reenter
if_false=Op.TSTORE(0xFF, 0x101) + Op.SSTORE(2, Op.TLOAD(0xFF)),
@@ -239,9 +269,16 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
}
TSTORE_IN_CALL_THEN_TLOAD_RETURN_IN_STATICCALL = {
"description": (
- "A reentrant call followed by a reentrant subcall can call tload correctly: "
- "TSTORE(x, y), CALL(self, ...), STATICCALL(self, ...), TLOAD(x), RETURN returns y."
- "Based on [ethereum/tests/.../10_revertUndoesStoreAfterReturnFiller.yml](https://github.com/ethereum/tests/blob/9b00b68593f5869eb51a6659e1cc983e875e616b/src/EIPTestsFiller/StateTests/stEIP1153-transientStorage/10_revertUndoesStoreAfterReturnFiller.yml).", # noqa: E501
+ "A reentrant call followed by a reentrant subcall can "
+ "call tload correctly: "
+ "TSTORE(x, y), CALL(self, ...), STATICCALL(self, ...), "
+ "TLOAD(x), RETURN returns y."
+ "Based on [ethereum/tests/.../"
+ "10_revertUndoesStoreAfterReturnFiller.yml]"
+ "(https://github.com/ethereum/tests/blob/"
+ "9b00b68593f5869eb51a6659e1cc983e875e616b/src/"
+ "EIPTestsFiller/StateTests/stEIP1153-transientStorage/"
+ "10_revertUndoesStoreAfterReturnFiller.yml).",
),
"bytecode": Switch(
default_action=( # setup; make first reentrant sub-call
@@ -252,7 +289,8 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
+ Op.SSTORE(4, Op.TLOAD(0xFE))
),
cases=[
- # the first, reentrant call which calls tstore and a further reentrant staticcall
+ # the first, reentrant call which calls tstore and a further
+ # reentrant staticcall
CalldataCase(
value=2,
action=(
@@ -264,7 +302,8 @@ class ReentrancyTestCases(PytestParameterEnum, metaclass=DynamicReentrancyTestCa
+ Op.SSTORE(3, Op.MLOAD(0))
),
),
- # the second, reentrant call, which calls tload and return returns successfully
+ # the second, reentrant call, which calls tload and return
+ # returns successfully
CalldataCase(
value=3,
action=Op.MSTORE(0, Op.TLOAD(0xFE)) + Op.RETURN(0, 32),
diff --git a/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py b/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py
index fadd9fde032..6237c830990 100644
--- a/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py
+++ b/tests/cancun/eip1153_tstore/test_tstorage_selfdestruct.py
@@ -1,8 +1,9 @@
"""
-abstract: Tests for [EIP-1153: Transient Storage](https://eips.ethereum.org/EIPS/eip-1153)
- Test cases for `TSTORE` and `TLOAD` opcode calls in reentrancy after self-destruct, taking into
- account the changes in EIP-6780.
-""" # noqa: E501
+EIP-1153 Transient Storage with selfdestruct tests.
+
+Test cases for `TSTORE` and `TLOAD` opcode calls in reentrancy after
+self-destruct, taking into account the changes in EIP-6780.
+"""
from enum import unique
from typing import Dict
@@ -36,7 +37,10 @@
def call_option(option_number: int) -> Bytecode:
- """Return the bytecode for a call to the callee contract with the given option number."""
+ """
+ Return the bytecode for a call to the callee contract with the given option
+ number.
+ """
return Op.MSTORE(value=option_number) + Op.CALL(
address=Op.SLOAD(0),
args_offset=0,
@@ -49,8 +53,8 @@ def call_option(option_number: int) -> Bytecode:
@unique
class SelfDestructCases(PytestParameterEnum):
"""
- Transient storage test cases for different reentrancy calls which involve the contract
- self-destructing.
+ Transient storage test cases for different reentrancy calls which involve
+ the contract self-destructing.
"""
TLOAD_AFTER_SELFDESTRUCT_PRE_EXISTING_CONTRACT = {
@@ -225,7 +229,10 @@ def test_reentrant_selfdestructing_call(
callee_bytecode: Bytecode,
expected_storage: Dict,
):
- """Test transient storage in different reentrancy contexts after selfdestructing."""
+ """
+ Test transient storage in different reentrancy contexts after
+ selfdestructing.
+ """
env = Environment()
caller_address = pre.deploy_contract(code=caller_bytecode)
diff --git a/tests/cancun/eip1153_tstore/test_tstore_reentrancy.py b/tests/cancun/eip1153_tstore/test_tstore_reentrancy.py
index 30d200b4fb6..557eb211ee1 100644
--- a/tests/cancun/eip1153_tstore/test_tstore_reentrancy.py
+++ b/tests/cancun/eip1153_tstore/test_tstore_reentrancy.py
@@ -1,7 +1,4 @@
-"""
-Ethereum Transient Storage EIP Tests
-https://eips.ethereum.org/EIPS/eip-1153.
-"""
+"""EIP-1153 Transient Storage tests."""
from enum import Enum
@@ -69,10 +66,12 @@ def test_tstore_reentrancy(
Revert undoes the transient storage writes from a call.
(09_revertUndoesAllFiller.yml)
- Revert undoes all the transient storage writes to the same key from the failed call.
+ Revert undoes all the transient storage writes to the same key
+ from the failed call.
(11_tstoreDelegateCallFiller.yml)
- delegatecall manipulates transient storage in the context of the current address.
+ delegatecall manipulates transient storage in the context of
+ the current address.
(13_tloadStaticCallFiller.yml)
Transient storage cannot be manipulated in a static context, tstore reverts
@@ -200,7 +199,8 @@ def make_call(call_type: Op) -> Bytecode:
if call_type == Op.STATICCALL or call_return == Om.OOG
else tload_value_set_in_call
),
- # external tstore overrides value in upper level only in delegate and callcode
+ # external tstore overrides value in upper level only in
+ # delegate and callcode
slot_tload_after_call: (
tload_value_set_in_call
if on_successful_delegate_or_callcode
@@ -208,7 +208,8 @@ def make_call(call_type: Op) -> Bytecode:
),
slot_tload_1_after_call: 12 if on_successful_delegate_or_callcode else 0,
slot_tstore_overwrite: 50,
- # tstore in static call not allowed, reentrancy means external call here
+ # tstore in static call not allowed, reentrancy means
+ # external call here
slot_subcall_worked: 0 if on_failing_calls else 1,
}
)
diff --git a/tests/cancun/eip4788_beacon_root/conftest.py b/tests/cancun/eip4788_beacon_root/conftest.py
index 3a28b279ce5..12497eead19 100644
--- a/tests/cancun/eip4788_beacon_root/conftest.py
+++ b/tests/cancun/eip4788_beacon_root/conftest.py
@@ -34,7 +34,10 @@ def timestamp() -> int: # noqa: D103
@pytest.fixture
def beacon_roots() -> Iterator[bytes]:
- """By default, return an iterator that returns the keccak of an internal counter."""
+ """
+ By default, return an iterator that returns the keccak of an internal
+ counter.
+ """
class BeaconRoots:
def __init__(self) -> None:
@@ -97,7 +100,8 @@ def contract_call_code(call_type: Op, call_value: int, call_gas: int) -> Bytecod
if call_type == Op.CALL or call_type == Op.CALLCODE:
contract_call_code += Op.SSTORE(
0x00, # store the result of the contract call in storage[0]
- call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501
+ # https://github.com/ethereum/execution-spec-tests/issues/348
+ call_type( # type: ignore
call_gas,
Spec.BEACON_ROOTS_ADDRESS,
call_value,
@@ -111,7 +115,8 @@ def contract_call_code(call_type: Op, call_value: int, call_gas: int) -> Bytecod
# delegatecall and staticcall use one less argument
contract_call_code += Op.SSTORE(
0x00,
- call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501
+ # https://github.com/ethereum/execution-spec-tests/issues/348
+ call_type( # type: ignore
call_gas,
Spec.BEACON_ROOTS_ADDRESS,
args_start,
@@ -181,7 +186,9 @@ def auto_access_list() -> bool:
@pytest.fixture
def access_list(auto_access_list: bool, timestamp: int) -> List[AccessList]:
- """Access list included in the transaction to call the beacon root contract."""
+ """
+ Access list included in the transaction to call the beacon root contract.
+ """
if auto_access_list:
return [
AccessList(
@@ -204,7 +211,8 @@ def tx_data(timestamp: int) -> bytes:
@pytest.fixture
def tx_type() -> int:
"""
- Transaction type to call the caller contract or the beacon root contract directly.
+ Transaction type to call the caller contract or the beacon root contract
+ directly.
By default use a type 2 transaction.
"""
@@ -263,8 +271,8 @@ def post(
call_beacon_root_contract: bool,
) -> Dict:
"""
- Prepare expected post state for a single contract call based upon the success or
- failure of the call, and the validity of the timestamp input.
+ Prepare expected post state for a single contract call based upon the
+ success or failure of the call, and the validity of the timestamp input.
"""
storage = Storage()
if not call_beacon_root_contract:
diff --git a/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py b/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py
index 69db3d19ab6..7ffb004920f 100644
--- a/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py
+++ b/tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py
@@ -1,21 +1,20 @@
"""
-abstract: Tests beacon block root for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788)
- Test the exposed beacon chain root in the EVM for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788).
+Tests beacon block root for [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788).
-note: Adding a new test
- Add a function that is named `test_` and takes at least the following arguments:
+Note: To add a new test, add a function that is named `test_`.
- - state_test
- - env
- - pre
- - tx
- - post
- - valid_call
+It must take at least the following arguments:
- All other `pytest.fixtures` can be parametrized to generate new combinations and test
- cases.
+- `state_test`
+- `env`
+- `pre`
+- `tx`
+- `post`
+- `valid_call`
-""" # noqa: E501
+All other `pytest.fixtures` can be parametrized to generate new
+combinations and test cases.
+"""
from itertools import count
from typing import Callable, Dict, Iterator, List
@@ -44,7 +43,10 @@
def count_factory(start: int, step: int = 1) -> Callable[[], Iterator[int]]:
- """Create a factory that returns fresh count iterators to avoid state persistence."""
+ """
+ Create a factory that returns fresh count iterators to avoid state
+ persistence.
+ """
return lambda: count(start, step)
@@ -80,7 +82,9 @@ def test_beacon_root_contract_calls(
post: Dict,
):
"""
- Tests the beacon root contract call using various call contexts:
+ Test calling the beacon root contract in various call contexts.
+
+ These call contexts are tested:
- `CALL`
- `DELEGATECALL`
- `CALLCODE`
@@ -90,10 +94,10 @@ def test_beacon_root_contract_calls(
- extra gas (valid call)
- insufficient gas (invalid call).
- The expected result is that the contract call will be executed if the gas amount is met
- and return the correct`parent_beacon_block_root`. Otherwise the call will be invalid, and not
- be executed. This is highlighted within storage by storing the return value of each call
- context.
+ The expected result is that the contract call will be executed if the gas
+    amount is met and return the correct `parent_beacon_block_root`. Otherwise
+ the call will be invalid, and not be executed. This is highlighted within
+ storage by storing the return value of each call context.
"""
blockchain_test(
pre=pre,
@@ -153,11 +157,12 @@ def test_beacon_root_contract_timestamps(
post: Dict,
):
"""
- Tests the beacon root contract call across for various valid and invalid timestamps.
+    Tests the beacon root contract call across various valid and invalid
+ timestamps.
The expected result is that the contract call will return the correct
- `parent_beacon_block_root` for a valid input timestamp and return the zero'd 32 bytes value
- for an invalid input timestamp.
+ `parent_beacon_block_root` for a valid input timestamp and return the
+ zero'd 32 bytes value for an invalid input timestamp.
"""
blockchain_test(
pre=pre,
@@ -187,7 +192,9 @@ def test_calldata_lengths(
tx: Transaction,
post: Dict,
):
- """Tests the beacon root contract call using multiple invalid input lengths."""
+ """
+ Tests the beacon root contract call using multiple invalid input lengths.
+ """
blockchain_test(
pre=pre,
blocks=[Block(txs=[tx], parent_beacon_block_root=beacon_root, timestamp=timestamp)],
@@ -216,10 +223,11 @@ def test_beacon_root_equal_to_timestamp(
post: Dict,
):
"""
- Tests the beacon root contract call where the beacon root is equal to the timestamp.
+ Tests the beacon root contract call where the beacon root is equal to the
+ timestamp.
- The expected result is that the contract call will return the `parent_beacon_block_root`,
- as all timestamps used are valid.
+ The expected result is that the contract call will return the
+ `parent_beacon_block_root`, as all timestamps used are valid.
"""
blockchain_test(
pre=pre,
@@ -240,7 +248,10 @@ def test_tx_to_beacon_root_contract(
tx: Transaction,
post: Dict,
):
- """Tests the beacon root contract using a transaction with different types and data lengths."""
+ """
+ Tests the beacon root contract using a transaction with different types and
+ data lengths.
+ """
blockchain_test(
pre=pre,
blocks=[Block(txs=[tx], parent_beacon_block_root=beacon_root, timestamp=timestamp)],
@@ -288,7 +299,10 @@ def test_beacon_root_selfdestruct(
tx: Transaction,
post: Dict,
):
- """Tests that self destructing the beacon root address transfers actors balance correctly."""
+ """
+    Tests that self-destructing the beacon root address transfers the actor's
+ balance correctly.
+ """
# self destruct actor
self_destruct_actor_address = pre.deploy_contract(
Op.SELFDESTRUCT(Spec.BEACON_ROOTS_ADDRESS),
@@ -373,17 +387,20 @@ def test_multi_block_beacon_root_timestamp_calls(
call_value: int,
):
"""
- Tests multiple blocks where each block writes a timestamp to storage and contains one
- transaction that calls the beacon root contract multiple times.
+ Tests multiple blocks where each block writes a timestamp to storage and
+ contains one transaction that calls the beacon root contract multiple
+ times.
- The blocks might overwrite the historical roots buffer, or not, depending on the `timestamps`,
- and whether they increment in multiples of `Spec.HISTORY_BUFFER_LENGTH` or not.
+ The blocks might overwrite the historical roots buffer, or not, depending
+ on the `timestamps`, and whether they increment in multiples of
+ `Spec.HISTORY_BUFFER_LENGTH` or not.
By default, the beacon roots are the keccak of the block number.
- Each transaction checks the current timestamp and also all previous timestamps, and verifies
- that the beacon root is correct for all of them if the timestamp is supposed to be in the
- buffer, which might have been overwritten by a later block.
+ Each transaction checks the current timestamp and also all previous
+ timestamps, and verifies that the beacon root is correct for all of them if
+ the timestamp is supposed to be in the buffer, which might have been
+ overwritten by a later block.
"""
# Create fresh iterator to avoid state persistence between test phases
timestamps = timestamps_factory()
@@ -415,9 +432,9 @@ def test_multi_block_beacon_root_timestamp_calls(
current_call_account_code = Bytecode()
current_call_account_expected_storage = Storage()
- # We are going to call the beacon roots contract once for every timestamp of the current
- # and all previous blocks, and check that the returned beacon root is still correct only
- # if it was not overwritten.
+ # We are going to call the beacon roots contract once for every
+ # timestamp of the current and all previous blocks, and check that the
+ # returned beacon root is still correct only if it was not overwritten.
for t in all_timestamps:
current_call_account_code += Op.MSTORE(0, t)
call_valid = (
@@ -462,7 +479,8 @@ def test_multi_block_beacon_root_timestamp_calls(
parent_beacon_block_root=beacon_root,
timestamp=timestamp,
withdrawals=[
- # Also withdraw to the beacon root contract and the system address
+ # Also withdraw to the beacon root contract and the system
+ # address
Withdrawal(
address=Spec.BEACON_ROOTS_ADDRESS,
amount=1,
@@ -503,8 +521,9 @@ def test_beacon_root_transition(
fork: Fork,
):
"""
- Tests the fork transition to cancun and verifies that blocks with timestamp lower than the
- transition timestamp do not contain beacon roots in the pre-deployed contract.
+ Tests the fork transition to cancun and verifies that blocks with timestamp
+ lower than the transition timestamp do not contain beacon roots in the
+ pre-deployed contract.
"""
# Create fresh iterator to avoid state persistence between test phases
timestamps = timestamps_factory()
@@ -527,7 +546,8 @@ def test_beacon_root_transition(
transitioned = fork.header_beacon_root_required(i, timestamp)
if transitioned:
- # We've transitioned, the current timestamp must contain a value in the contract
+ # We've transitioned, the current timestamp must contain a value in
+ # the contract
timestamps_in_beacon_root_contract.append(timestamp)
timestamps_storage[timestamp_index] = timestamp
roots_storage[timestamp_index] = beacon_root
@@ -539,9 +559,10 @@ def test_beacon_root_transition(
current_call_account_code = Bytecode()
current_call_account_expected_storage = Storage()
- # We are going to call the beacon roots contract once for every timestamp of the current
- # and all previous blocks, and check that the returned beacon root is correct only
- # if it was after the transition timestamp.
+ # We are going to call the beacon roots contract once for every
+ # timestamp of the current and all previous blocks, and check that the
+ # returned beacon root is correct only if it was after the transition
+ # timestamp.
for t in all_timestamps:
current_call_account_code += Op.MSTORE(0, t)
call_valid = (
@@ -586,7 +607,8 @@ def test_beacon_root_transition(
parent_beacon_block_root=beacon_root if transitioned else None,
timestamp=timestamp,
withdrawals=[
- # Also withdraw to the beacon root contract and the system address
+ # Also withdraw to the beacon root contract and the system
+ # address
Withdrawal(
address=Spec.BEACON_ROOTS_ADDRESS,
amount=1,
@@ -625,8 +647,8 @@ def test_no_beacon_root_contract_at_transition(
fork: Fork,
):
"""
- Tests the fork transition to cancun in the case where the beacon root pre-deploy was not
- deployed in time for the fork.
+ Tests the fork transition to cancun in the case where the beacon root
+ pre-deploy was not deployed in time for the fork.
"""
assert fork.header_beacon_root_required(1, timestamp)
blocks: List[Block] = [
@@ -635,7 +657,8 @@ def test_no_beacon_root_contract_at_transition(
parent_beacon_block_root=next(beacon_roots),
timestamp=timestamp,
withdrawals=[
- # Also withdraw to the beacon root contract and the system address
+ # Also withdraw to the beacon root contract and the system
+ # address
Withdrawal(
address=Spec.BEACON_ROOTS_ADDRESS,
amount=1,
@@ -652,7 +675,8 @@ def test_no_beacon_root_contract_at_transition(
)
]
pre[Spec.BEACON_ROOTS_ADDRESS] = Account(
- code=b"", # Remove the code that is automatically allocated on Cancun fork
+ code=b"", # Remove the code that is automatically allocated on Cancun
+ # fork
nonce=0,
balance=0,
)
@@ -667,9 +691,8 @@ def test_no_beacon_root_contract_at_transition(
balance=int(1e9),
),
caller_address: Account(
- storage={
- 0: 1
- }, # Successful call because the contract is not there, but nothing else is stored
+ storage={0: 1}, # Successful call because the contract is not there, but
+ # nothing else is stored
),
}
blockchain_test(
@@ -704,8 +727,8 @@ def test_beacon_root_contract_deploy(
fork: Fork,
):
"""
- Tests the fork transition to cancun deploying the contract during Shanghai and verifying the
- code deployed and its functionality after Cancun.
+ Tests the fork transition to cancun deploying the contract during Shanghai
+ and verifying the code deployed and its functionality after Cancun.
"""
assert fork.header_beacon_root_required(1, timestamp)
tx_gas_limit = 0x3D090
@@ -746,7 +769,8 @@ def test_beacon_root_contract_deploy(
),
timestamp=timestamp // 2,
withdrawals=[
- # Also withdraw to the beacon root contract and the system address
+ # Also withdraw to the beacon root contract and the
+ # system address
Withdrawal(
address=Spec.BEACON_ROOTS_ADDRESS,
amount=1,
@@ -773,7 +797,8 @@ def test_beacon_root_contract_deploy(
parent_beacon_block_root=beacon_root,
timestamp=timestamp,
withdrawals=[
- # Also withdraw to the beacon root contract and the system address
+ # Also withdraw to the beacon root contract and the
+ # system address
Withdrawal(
address=Spec.BEACON_ROOTS_ADDRESS,
amount=1,
@@ -800,7 +825,8 @@ def test_beacon_root_contract_deploy(
expected_code = fork.pre_allocation_blockchain()[Spec.BEACON_ROOTS_ADDRESS]["code"]
pre[Spec.BEACON_ROOTS_ADDRESS] = Account(
- code=b"", # Remove the code that is automatically allocated on Cancun fork
+ code=b"", # Remove the code that is automatically allocated on Cancun
+ # fork
nonce=0,
balance=0,
)
diff --git a/tests/cancun/eip4844_blobs/conftest.py b/tests/cancun/eip4844_blobs/conftest.py
index 2388a053c82..6da44f93c16 100644
--- a/tests/cancun/eip4844_blobs/conftest.py
+++ b/tests/cancun/eip4844_blobs/conftest.py
@@ -64,7 +64,9 @@ def parent_excess_blob_gas(
parent_excess_blobs: int | None,
blob_gas_per_blob: int,
) -> int | None:
- """Calculate the excess blob gas of the parent block from the excess blobs."""
+ """
+ Calculate the excess blob gas of the parent block from the excess blobs.
+ """
if parent_excess_blobs is None:
return None
assert parent_excess_blobs >= 0
@@ -79,7 +81,8 @@ def excess_blob_gas(
block_base_fee_per_gas: int,
) -> int | None:
"""
- Calculate the excess blob gas of the block under test from the parent block.
+ Calculate the excess blob gas of the block under test from the parent
+ block.
Value can be overloaded by a test case to provide a custom excess blob gas.
"""
@@ -100,7 +103,8 @@ def correct_excess_blob_gas(
block_base_fee_per_gas: int,
) -> int:
"""
- Calculate the correct excess blob gas of the block under test from the parent block.
+ Calculate the correct excess blob gas of the block under test from the
+ parent block.
Should not be overloaded by a test case.
"""
@@ -160,7 +164,9 @@ def env(
block_base_fee_per_gas: int,
genesis_excess_blob_gas: int,
) -> Environment:
- """Prepare the environment of the genesis block for all blockchain tests."""
+ """
+ Prepare the environment of the genesis block for all blockchain tests.
+ """
return Environment(
excess_blob_gas=genesis_excess_blob_gas,
blob_gas_used=0,
@@ -213,7 +219,8 @@ def tx_max_priority_fee_per_gas() -> int:
@pytest.fixture
def tx_max_fee_per_blob_gas_multiplier() -> int:
"""
- Return default max fee per blob gas multiplier for transactions sent during test.
+ Return default max fee per blob gas multiplier for transactions sent during
+ test.
Can be overloaded by a test case to provide a custom max fee per blob gas
multiplier.
@@ -224,7 +231,8 @@ def tx_max_fee_per_blob_gas_multiplier() -> int:
@pytest.fixture
def tx_max_fee_per_blob_gas_delta() -> int:
"""
- Return default max fee per blob gas delta for transactions sent during test.
+ Return default max fee per blob gas delta for transactions sent during
+ test.
Can be overloaded by a test case to provide a custom max fee per blob gas
delta.
@@ -263,22 +271,21 @@ def non_zero_blob_gas_used_genesis_block(
block_base_fee_per_gas: int,
) -> Block | None:
"""
- For test cases with a non-zero blobGasUsed field in the
- original genesis block header we must instead utilize an
- intermediate block to act on its behalf.
-
- Genesis blocks with a non-zero blobGasUsed field are invalid as
- they do not have any blob txs.
-
- For the intermediate block to align with default genesis values,
- we must add TARGET_BLOB_GAS_PER_BLOCK to the excessBlobGas of the
- genesis value, expecting an appropriate drop to the intermediate block.
- Similarly, we must add parent_blobs to the intermediate block within
- a blob tx such that an equivalent blobGasUsed field is wrote.
-
- For forks >= Osaka where the MAX_BLOBS_PER_TX is introduced, we
- split the blobs across multiple transactions to respect the
- MAX_BLOBS_PER_TX limit.
+ For test cases with a non-zero blobGasUsed field in the original genesis
+ block header we must instead utilize an intermediate block to act on its
+ behalf.
+
+ Genesis blocks with a non-zero blobGasUsed field are invalid as they do not
+ have any blob txs.
+
+ For the intermediate block to align with default genesis values, we must
+ add TARGET_BLOB_GAS_PER_BLOCK to the excessBlobGas of the genesis value,
+ expecting an appropriate drop to the intermediate block. Similarly, we must
+ add parent_blobs to the intermediate block within a blob tx such that an
+ equivalent blobGasUsed field is written.
+
+ For forks >= Osaka where the MAX_BLOBS_PER_TX is introduced, we split the
+ blobs across multiple transactions to respect the MAX_BLOBS_PER_TX limit.
"""
if parent_blobs == 0:
return None
@@ -300,9 +307,10 @@ def non_zero_blob_gas_used_genesis_block(
empty_account_destination = pre.fund_eoa(0)
blob_gas_price_calculator = fork.blob_gas_price_calculator(block_number=1)
- # Split blobs into chunks when MAX_BLOBS_PER_TX < MAX_BLOBS_PER_BLOCK to respect per-tx limits.
- # Allows us to keep single txs for forks where per-tx and per-block limits are equal, hitting
- # coverage for block level blob gas validation when parent_blobs > MAX_BLOBS_PER_BLOCK.
+ # Split blobs into chunks when MAX_BLOBS_PER_TX < MAX_BLOBS_PER_BLOCK to
+ # respect per-tx limits. Allows us to keep single txs for forks where per-
+ # tx and per-block limits are equal, hitting coverage for block level blob
+ # gas validation when parent_blobs > MAX_BLOBS_PER_BLOCK.
max_blobs_per_tx = (
fork.max_blobs_per_tx()
if fork.max_blobs_per_tx() < fork.max_blobs_per_block()
diff --git a/tests/cancun/eip4844_blobs/spec.py b/tests/cancun/eip4844_blobs/spec.py
index 5eb5a29a7ee..a531fb81ac0 100644
--- a/tests/cancun/eip4844_blobs/spec.py
+++ b/tests/cancun/eip4844_blobs/spec.py
@@ -87,8 +87,8 @@ def get_min_excess_blob_gas_for_blob_gas_price(
blob_gas_price: int,
) -> int:
"""
- Get the minimum required excess blob gas value to get a given blob gas cost in a
- block.
+ Get the minimum required excess blob gas value to get a given blob gas
+ cost in a block.
"""
current_excess_blob_gas = 0
current_blob_gas_price = 1
@@ -106,7 +106,10 @@ def get_min_excess_blobs_for_blob_gas_price(
fork: Fork,
blob_gas_price: int,
) -> int:
- """Get the minimum required excess blobs to get a given blob gas cost in a block."""
+ """
+ Get the minimum required excess blobs to get a given blob gas cost in a
+ block.
+ """
gas_per_blob = fork.blob_gas_per_blob()
return (
cls.get_min_excess_blob_gas_for_blob_gas_price(
@@ -122,23 +125,27 @@ def get_blob_combinations(
blob_count: int,
max_blobs_per_tx: int,
) -> List[Tuple[int, ...]]:
- """Get all possible combinations of blobs that result in a given blob count."""
+ """
+ Get all possible combinations of blobs that result in a given blob
+ count.
+ """
combinations = [
seq
for i in range(
blob_count + 1, 0, -1
- ) # We can have from 1 to at most MAX_BLOBS_PER_BLOCK blobs per block
+ ) # We can have from 1 to at most MAX_BLOBS_PER_BLOCK blobs per
+ # block
for seq in itertools.combinations_with_replacement(
range(1, min(blob_count + 1, max_blobs_per_tx) + 1), i
) # We iterate through all possible combinations
- if sum(seq)
- == blob_count # And we only keep the ones that match the expected blob count
- and all(tx_blobs <= max_blobs_per_tx for tx_blobs in seq) # Validate each tx
+ # And we only keep the ones that match the expected blob count
+ if sum(seq) == blob_count and all(tx_blobs <= max_blobs_per_tx for tx_blobs in seq)
+ # Validate each tx
]
- # We also add the reversed version of each combination, only if it's not
- # already in the list. E.g. (4, 1) is added from (1, 4) but not
- # (1, 1, 1, 1, 1) because its reversed version is identical.
+ # We also add the reversed version of each combination, only if it's
+ # not already in the list. E.g. (4, 1) is added from (1, 4) but not (1,
+ # 1, 1, 1, 1) because its reversed version is identical.
combinations += [
tuple(reversed(x)) for x in combinations if tuple(reversed(x)) not in combinations
]
@@ -147,8 +154,8 @@ def get_blob_combinations(
@classmethod
def all_valid_blob_combinations(cls, fork: Fork) -> List[Tuple[int, ...]]:
"""
- Return all valid blob tx combinations for a given block,
- assuming the given MAX_BLOBS_PER_BLOCK, whilst respecting MAX_BLOBS_PER_TX.
+ Return all valid blob tx combinations for a given block, assuming the
+ given MAX_BLOBS_PER_BLOCK, whilst respecting MAX_BLOBS_PER_TX.
"""
max_blobs_per_block = fork.max_blobs_per_block()
max_blobs_per_tx = fork.max_blobs_per_tx()
diff --git a/tests/cancun/eip4844_blobs/test_blob_txs.py b/tests/cancun/eip4844_blobs/test_blob_txs.py
index 166fb5ced43..b242595ef60 100644
--- a/tests/cancun/eip4844_blobs/test_blob_txs.py
+++ b/tests/cancun/eip4844_blobs/test_blob_txs.py
@@ -1,19 +1,18 @@
"""
-abstract: Tests blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+Tests blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+Note: To add a new test, add a function that is named `test_`.
-note: Adding a new test
- Add a function that is named `test_` and takes at least the following arguments:
+It must at least use the following arguments:
- - blockchain_test or state_test
- - pre
- - env
- - block or txs
+- `blockchain_test` or `state_test`
+- `pre`
+- `env`
+- `block` or `txs`.
- All other `pytest.fixture` fixtures can be parametrized to generate new combinations and test cases.
-
-""" # noqa: E501
+All other `pytest.fixture` fixtures can be parametrized to generate new
+combinations and test cases.
+"""
from typing import List, Optional, Tuple
@@ -126,8 +125,8 @@ def total_account_minimum_balance( # noqa: D103
blob_hashes_per_tx: List[List[bytes]],
) -> int:
"""
- Calculate minimum balance required for the account to be able to send
- the transactions in the block of the test.
+ Calculate minimum balance required for the account to be able to send the
+ transactions in the block of the test.
"""
minimum_cost = 0
for tx_blob_count in [len(x) for x in blob_hashes_per_tx]:
@@ -147,7 +146,9 @@ def total_account_transactions_fee( # noqa: D103
tx_max_priority_fee_per_gas: int,
blob_hashes_per_tx: List[List[bytes]],
) -> int:
- """Calculate actual fee for the blob transactions in the block of the test."""
+ """
+ Calculate actual fee for the blob transactions in the block of the test.
+ """
total_cost = 0
for tx_blob_count in [len(x) for x in blob_hashes_per_tx]:
blob_cost = blob_gas_price * blob_gas_per_blob * tx_blob_count
@@ -175,8 +176,8 @@ def tx_error() -> Optional[TransactionException]:
"""
Error produced by the block transactions (no error).
- Can be overloaded on test cases where the transactions are expected
- to fail.
+ Can be overloaded on test cases where the transactions are expected to
+ fail.
"""
return None
@@ -230,8 +231,8 @@ def txs( # noqa: D103
@pytest.fixture
def account_balance_modifier() -> int:
"""
- Account balance modifier for the source account of all tests.
- See `pre` fixture.
+ Account balance modifier for the source account of all tests. See `pre`
+ fixture.
"""
return 0
@@ -243,9 +244,9 @@ def state_env(
"""
Prepare the environment for all state test cases.
- Main difference is that the excess blob gas is not increased by the target, as
- there is no genesis block -> block 1 transition, and therefore the excess blob gas
- is not decreased by the target.
+ Main difference is that the excess blob gas is not increased by the target,
+ as there is no genesis block -> block 1 transition, and therefore the
+ excess blob gas is not decreased by the target.
"""
return Environment(
excess_blob_gas=excess_blob_gas if excess_blob_gas else 0,
@@ -255,8 +256,8 @@ def state_env(
@pytest.fixture
def engine_api_error_code() -> Optional[EngineAPIError]:
"""
- Engine API error code to be returned by the client on consumption
- of the erroneous block in hive.
+ Engine API error code to be returned by the client on consumption of the
+ erroneous block in hive.
"""
return None
@@ -268,8 +269,8 @@ def block_error(
"""
Error produced by the block transactions (no error).
- Can be overloaded on test cases where the transactions are expected
- to fail.
+ Can be overloaded on test cases where the transactions are expected to
+ fail.
"""
return tx_error
@@ -385,15 +386,16 @@ def test_valid_blob_tx_combinations(
block: Block,
):
"""
- Test all valid blob combinations in a single block, assuming a given value of
- `MAX_BLOBS_PER_BLOCK`.
+ Test all valid blob combinations in a single block, assuming a given value
+ of `MAX_BLOBS_PER_BLOCK`.
- This assumes a block can include from 1 and up to `MAX_BLOBS_PER_BLOCK` transactions where all
- transactions contain at least 1 blob, and the sum of all blobs in a block is at
- most `MAX_BLOBS_PER_BLOCK`.
+ This assumes a block can include from 1 and up to `MAX_BLOBS_PER_BLOCK`
+ transactions where all transactions contain at least 1 blob, and the sum of
+ all blobs in a block is at most `MAX_BLOBS_PER_BLOCK`.
- This test is parametrized with all valid blob transaction combinations for a given block, and
- therefore if value of `MAX_BLOBS_PER_BLOCK` changes, this test is automatically updated.
+ This test is parametrized with all valid blob transaction combinations for
+ a given block, and therefore if value of `MAX_BLOBS_PER_BLOCK` changes,
+ this test is automatically updated.
"""
blockchain_test(
pre=pre,
@@ -407,8 +409,8 @@ def generate_invalid_tx_max_fee_per_blob_gas_tests(
fork: Fork,
) -> List:
"""
- Return a list of tests for invalid blob transactions due to insufficient max fee per blob gas
- parametrized for each different fork.
+ Return a list of tests for invalid blob transactions due to insufficient
+ max fee per blob gas parametrized for each different fork.
"""
min_base_fee_per_blob_gas = fork.min_base_fee_per_blob_gas()
minimum_excess_blobs_for_first_increment = SpecHelpers.get_min_excess_blobs_for_blob_gas_price(
@@ -422,9 +424,12 @@ def generate_invalid_tx_max_fee_per_blob_gas_tests(
tests = []
tests.append(
pytest.param(
- minimum_excess_blobs_for_first_increment - 1, # blob gas price is 1
- fork.target_blobs_per_block() + 1, # blob gas cost increases to above the minimum
- min_base_fee_per_blob_gas, # tx max_blob_gas_cost is the minimum
+ # blob gas price is 1
+ minimum_excess_blobs_for_first_increment - 1,
+ # blob gas cost increases to above the minimum
+ fork.target_blobs_per_block() + 1,
+ # tx max_blob_gas_cost is the minimum
+ min_base_fee_per_blob_gas,
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS,
id="insufficient_max_fee_per_blob_gas",
marks=pytest.mark.exception_test,
@@ -433,11 +438,12 @@ def generate_invalid_tx_max_fee_per_blob_gas_tests(
if (next_base_fee_per_blob_gas - min_base_fee_per_blob_gas) > 1:
tests.append(
pytest.param(
- minimum_excess_blobs_for_first_increment
- - 1, # blob gas price is one less than the minimum
- fork.target_blobs_per_block() + 1, # blob gas cost increases to above the minimum
- next_base_fee_per_blob_gas
- - 1, # tx max_blob_gas_cost is one less than the minimum
+ # blob gas price is one less than the minimum
+ minimum_excess_blobs_for_first_increment - 1,
+ # blob gas cost increases to above the minimum
+ fork.target_blobs_per_block() + 1,
+ # tx max_blob_gas_cost is one less than the minimum
+ next_base_fee_per_blob_gas - 1,
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS,
id="insufficient_max_fee_per_blob_gas_one_less_than_next",
marks=pytest.mark.exception_test,
@@ -448,7 +454,8 @@ def generate_invalid_tx_max_fee_per_blob_gas_tests(
pytest.param(
0, # blob gas price is the minimum
0, # blob gas cost stays put at 1
- min_base_fee_per_blob_gas - 1, # tx max_blob_gas_cost is one less than the minimum
+ # tx max_blob_gas_cost is one less than the minimum
+ min_base_fee_per_blob_gas - 1,
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS,
id="insufficient_max_fee_per_blob_gas_one_less_than_min",
marks=pytest.mark.exception_test,
@@ -589,12 +596,12 @@ def test_invalid_block_blob_count(
block: Block,
):
"""
- Test all invalid blob combinations in a single block, where the sum of all blobs in a block is
- at `MAX_BLOBS_PER_BLOCK + 1`.
+ Test all invalid blob combinations in a single block, where the sum of all
+ blobs in a block is at `MAX_BLOBS_PER_BLOCK + 1`.
This test is parametrized with all blob transaction combinations exceeding
- `MAX_BLOBS_PER_BLOCK` by one for a given block, and
- therefore if value of `MAX_BLOBS_PER_BLOCK` changes, this test is automatically updated.
+ `MAX_BLOBS_PER_BLOCK` by one for a given block, and therefore if value of
+ `MAX_BLOBS_PER_BLOCK` changes, this test is automatically updated.
"""
blockchain_test(
pre=pre,
@@ -636,7 +643,8 @@ def test_insufficient_balance_blob_tx(
- Transactions with and without priority fee
- Transactions with and without value
- Transactions with and without calldata
- - Transactions with max fee per blob gas lower or higher than the priority fee
+ - Transactions with max fee per blob gas lower or higher than the priority
+ fee
"""
assert len(txs) == 1
state_test(
@@ -677,14 +685,15 @@ def test_sufficient_balance_blob_tx(
txs: List[Transaction],
):
"""
- Check that transaction is accepted when user can exactly afford the blob gas specified (and
- max_fee_per_gas would be enough for current block).
+ Check that transaction is accepted when user can exactly afford the blob
+ gas specified (and max_fee_per_gas would be enough for current block).
- Transactions with max fee equal or higher than current block base fee
- Transactions with and without priority fee
- Transactions with and without value
- Transactions with and without calldata
- - Transactions with max fee per blob gas lower or higher than the priority fee
+ - Transactions with max fee per blob gas lower or higher than the priority
+ fee
"""
assert len(txs) == 1
state_test(
@@ -728,15 +737,16 @@ def test_sufficient_balance_blob_tx_pre_fund_tx(
header_verify: Optional[Header],
):
"""
- Check that transaction is accepted when user can exactly afford the blob gas specified (and
- max_fee_per_gas would be enough for current block) because a funding transaction is
- prepended in the same block.
+ Check that transaction is accepted when user can exactly afford the blob
+ gas specified (and max_fee_per_gas would be enough for current block)
+ because a funding transaction is prepended in the same block.
- Transactions with max fee equal or higher than current block base fee
- Transactions with and without priority fee
- Transactions with and without value
- Transactions with and without calldata
- - Transactions with max fee per blob gas lower or higher than the priority fee
+ - Transactions with max fee per blob gas lower or higher than the priority
+ fee
"""
pre_funding_sender = pre.fund_eoa(amount=(21_000 * 100) + total_account_minimum_balance)
txs = [
@@ -808,14 +818,16 @@ def test_blob_gas_subtraction_tx(
total_account_transactions_fee: int,
):
"""
- Check that the blob gas fee for a transaction is subtracted from the sender balance before the
- transaction is executed.
+ Check that the blob gas fee for a transaction is subtracted from the sender
+ balance before the transaction is executed.
- Transactions with max fee equal or higher than current block base fee
- Transactions with and without value
- Transactions with and without calldata
- - Transactions with max fee per blob gas lower or higher than the priority fee
- - Transactions where an externally owned account sends funds to the sender mid execution
+ - Transactions with max fee per blob gas lower or higher than the priority
+ fee
+ - Transactions where an externally owned account sends funds to the sender
+ mid execution
"""
assert len(txs) == 1
post = {
@@ -851,10 +863,11 @@ def test_insufficient_balance_blob_tx_combinations(
block: Block,
):
"""
- Reject all valid blob transaction combinations in a block, but block is invalid.
+ Reject all valid blob transaction combinations in a block, but block is
+ invalid.
- - The amount of blobs is correct but the user cannot afford the
- transaction total cost
+ - The amount of blobs is correct but the user cannot afford the transaction
+ total cost
"""
blockchain_test(
pre=pre,
@@ -867,7 +880,10 @@ def test_insufficient_balance_blob_tx_combinations(
def generate_invalid_tx_blob_count_tests(
fork: Fork,
) -> List:
- """Return a list of tests for invalid blob transactions due to invalid blob counts."""
+ """
+ Return a list of tests for invalid blob transactions due to invalid blob
+ counts.
+ """
return [
pytest.param(
[0],
@@ -1007,7 +1023,8 @@ def test_invalid_blob_hash_versioning_multiple_txs(
- Multiple blob transactions with single blob all with invalid version
- Multiple blob transactions with multiple blobs all with invalid version
- - Multiple blob transactions with multiple blobs only one with invalid version
+ - Multiple blob transactions with multiple blobs only one with invalid
+ version
"""
blockchain_test(
pre=pre,
@@ -1029,10 +1046,14 @@ def test_invalid_blob_tx_contract_creation(
txs: List[Transaction],
header_verify: Optional[Header],
):
- """Reject blocks that include blob transactions that have nil to value (contract creating)."""
+ """
+ Reject blocks that include blob transactions that have nil to value
+ (contract creating).
+ """
assert len(txs) == 1
assert txs[0].blob_versioned_hashes is not None and len(txs[0].blob_versioned_hashes) == 1
- # Replace the transaction with a contract creating one, only in the RLP version
+ # Replace the transaction with a contract creating one, only in the RLP
+ # version
contract_creating_tx = txs[0].copy(to=None).with_signature_and_sender()
txs[0].rlp_override = contract_creating_tx.rlp()
blockchain_test(
@@ -1047,8 +1068,8 @@ def test_invalid_blob_tx_contract_creation(
],
header_verify=header_verify,
# Skipped due to the T8N not receiving the invalid transaction,
- # instead we are passing a valid transaction to T8N and then the transaction
- # is replaced directly in the block RLP.
+ # instead we are passing a valid transaction to T8N and then
+ # the transaction is replaced directly in the block RLP.
skip_exception_verification=True,
)
],
@@ -1071,7 +1092,10 @@ def opcode(
tx_max_priority_fee_per_gas: int,
tx_value: int,
) -> Tuple[Bytecode, Storage.StorageDictType]:
- """Build bytecode and post to test each opcode that accesses transaction information."""
+ """
+ Build bytecode and post to test each opcode that accesses transaction
+ information.
+ """
if request.param == Op.ORIGIN:
return (
Op.SSTORE(0, Op.ORIGIN),
@@ -1137,7 +1161,8 @@ def test_blob_tx_attribute_opcodes(
state_env: Environment,
):
"""
- Test opcodes that read transaction attributes work properly for blob type transactions.
+ Test opcodes that read transaction attributes work properly for blob type
+ transactions.
- ORIGIN
- CALLER
@@ -1189,7 +1214,9 @@ def test_blob_tx_attribute_value_opcode(
opcode: Tuple[Bytecode, Storage.StorageDictType],
state_env: Environment,
):
- """Test the VALUE opcode with different blob type transaction value amounts."""
+ """
+ Test the VALUE opcode with different blob type transaction value amounts.
+ """
code, storage = opcode
destination_account = pre.deploy_contract(code=code)
tx = Transaction(
@@ -1255,7 +1282,8 @@ def test_blob_tx_attribute_calldata_opcodes(
state_env: Environment,
):
"""
- Test calldata related opcodes to verify their behavior is not affected by blobs.
+ Test calldata related opcodes to verify their behavior is not affected by
+ blobs.
- CALLDATALOAD
- CALLDATASIZE
@@ -1289,9 +1317,12 @@ def test_blob_tx_attribute_calldata_opcodes(
)
-@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 2]) # always below data fee
-@pytest.mark.parametrize("tx_max_fee_per_blob_gas_delta", [0, 1]) # normal and above priority fee
-@pytest.mark.parametrize("tx_max_fee_per_gas", [100]) # always above priority fee (FOR CANCUN)
+# always below data fee:
+@pytest.mark.parametrize("tx_max_priority_fee_per_gas", [0, 2])
+# normal and above priority fee:
+@pytest.mark.parametrize("tx_max_fee_per_blob_gas_delta", [0, 1])
+# always above priority fee (FOR CANCUN)
+@pytest.mark.parametrize("tx_max_fee_per_gas", [100])
@pytest.mark.parametrize("opcode", [Op.GASPRICE], indirect=True)
@pytest.mark.parametrize("tx_gas", [500_000])
@pytest.mark.valid_from("Cancun")
@@ -1311,7 +1342,8 @@ def test_blob_tx_attribute_gasprice_opcode(
state_env: Environment,
):
"""
- Test GASPRICE opcode to sanity check that the blob gas fee does not affect its calculation.
+ Test GASPRICE opcode to sanity check that the blob gas fee does not affect
+ its calculation.
- No priority fee
- Priority fee below data fee
@@ -1382,8 +1414,9 @@ def test_blob_type_tx_pre_fork(
"""
Reject blocks with blob type transactions before Cancun fork.
- Blocks sent by NewPayloadV2 (Shanghai) that contain blob type transactions, furthermore blobs
- field within NewPayloadV2 method must be computed as INVALID, due to an invalid block hash.
+ Blocks sent by NewPayloadV2 (Shanghai) that contain blob type transactions,
+ furthermore blobs field within NewPayloadV2 method must be computed as
+ INVALID, due to an invalid block hash.
"""
assert len(txs) == 1
state_test(
diff --git a/tests/cancun/eip4844_blobs/test_blob_txs_full.py b/tests/cancun/eip4844_blobs/test_blob_txs_full.py
index ba6c091a707..d71b81f22c0 100644
--- a/tests/cancun/eip4844_blobs/test_blob_txs_full.py
+++ b/tests/cancun/eip4844_blobs/test_blob_txs_full.py
@@ -1,8 +1,4 @@
-"""
-abstract: Tests full blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test full blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
-
-""" # noqa: E501
+"""Tests full blob type transactions for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)."""
from typing import List, Optional
@@ -135,10 +131,10 @@ def tx_max_fee_per_blob_gas( # noqa: D103
@pytest.fixture
def tx_error() -> Optional[TransactionException]:
"""
- Even though the final block we are producing in each of these tests is invalid, and some of the
- transactions will be invalid due to the format in the final block, none of the transactions
- should be rejected by the transition tool because they are being sent to it with the correct
- format.
+ Even though the final block we are producing in each of these tests is
+ invalid, and some of the transactions will be invalid due to the format in
+ the final block, none of the transactions should be rejected by the
+ transition tool because they are being sent to it with the correct format.
"""
return None
@@ -214,8 +210,8 @@ def blocks(
header_blob_gas_used = 0
block_error = None
if any(txs_wrapped_blobs):
- # This is a block exception because the invalid block is only created in the RLP version,
- # not in the transition tool.
+ # This is a block exception because the invalid block is only created
+ # in the RLP version, not in the transition tool.
block_error = [
BlockException.RLP_STRUCTURES_ENCODING,
TransactionException.TYPE_3_TX_WITH_FULL_BLOBS,
@@ -289,8 +285,8 @@ def test_reject_valid_full_blob_in_block_rlp(
blocks: List[Block],
):
"""
- Test valid blob combinations where one or more txs in the block
- serialized version contain a full blob (network version) tx.
+ Test valid blob combinations where one or more txs in the block serialized
+ version contain a full blob (network version) tx.
"""
blockchain_test(
pre=pre,
diff --git a/tests/cancun/eip4844_blobs/test_blobhash_opcode.py b/tests/cancun/eip4844_blobs/test_blobhash_opcode.py
index b22883be036..c265bd0214a 100644
--- a/tests/cancun/eip4844_blobs/test_blobhash_opcode.py
+++ b/tests/cancun/eip4844_blobs/test_blobhash_opcode.py
@@ -1,21 +1,21 @@
"""
-abstract: Tests `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test cases for the `BLOBHASH` opcode in
- [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+Tests `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
-note: Adding a new test
- Add a function that is named `test_` and takes at least the following arguments:
+Note: To add a new test, add a function that is named `test_` and takes
+at least the following arguments.
- - blockchain_test
- - pre
- - tx
- - post
+Required arguments:
+- `blockchain_test`
+- `pre`
+- `tx`
+- `post`
- Additional custom `pytest.fixture` fixtures can be added and parametrized for new test cases.
+Additional custom `pytest.fixture` fixtures can be added and
+parametrized for
+new test cases.
- There is no specific structure to follow within this test module.
-
-""" # noqa: E501
+There is no specific structure to follow within this test module.
+"""
from typing import List
@@ -81,11 +81,10 @@ class BlobhashScenario:
@staticmethod
def create_blob_hashes_list(length: int, max_blobs_per_tx: int) -> List[List[Hash]]:
"""
- Create list of MAX_BLOBS_PER_TX blob hashes
- using `random_blob_hashes`.
+ Create list of MAX_BLOBS_PER_TX blob hashes using `random_blob_hashes`.
- Cycle over random_blob_hashes to get a large list of
- length: MAX_BLOBS_PER_TX * length
+ Cycle over random_blob_hashes to get a large list of length:
+ MAX_BLOBS_PER_TX * length
-> [0x01, 0x02, 0x03, 0x04, ..., 0x0A, 0x0B, 0x0C, 0x0D]
Then split list into smaller chunks of MAX_BLOBS_PER_TX
@@ -104,9 +103,8 @@ def blobhash_sstore(index: int, max_blobs_per_tx: int):
"""
Return BLOBHASH sstore to the given index.
- If the index is out of the valid bounds, 0x01 is written
- in storage, as we later check it is overwritten by
- the BLOBHASH sstore.
+ If the index is out of the valid bounds, 0x01 is written in storage, as
+ we later check it is overwritten by the BLOBHASH sstore.
"""
invalidity_check = Op.SSTORE(index, 0x01)
if index < 0 or index >= max_blobs_per_tx:
@@ -158,9 +156,9 @@ def test_blobhash_gas_cost(
"""
Tests `BLOBHASH` opcode gas cost using a variety of indexes.
- Asserts that the gas consumption of the `BLOBHASH` opcode is correct by ensuring
- it matches `HASH_OPCODE_GAS = 3`. Includes both valid and invalid random
- index sizes from the range `[0, 2**256-1]`, for tx types 2 and 3.
+ Asserts that the gas consumption of the `BLOBHASH` opcode is correct by
+ ensuring it matches `HASH_OPCODE_GAS = 3`. Includes both valid and invalid
+ random index sizes from the range `[0, 2**256-1]`, for tx types 2 and 3.
"""
gas_measure_code = CodeGasMeasure(
code=Op.BLOBHASH(blobhash_index),
@@ -279,11 +277,11 @@ def test_blobhash_invalid_blob_index(
max_blobs_per_tx: int,
):
"""
- Tests that the `BLOBHASH` opcode returns a zeroed `bytes32` value for invalid
- indexes.
+ Tests that the `BLOBHASH` opcode returns a zeroed `bytes32` value for
+ invalid indexes.
- Includes cases where the index is negative (`index < 0`) or
- exceeds the maximum number of `blob_versioned_hash` values stored:
+ Includes cases where the index is negative (`index < 0`) or exceeds the
+ maximum number of `blob_versioned_hash` values stored:
(`index >= len(tx.message.blob_versioned_hashes)`).
It confirms that the returned value is a zeroed `bytes32` for each case.
diff --git a/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py b/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py
index b3ca64fd9c2..41d85d2b255 100644
--- a/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py
+++ b/tests/cancun/eip4844_blobs/test_blobhash_opcode_contexts.py
@@ -1,9 +1,6 @@
"""
-abstract: Tests `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test case for `BLOBHASH` opcode calls across different contexts
- in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
-
-""" # noqa: E501
+Tests `BLOBHASH` opcode in [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+"""
from enum import Enum
from typing import Iterable, List
@@ -34,8 +31,8 @@
class BlobhashContext(Enum):
"""
- A utility class for mapping common EVM opcodes in different contexts
- to specific bytecode (with BLOBHASH), addresses and contracts.
+ A utility class for mapping common EVM opcodes in different contexts to
+ specific bytecode (with BLOBHASH), addresses and contracts.
"""
BLOBHASH_SSTORE = "blobhash_sstore"
@@ -52,9 +49,7 @@ def code(self, *, indexes=Iterable[int]):
"""
Map opcode context to bytecode that utilizes the BLOBHASH opcode.
- Args:
- indexes: The indexes to request using the BLOBHASH opcode
-
+ Args: indexes: The indexes to request using the BLOBHASH opcode
"""
match self:
case BlobhashContext.BLOBHASH_SSTORE:
@@ -82,8 +77,8 @@ def deploy_contract(
Deploy a contract with the given context and indexes.
Args:
- pre: The pre state to deploy the contract on
- indexes: The indexes to request using the BLOBHASH opcode
+ pre: The pre state to deploy the contract on
+ indexes: The indexes to request using the BLOBHASH opcode
"""
match self:
@@ -147,7 +142,9 @@ def deploy_contract(
def simple_blob_hashes(
max_blobs_per_tx: int,
) -> List[Hash]:
- """Return a simple list of blob versioned hashes ranging from bytes32(1 to 4)."""
+ """
+ Return a simple list of blob versioned hashes ranging from bytes32(1 to 4).
+ """
return add_kzg_version(
[(1 << x) for x in range(max_blobs_per_tx)],
Spec.BLOB_COMMITMENT_VERSION_KZG,
@@ -177,11 +174,13 @@ def test_blobhash_opcode_contexts(
state_test: StateTestFiller,
):
"""
- Tests that the `BLOBHASH` opcode functions correctly when called in different contexts.
+ Tests that the `BLOBHASH` opcode functions correctly when called in
+ different contexts.
- `BLOBHASH` opcode on the top level of the call stack.
- `BLOBHASH` opcode on the max value.
- - `BLOBHASH` opcode on `CALL`, `DELEGATECALL`, `STATICCALL`, and `CALLCODE`.
+ - `BLOBHASH` opcode on `CALL`, `DELEGATECALL`, `STATICCALL`, and
+ `CALLCODE`.
- `BLOBHASH` opcode on Initcode.
- `BLOBHASH` opcode on `CREATE` and `CREATE2`.
- `BLOBHASH` opcode on transaction types 0, 1 and 2.
@@ -292,11 +291,13 @@ def test_blobhash_opcode_contexts_tx_types(
state_test: StateTestFiller,
):
"""
- Tests that the `BLOBHASH` opcode functions correctly when called in different contexts.
+ Tests that the `BLOBHASH` opcode functions correctly when called in
+ different contexts.
- `BLOBHASH` opcode on the top level of the call stack.
- `BLOBHASH` opcode on the max value.
- - `BLOBHASH` opcode on `CALL`, `DELEGATECALL`, `STATICCALL`, and `CALLCODE`.
+ - `BLOBHASH` opcode on `CALL`, `DELEGATECALL`, `STATICCALL`, and
+ `CALLCODE`.
- `BLOBHASH` opcode on Initcode.
- `BLOBHASH` opcode on `CREATE` and `CREATE2`.
- `BLOBHASH` opcode on transaction types 0, 1 and 2.
diff --git a/tests/cancun/eip4844_blobs/test_excess_blob_gas.py b/tests/cancun/eip4844_blobs/test_excess_blob_gas.py
index b25ebc0dc0b..298460ce7db 100644
--- a/tests/cancun/eip4844_blobs/test_excess_blob_gas.py
+++ b/tests/cancun/eip4844_blobs/test_excess_blob_gas.py
@@ -1,25 +1,29 @@
"""
-abstract: Tests `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
-
-note: Adding a new test
- Add a function that is named `test_` and takes at least the following arguments:
-
- - blockchain_test
- - env
- - pre
- - blocks
- - post
- - correct_excess_blob_gas
-
- The following arguments can be parametrized to generate new combinations and test cases:
-
- - new_blobs: Number of blobs in the block (automatically split across transactions as needed)
-
- All other `pytest.fixture` fixtures can be parametrized to generate new combinations and test
- cases.
-
-""" # noqa: E501
+Tests `excessBlobGas` and `blobGasUsed` block fields for EIP-4844.
+
+Tests `excessBlobGas` and `blobGasUsed` block fields for
+[EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+Note: To add a new test, add a function named `test_` that takes at
+least the following arguments.
+
+Required arguments:
+- `blockchain_test`
+- `env`
+- `pre`
+- `blocks`
+- `post`
+- `correct_excess_blob_gas`
+
+The following arguments can be parametrized to generate new
+combinations and test cases:
+
+- new_blobs: Number of blobs in the block
+  (automatically split across transactions
+  as needed)
+
+All other `pytest.fixture` fixtures can be parametrized to generate new
+combinations and test cases.
+"""
import itertools
from typing import Callable, Dict, Iterator, List, Mapping, Optional, Tuple
@@ -55,7 +59,9 @@
@pytest.fixture
def parent_excess_blobs(fork: Fork) -> int: # noqa: D103
- """By default we start with an intermediate value between the target and max."""
+ """
+ By default we start with an intermediate value between the target and max.
+ """
return (fork.max_blobs_per_block() + fork.target_blobs_per_block()) // 2 + 1
@@ -304,12 +310,14 @@ def test_correct_excess_blob_gas_calculation(
correct_excess_blob_gas: int,
):
"""
- Test calculation of the `excessBlobGas` increase/decrease across
- multiple blocks with and without blobs.
+ Test calculation of the `excessBlobGas` increase/decrease across multiple
+ blocks with and without blobs.
- With parent block containing `[0, MAX_BLOBS_PER_BLOCK]` blobs
- - With parent block containing `[0, TARGET_BLOBS_PER_BLOCK]` equivalent value of excess blob gas
- """ # noqa: E501
+
+ - With parent block containing `[0, TARGET_BLOBS_PER_BLOCK]` equivalent
+ value of excess blob gas
+ """
blockchain_test(
pre=pre,
post=post,
@@ -321,8 +329,8 @@ def test_correct_excess_blob_gas_calculation(
def generate_blob_gas_cost_increases_tests(delta: int) -> Callable[[Fork], List[int]]:
"""
- Generate a list of block excess blob gas values where the blob gas price increases
- based on fork properties.
+ Generate a list of block excess blob gas values where the blob gas price
+ increases based on fork properties.
"""
def generator_function(fork: Fork) -> List[int]:
@@ -365,8 +373,8 @@ def test_correct_increasing_blob_gas_costs(
correct_excess_blob_gas: int,
):
"""
- Test calculation of the `excessBlobGas` and blob gas tx costs at
- value points where the cost increases to interesting amounts.
+ Test calculation of the `excessBlobGas` and blob gas tx costs at value
+ points where the cost increases to interesting amounts.
- At the first blob gas cost increase (1 to 2)
- At total transaction data cost increase to `> 2^32`
@@ -402,8 +410,8 @@ def test_correct_decreasing_blob_gas_costs(
correct_excess_blob_gas: int,
):
"""
- Test calculation of the `excessBlobGas` and blob gas tx costs at
- value points where the cost decreases to interesting amounts.
+ Test calculation of the `excessBlobGas` and blob gas tx costs at value
+ points where the cost decreases to interesting amounts.
See test_correct_increasing_blob_gas_costs.
"""
@@ -433,8 +441,8 @@ def test_invalid_zero_excess_blob_gas_in_header(
):
"""
Test rejection of blocks where the `excessBlobGas` in the header drops to
- zero in a block with or without data blobs, but the excess blobs in the parent are
- greater than target.
+ zero in a block with or without data blobs, but the excess blobs in the
+ parent are greater than target.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
@@ -529,8 +537,10 @@ def test_invalid_excess_blob_gas_above_target_change(
"""
Test rejection of blocks where the `excessBlobGas`.
- - decreases more than `TARGET_BLOB_GAS_PER_BLOCK` in a single block with zero blobs
- - increases more than `TARGET_BLOB_GAS_PER_BLOCK` in a single block with max blobs
+ - decreases more than `TARGET_BLOB_GAS_PER_BLOCK` in a single block
+ with zero blobs.
+ - increases more than `TARGET_BLOB_GAS_PER_BLOCK` in a single block
+ with max blobs.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
@@ -572,8 +582,8 @@ def test_invalid_static_excess_blob_gas(
parent_excess_blob_gas: int,
):
"""
- Test rejection of blocks where the `excessBlobGas` remains unchanged
- but the parent blobs included are not `TARGET_BLOBS_PER_BLOCK`.
+ Test rejection of blocks where the `excessBlobGas` remains unchanged but
+ the parent blobs included are not `TARGET_BLOBS_PER_BLOCK`.
Test is parametrized to `MAX_BLOBS_PER_BLOCK` and `TARGET_BLOBS_PER_BLOCK`.
"""
@@ -659,7 +669,8 @@ def test_invalid_static_excess_blob_gas_from_zero_on_blobs_above_target(
Test rejection of blocks where the `excessBlobGas` does not increase from
zero, even when the included blobs is above target.
- Test is parametrized to `[TARGET_BLOBS_PER_BLOCK+1, MAX_BLOBS_PER_BLOCK]` new blobs.
+ Test is parametrized to `[TARGET_BLOBS_PER_BLOCK+1, MAX_BLOBS_PER_BLOCK]`
+ new blobs.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
@@ -708,9 +719,9 @@ def test_invalid_excess_blob_gas_change(
Test rejection of blocks where the `excessBlobGas` changes to an invalid
value.
- Given a parent block containing `[0, MAX_BLOBS_PER_BLOCK]` blobs, test an invalid
- `excessBlobGas` value by changing it by `[-TARGET_BLOBS_PER_BLOCK, TARGET_BLOBS_PER_BLOCK]`
- from the correct value.
+ Given a parent block containing `[0, MAX_BLOBS_PER_BLOCK]` blobs, test an
+ invalid `excessBlobGas` value by changing it by `[-TARGET_BLOBS_PER_BLOCK,
+ TARGET_BLOBS_PER_BLOCK]` from the correct value.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
@@ -760,8 +771,8 @@ def test_invalid_negative_excess_blob_gas(
Test rejection of blocks where the `excessBlobGas` changes to the two's
complement equivalent of the negative value after subtracting target blobs.
- Reasoning is that the `excessBlobGas` is a `uint64`, so it cannot be negative, and
- we test for a potential underflow here.
+ Reasoning is that the `excessBlobGas` is a `uint64`, so it cannot be
+ negative, and we test for a potential underflow here.
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
@@ -810,8 +821,10 @@ def test_invalid_non_multiple_excess_blob_gas(
Test rejection of blocks where the `excessBlobGas` changes to a value that
is not a multiple of Spec.GAS_PER_BLOB`.
- - Parent block contains `TARGET_BLOBS_PER_BLOCK + 1` blobs, but `excessBlobGas` is off by +/-1
- - Parent block contains `TARGET_BLOBS_PER_BLOCK - 1` blobs, but `excessBlobGas` is off by +/-1
+ - Parent block contains `TARGET_BLOBS_PER_BLOCK + 1` blobs, but
+ `excessBlobGas` is off by +/-1
+ - Parent block contains `TARGET_BLOBS_PER_BLOCK - 1` blobs, but
+ `excessBlobGas` is off by +/-1
"""
if header_excess_blob_gas is None:
raise Exception("test case is badly formatted")
diff --git a/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py b/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py
index 1d11234da00..115a0569e01 100644
--- a/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py
+++ b/tests/cancun/eip4844_blobs/test_excess_blob_gas_fork_transition.py
@@ -1,8 +1,8 @@
"""
-abstract: Tests `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) at fork transition.
- Test `excessBlobGas` and `blobGasUsed` block fields for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844) at fork
- transition.
-""" # noqa: E501
+Test `excessBlobGas` & `blobGasUsed` block fields at fork transition.
+
+Tests for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+"""
from typing import List, Mapping
@@ -49,8 +49,8 @@ def block_gas_limit(fork: Fork) -> int: # noqa: D103
@pytest.fixture
def genesis_environment(block_gas_limit: int, block_base_fee_per_gas: int) -> Environment:
"""
- Genesis environment that enables existing transition tests to be used of BPO forks.
- Compatible with all fork transitions.
+    Genesis environment that enables existing transition tests to be used
+    for BPO forks. Compatible with all fork transitions.
"""
return Environment(
base_fee_per_gas=(block_base_fee_per_gas * BASE_FEE_MAX_CHANGE_DENOMINATOR) // 7,
@@ -158,8 +158,8 @@ def pre_fork_excess_blobs(
pre_fork_blocks: List[Block],
) -> int:
"""
- Return the cumulative excess blobs up until the fork given the pre_fork_blobs_per_block
- and the target blobs in the fork prior.
+ Return the cumulative excess blobs up until the fork given the
+ pre_fork_blobs_per_block and the target blobs in the fork prior.
"""
if not fork.supports_blobs(timestamp=0):
return 0
@@ -331,11 +331,12 @@ def test_invalid_pre_fork_block_with_blob_fields(
blob_gas_used_present: bool,
):
"""
- Test block rejection when `excessBlobGas` and/or `blobGasUsed` fields are present on a pre-fork
- block.
+ Test block rejection when `excessBlobGas` and/or `blobGasUsed` fields are
+ present on a pre-fork block.
- Blocks sent by NewPayloadV2 (Shanghai) that contain `excessBlobGas` and `blobGasUsed` fields
- must be rejected with the appropriate `EngineAPIError.InvalidParams` error error.
+ Blocks sent by NewPayloadV2 (Shanghai) that contain `excessBlobGas` and
+ `blobGasUsed` fields must be rejected with the appropriate
+    `EngineAPIError.InvalidParams` error.
"""
header_modifier = Header(
excess_blob_gas=0 if excess_blob_gas_present else None,
@@ -376,11 +377,12 @@ def test_invalid_post_fork_block_without_blob_fields(
blob_gas_used_missing: bool,
):
"""
- Test block rejection when `excessBlobGas` and/or `blobGasUsed` fields are missing on a
- post-fork block.
+ Test block rejection when `excessBlobGas` and/or `blobGasUsed` fields are
+ missing on a post-fork block.
- Blocks sent by NewPayloadV3 (Cancun) without `excessBlobGas` and `blobGasUsed` fields must be
- rejected with the appropriate `EngineAPIError.InvalidParams` error.
+ Blocks sent by NewPayloadV3 (Cancun) without `excessBlobGas` and
+ `blobGasUsed` fields must be rejected with the appropriate
+ `EngineAPIError.InvalidParams` error.
"""
header_modifier = Header()
if excess_blob_gas_missing:
@@ -432,8 +434,8 @@ def test_fork_transition_excess_blob_gas_at_blob_genesis(
"""
Test `excessBlobGas` calculation in the header when the fork is activated.
- Also produce enough blocks to test the blob gas price increase when the block is full with
- `SpecHelpers.max_blobs_per_block()` blobs.
+ Also produce enough blocks to test the blob gas price increase when the
+ block is full with `SpecHelpers.max_blobs_per_block()` blobs.
"""
blockchain_test(
pre=pre,
@@ -499,7 +501,9 @@ def test_fork_transition_excess_blob_gas_post_blob_genesis(
post_fork_blocks: List[Block],
post: Mapping[Address, Account],
):
- """Test `excessBlobGas` calculation in the header when the fork is activated."""
+ """
+ Test `excessBlobGas` calculation in the header when the fork is activated.
+ """
blockchain_test(
pre=pre,
post=post,
diff --git a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py
index 44257dc67df..24562a11e2e 100644
--- a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py
+++ b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile.py
@@ -1,31 +1,31 @@
"""
-abstract: Tests point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+Tests point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
-note: Adding a new test
- Add a function that is named `test_` and takes at least the following arguments:
+Note: To add a new test, add a function named `test_` that takes at
+least the following arguments.
- - blockchain_test | state_test
- - pre
- - tx
- - post
+Required arguments:
+- `blockchain_test` or `state_test`
+- `pre`
+- `tx`
+- `post`
- The following arguments *need* to be parametrized or the test will not be generated:
+The following arguments *need* to be parametrized or the test will not be
+generated:
- - versioned_hash
- - kzg_commitment
- - z
- - y
- - kzg_proof
- - result
+- `versioned_hash`
+- `kzg_commitment`
+- `z`
+- `y`
+- `kzg_proof`
+- `result`
- These values correspond to a single call of the precompile, and `result` refers to
- whether the call should succeed or fail.
+These values correspond to a single call of the precompile, and `result`
+refers to whether the call should succeed or fail.
- All other `pytest.fixture` fixtures can be parametrized to generate new combinations and test
- cases.
-
-""" # noqa: E501
+All other `pytest.fixture` fixtures can be parametrized to generate new
+combinations and test cases.
+"""
import glob
import json
@@ -110,8 +110,8 @@ def call_gas() -> int:
"""
Amount of gas to pass to the precompile.
- Defaults to Spec.POINT_EVALUATION_PRECOMPILE_GAS, but can be parametrized to
- test different amounts.
+ Defaults to Spec.POINT_EVALUATION_PRECOMPILE_GAS, but can be parametrized
+ to test different amounts.
"""
return Spec.POINT_EVALUATION_PRECOMPILE_GAS
@@ -144,7 +144,8 @@ def precompile_caller_code(call_opcode: Op, call_gas: int) -> Bytecode:
precompile_caller_code = Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
precompile_caller_code += Op.SSTORE(
key_call_return_code,
- call_opcode( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501
+ # https://github.com/ethereum/execution-spec-tests/issues/348
+ call_opcode( # type: ignore
gas=call_gas,
address=Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS,
args_offset=0x00,
@@ -226,8 +227,8 @@ def post(
precompile_input: bytes,
) -> Dict:
"""
- Prepare expected post for each test, depending on the success or
- failure of the precompile call.
+ Prepare expected post for each test, depending on the success or failure of
+ the precompile call.
"""
expected_storage: Storage.StorageDictType = {}
# CALL operation return code
@@ -291,8 +292,9 @@ def test_valid_inputs(
"""
Test valid sanity precompile calls that are expected to succeed.
- - `kzg_commitment` and `kzg_proof` are set to values such that `p(z)==0` for all values of `z`,
- hence `y` is tested to be zero, and call to be successful.
+ - `kzg_commitment` and `kzg_proof` are set to values such that `p(z)==0`
+ for all values of `z`, hence `y` is tested to be zero, and call to be
+ successful.
"""
state_test(
env=Environment(),
@@ -348,7 +350,8 @@ def test_invalid_inputs(
- Correct proof, commitment, z and y, but incorrect lengths
- Null inputs
- Zero inputs
- - Correct proof, commitment, z and y, but incorrect version versioned hash
+ - Correct proof, commitment, z and y, but incorrect version versioned
+ hash
"""
state_test(
env=Environment(),
@@ -425,8 +428,8 @@ def get_point_evaluation_test_files_in_directory(path: str) -> list[str]:
def all_external_vectors() -> List:
"""
- Test for the Point Evaluation Precompile from external sources,
- contained in ./point_evaluation_vectors/.
+ Test for the Point Evaluation Precompile from external sources, contained
+ in ./point_evaluation_vectors/.
"""
test_cases = []
@@ -453,7 +456,8 @@ def test_external_vectors(
post: Dict,
):
"""
- Test precompile calls using external test vectors compiled from different sources.
+ Test precompile calls using external test vectors compiled from different
+ sources.
- `go_kzg_4844_verify_kzg_proof.json`: test vectors from the
[go-kzg-4844](https://github.com/crate-crypto/go-kzg-4844) repository.
@@ -531,16 +535,17 @@ def test_tx_entry_point(
proof_correct: bool,
):
"""
- Test calling the Point Evaluation Precompile directly as
- transaction entry point, and measure the gas consumption.
+ Test calling the Point Evaluation Precompile directly as transaction entry
+ point, and measure the gas consumption.
- - Using `gas_limit` with exact necessary gas, insufficient gas and extra gas.
+ - Using `gas_limit` with exact necessary gas, insufficient gas and extra
+ gas.
- Using correct and incorrect proofs
"""
sender = pre.fund_eoa()
- # Starting from EIP-7623, we need to use an access list to raise the intrinsic gas cost to be
- # above the floor data cost.
+ # Starting from EIP-7623, we need to use an access list to raise the
+ # intrinsic gas cost to be above the floor data cost.
access_list = [AccessList(address=Address(i), storage_keys=[]) for i in range(1, 10)]
# Gas is appended the intrinsic gas cost of the transaction
@@ -614,7 +619,9 @@ def test_precompile_before_fork(
tx: Transaction,
precompile_caller_address: Address,
):
- """Test calling the Point Evaluation Precompile before the appropriate fork."""
+ """
+ Test calling the Point Evaluation Precompile before the appropriate fork.
+ """
post = {
precompile_caller_address: Account(
storage={1: 1},
@@ -667,7 +674,9 @@ def test_precompile_during_fork(
precompile_input: bytes,
sender: EOA,
):
- """Test calling the Point Evaluation Precompile during the appropriate fork."""
+ """
+ Test calling the Point Evaluation Precompile during the appropriate fork.
+ """
# Blocks before fork
blocks = [
Block(
diff --git a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py
index 02a80eb3de9..f8eb3d96f29 100644
--- a/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py
+++ b/tests/cancun/eip4844_blobs/test_point_evaluation_precompile_gas.py
@@ -1,8 +1,9 @@
"""
-abstract: Tests gas usage on point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844)
- Test gas usage on point evaluation precompile for [EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+Tests gas usage on point evaluation precompile for EIP-4844.
-""" # noqa: E501
+Tests gas usage on point evaluation precompile for
+[EIP-4844: Shard Blob Transactions](https://eips.ethereum.org/EIPS/eip-4844).
+"""
from typing import Dict, Literal
@@ -103,7 +104,8 @@ def precompile_caller_code(
+ copy_opcode_cost(fork, len(precompile_input))
)
if call_type == Op.CALL or call_type == Op.CALLCODE:
- precompile_caller_code += call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501
+ # https://github.com/ethereum/execution-spec-tests/issues/348
+ precompile_caller_code += call_type( # type: ignore
call_gas,
Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS,
0x00,
@@ -115,7 +117,8 @@ def precompile_caller_code(
overhead_cost += (push_operations_cost * 6) + (calldatasize_cost * 1)
elif call_type == Op.DELEGATECALL or call_type == Op.STATICCALL:
# Delegatecall and staticcall use one less argument
- precompile_caller_code += call_type( # type: ignore # https://github.com/ethereum/execution-spec-tests/issues/348 # noqa: E501
+ # https://github.com/ethereum/execution-spec-tests/issues/348
+ precompile_caller_code += call_type( # type: ignore
call_gas,
Spec.POINT_EVALUATION_PRECOMPILE_ADDRESS,
0x00,
@@ -163,8 +166,8 @@ def post(
call_gas: int,
) -> Dict:
"""
- Prepare expected post for each test, depending on the success or
- failure of the precompile call and the gas usage.
+ Prepare expected post for each test, depending on the success or failure of
+ the precompile call and the gas usage.
"""
if proof == "correct":
expected_gas_usage = (
@@ -205,11 +208,12 @@ def test_point_evaluation_precompile_gas_usage(
post: Dict,
):
"""
- Test point evaluation precompile gas usage under different call contexts and gas limits.
+ Test point evaluation precompile gas usage under different call contexts
+ and gas limits.
- - Test using all call types (CALL, DELEGATECALL, CALLCODE, STATICCALL)
- - Test using different gas limits (exact gas, insufficient gas, extra gas)
- - Test using correct and incorrect proofs
+    - Test using all call types (CALL, DELEGATECALL, CALLCODE, STATICCALL)
+    - Test using different gas limits (exact gas, insufficient gas, extra gas)
+    - Test using correct and incorrect proofs
"""
state_test(
env=Environment(),
diff --git a/tests/cancun/eip5656_mcopy/common.py b/tests/cancun/eip5656_mcopy/common.py
index 1c975f4f840..a4aef625cae 100644
--- a/tests/cancun/eip5656_mcopy/common.py
+++ b/tests/cancun/eip5656_mcopy/common.py
@@ -1,7 +1,7 @@
"""
-Common procedures to test
-[EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656).
-""" # noqa: E501
+Common procedures to test [EIP-5656: MCOPY - Memory copying
+instruction](https://eips.ethereum.org/EIPS/eip-5656).
+"""
from copy import copy
@@ -16,7 +16,8 @@ def mcopy(*, src: int, dest: int, length: int, memory: bytes) -> bytes:
res = bytearray(copy(memory))
- # If the destination or source are larger than the memory, we need to extend the memory
+ # If the destination or source are larger than the memory, we need to
+ # extend the memory
max_byte_index = max(src, dest) + length
if max_byte_index > len(memory):
res.extend(b"\x00" * (max_byte_index - len(memory)))
diff --git a/tests/cancun/eip5656_mcopy/test_mcopy.py b/tests/cancun/eip5656_mcopy/test_mcopy.py
index cd1fdda802a..0ad39679604 100644
--- a/tests/cancun/eip5656_mcopy/test_mcopy.py
+++ b/tests/cancun/eip5656_mcopy/test_mcopy.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656)
- Test copy operations of [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656).
-
-""" # noqa: E501
+Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656).
+"""
from typing import Mapping
@@ -54,8 +52,8 @@ def code_bytecode(
code_storage: Storage,
) -> Bytecode:
"""
- Prepare bytecode and storage for the test, based on the starting memory and the final
- memory that resulted from the copy.
+ Prepare bytecode and storage for the test, based on the starting memory and
+ the final memory that resulted from the copy.
"""
bytecode = Bytecode()
@@ -90,8 +88,8 @@ def code_bytecode(
Op.MLOAD(w * 0x20),
)
- # If the memory was extended beyond the initial range, store the last word of the resulting
- # memory into storage too
+ # If the memory was extended beyond the initial range, store the last word
+ # of the resulting memory into storage too
if len(final_memory) > len(initial_memory):
last_word = ceiling_division(len(final_memory), 0x20) - 1
bytecode += Op.SSTORE(
@@ -187,7 +185,8 @@ def test_valid_mcopy_operations(
tx: Transaction,
):
"""
- Perform MCOPY operations using different offsets and lengths:
+ Perform MCOPY operations using different offsets and lengths.
+
- Zero inputs
- Memory rewrites (copy from and to the same location)
- Memory overwrites (copy from and to different locations)
@@ -214,7 +213,10 @@ def test_mcopy_on_empty_memory(
post: Mapping[str, Account],
tx: Transaction,
):
- """Perform MCOPY operations on an empty memory, using different offsets and lengths."""
+ """
+ Perform MCOPY operations on an empty memory, using different offsets and
+ lengths.
+ """
state_test(
env=Environment(),
pre=pre,
diff --git a/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py b/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py
index 85049128097..33dcc5e3c79 100644
--- a/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py
+++ b/tests/cancun/eip5656_mcopy/test_mcopy_contexts.py
@@ -1,8 +1,8 @@
"""
-abstract: Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656)
- Test memory copy under different call contexts [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656).
+Test memory copy under different call contexts.
-""" # noqa: E501
+Tests for [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656).
+"""
from itertools import cycle, islice
from typing import Mapping
@@ -39,8 +39,8 @@ def callee_bytecode(
call_opcode: Op,
) -> Bytecode:
"""
- Callee simply performs mcopy operations that should not have any effect on the
- caller context.
+ Callee simply performs mcopy operations that should not have any effect on
+ the caller context.
"""
bytecode = Bytecode()
@@ -76,7 +76,8 @@ def initial_memory(
ret = bytes(list(islice(cycle(range(0x01, 0x100)), initial_memory_length)))
if call_opcode in [Op.CREATE, Op.CREATE2]:
- # We also need to put the callee_bytecode as initcode in memory for create operations
+ # We also need to put the callee_bytecode as initcode in memory for
+ # create operations
ret = bytes(callee_bytecode) + ret[len(callee_bytecode) :]
assert len(ret) == initial_memory_length
@@ -97,8 +98,8 @@ def caller_bytecode(
caller_storage: Storage,
) -> Bytecode:
"""
- Prepare bytecode and storage for the test, based on the starting memory and the final
- memory that resulted from the copy.
+ Prepare bytecode and storage for the test, based on the starting memory and
+ the final memory that resulted from the copy.
"""
bytecode = Bytecode()
@@ -116,8 +117,8 @@ def caller_bytecode(
bytecode += Op.SSTORE(100_000, Op.MSIZE())
caller_storage[100_000] = ceiling_division(len(initial_memory), 0x20) * 0x20
- # Store all memory in the initial range to verify the MCOPY in the subcall did not affect
- # this level's memory
+ # Store all memory in the initial range to verify the MCOPY in the subcall
+ # did not affect this level's memory
for w in range(0, len(initial_memory) // 0x20):
bytecode += Op.SSTORE(w, Op.MLOAD(w * 0x20))
caller_storage[w] = initial_memory[w * 0x20 : w * 0x20 + 0x20]
@@ -171,8 +172,8 @@ def test_no_memory_corruption_on_upper_call_stack_levels(
tx: Transaction,
):
"""
- Perform a subcall with any of the following opcodes, which uses MCOPY during its execution,
- and verify that the caller's memory is unaffected.
+ Perform a subcall with any of the following opcodes, which uses MCOPY
+ during its execution, and verify that the caller's memory is unaffected.
"""
state_test(
env=Environment(),
@@ -197,8 +198,8 @@ def test_no_memory_corruption_on_upper_create_stack_levels(
tx: Transaction,
):
"""
- Perform a subcall with any of the following opcodes, which uses MCOPY during its execution,
- and verify that the caller's memory is unaffected:
+ Perform a subcall with any of the following opcodes, which uses MCOPY
+ during its execution, and verify that the caller's memory is unaffected:
- `CREATE`
- `CREATE2`.
diff --git a/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py b/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py
index 639d007542a..6a9801d0098 100644
--- a/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py
+++ b/tests/cancun/eip5656_mcopy/test_mcopy_memory_expansion.py
@@ -1,9 +1,10 @@
"""
-abstract: Tests [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656)
- Test copy operations of [EIP-5656: MCOPY - Memory copying instruction](https://eips.ethereum.org/EIPS/eip-5656)
- that produce a memory expansion, and potentially an out-of-gas error.
+Test MCOPY with memory expansion and potential OOG errors.
-""" # noqa: E501
+Test copy operations of [EIP-5656: MCOPY - Memory copying
+instruction](https://eips.ethereum.org/EIPS/eip-5656) that produce
+a memory expansion, and potentially an out-of-gas error.
+"""
import itertools
from typing import List, Mapping
@@ -74,11 +75,11 @@ def call_exact_cost(
tx_access_list: List[AccessList],
) -> int:
"""
- Return the exact cost of the subcall, based on the initial memory and the length of the
- copy.
+ Return the exact cost of the subcall, based on the initial memory and the
+ length of the copy.
"""
- # Starting from EIP-7623, we need to use an access list to raise the intrinsic gas cost to be
- # above the floor data cost.
+ # Starting from EIP-7623, we need to use an access list to raise the
+ # intrinsic gas cost to be above the floor data cost.
cost_memory_bytes = fork.memory_expansion_gas_calculator()
gas_costs = fork.gas_costs()
tx_intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
@@ -218,7 +219,10 @@ def test_mcopy_memory_expansion(
post: Mapping[str, Account],
tx: Transaction,
):
- """Perform MCOPY operations that expand the memory, and verify the gas it costs to do so."""
+ """
+ Perform MCOPY operations that expand the memory, and verify the gas it
+ costs to do so.
+ """
state_test(
env=env,
pre=pre,
@@ -279,8 +283,8 @@ def test_mcopy_huge_memory_expansion(
tx: Transaction,
):
"""
- Perform MCOPY operations that expand the memory by huge amounts, and verify that it correctly
- runs out of gas.
+ Perform MCOPY operations that expand the memory by huge amounts, and verify
+ that it correctly runs out of gas.
"""
state_test(
env=env,
diff --git a/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py b/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py
index ea59c47f2c0..d46db3d6509 100644
--- a/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py
+++ b/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py
@@ -51,20 +51,26 @@ def test_dynamic_create2_selfdestruct_collision(
"""
Dynamic Create2->Suicide->Create2 collision scenario.
- Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys,
- then on a different call, in the same tx, perform a self-destruct.
+ Perform a CREATE2, make sure that the initcode sets at least a couple of
+ storage keys, then on a different call, in the same tx, perform a
+ self-destruct.
Then:
- a) on the same tx, attempt to recreate the contract <=== Covered in this test
- 1) and create2 contract already in the state
- 2) and create2 contract is not in the state
- b) on a different tx, attempt to recreate the contract
- Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys,
- then in a different tx, perform a self-destruct.
+ a) on the same tx, attempt to recreate the contract
+ -> Covered in this test
+ 1) and create2 contract already in the state
+ 2) and create2 contract is not in the state
+ b) on a different tx, attempt to recreate the contract
+
+ Perform a CREATE2, make sure that the initcode sets at least a couple
+ of storage keys, then in a different tx, perform a self-destruct.
+
Then:
- a) on the same tx, attempt to recreate the contract
- b) on a different tx, attempt to recreate the contract
- Verify that the test case described in
- https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/22156575/2024-01-06+Mainnet+Halting+Event
+ a) on the same tx, attempt to recreate the contract
+ b) on a different tx, attempt to recreate the contract
+
+ Check the test case described in
+ https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/
+ 22156575/2024-01-06+Mainnet+Halting+Event
"""
assert call_create2_contract_in_between or call_create2_contract_at_the_end, "invalid test"
@@ -79,7 +85,8 @@ def test_dynamic_create2_selfdestruct_collision(
create2_salt = 1
# Create EOA for sendall destination (receives selfdestruct funds)
- sendall_destination = pre.fund_eoa(0) # Will be funded by selfdestruct calls
+ # Will be funded by selfdestruct calls
+ sendall_destination = pre.fund_eoa(0)
# Create storage contract that will be called during initialization
address_create2_storage = pre.deploy_contract(
@@ -131,7 +138,8 @@ def test_dynamic_create2_selfdestruct_collision(
+ Op.CALL(100000, address_code, 0, 0, 0, 0, 0)
# Call to the created account to trigger selfdestruct
+ Op.CALL(100000, call_address_in_between, first_call_value, 0, 0, 0, 0)
- # Make a subcall that do CREATE2 collision and returns its address as the result
+ # Make a subcall that does a CREATE2 collision and returns its
+ # address as the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
@@ -149,7 +157,8 @@ def test_dynamic_create2_selfdestruct_collision(
sender = pre.fund_eoa(7000000000000000000)
if create2_dest_already_in_state:
- # Create2 address already in the state, e.g. deployed in a previous block
+ # Create2 address already in the state, e.g. deployed in a previous
+ # block
pre[create2_address] = Account(
balance=pre_existing_create2_balance,
nonce=1,
@@ -182,7 +191,8 @@ def test_dynamic_create2_selfdestruct_collision(
}
)
- # Calculate the destination account expected balance for the selfdestruct/sendall calls
+ # Calculate the destination account expected balance for the
+ # selfdestruct/sendall calls
sendall_destination_balance = (
pre_existing_create2_balance if create2_dest_already_in_state else first_create2_value
)
@@ -224,20 +234,28 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
"""
Dynamic Create2->Suicide->Create2 collision scenario.
- Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys,
- then on a different call, in the same tx, perform a self-destruct.
+ Perform a CREATE2, make sure that the initcode sets at least a couple of
+ storage keys, then on a different call, in the same tx, perform a
+ self-destruct.
+
Then:
- a) on the same tx, attempt to recreate the contract
- 1) and create2 contract already in the state
- 2) and create2 contract is not in the state
- b) on a different tx, attempt to recreate the contract <=== Covered in this test
- Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys,
- then in a different tx, perform a self-destruct.
+ a) on the same tx, attempt to recreate the contract
+ 1) and create2 contract already in the state
+ 2) and create2 contract is not in the state
+ b) on a different tx, attempt to recreate the contract
+ -> Covered in this test
+
+ Perform a CREATE2, make sure that the initcode sets at
+ least a couple of storage keys, then in a different tx, perform a
+ self-destruct.
+
Then:
- a) on the same tx, attempt to recreate the contract
- b) on a different tx, attempt to recreate the contract
- Verify that the test case described in
- https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/22156575/2024-01-06+Mainnet+Halting+Event
+ a) on the same tx, attempt to recreate the contract
+ b) on a different tx, attempt to recreate the contract
+
+ Check the test case described in
+ https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/
+ 22156575/2024-01-06+Mainnet+Halting+Event
"""
# assert call_create2_contract_at_the_end, "invalid test"
@@ -252,7 +270,8 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
create2_salt = 1
# Create EOA for sendall destination (receives selfdestruct funds)
- sendall_destination = pre.fund_eoa(0) # Will be funded by selfdestruct calls
+ # Will be funded by selfdestruct calls
+ sendall_destination = pre.fund_eoa(0)
# Create storage contract that will be called during initialization
address_create2_storage = pre.deploy_contract(
@@ -311,7 +330,8 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
# Create the second contract that performs the second transaction
address_to_second = pre.deploy_contract(
code=Op.JUMPDEST()
- # Make a subcall that do CREATE2 collision and returns its address as the result
+ # Make a subcall that does a CREATE2 collision and returns its
+ # address as the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
@@ -329,7 +349,8 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
sender = pre.fund_eoa(7000000000000000000)
if create2_dest_already_in_state:
- # Create2 address already in the state, e.g. deployed in a previous block
+ # Create2 address already in the state, e.g. deployed in a previous
+ # block
pre[create2_address] = Account(
balance=pre_existing_create2_balance,
nonce=1,
@@ -350,8 +371,9 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
)
)
- # after Cancun Create2 initcode is only executed if the contract did not already exist
- # and before it will always be executed as the first tx deletes the account
+ # after Cancun Create2 initcode is only executed if the contract did not
+ # already exist and before it will always be executed as the first tx
+ # deletes the account
post[address_create2_storage] = Account(
storage={
create2_constructor_worked: int(fork < Cancun or not create2_dest_already_in_state)
@@ -369,9 +391,12 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
post[address_to_second] = Account(
storage={
code_worked: 0x01,
- # Second create2 will not collide before Cancun as the first tx calls selfdestruct
- # After cancun it will collide only if create2_dest_already_in_state otherwise the
- # first tx creates and deletes it
+ # Second create2 will not collide before Cancun as the first tx
+ # calls selfdestruct
+ #
+ # After cancun it will collide only if
+ # create2_dest_already_in_state otherwise the first tx creates and
+ # deletes it
second_create2_result: (
(0x00 if create2_dest_already_in_state else create2_address)
if fork >= Cancun
@@ -380,14 +405,16 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
}
)
- # Calculate the destination account expected balance for the selfdestruct/sendall calls
+ # Calculate the destination account expected balance for the
+ # selfdestruct/sendall calls
sendall_destination_balance = 0
if create2_dest_already_in_state:
sendall_destination_balance += pre_existing_create2_balance
if fork >= Cancun:
- # first create2 fails, but first calls ok. the account is not removed on cancun
- # therefore with the second create2 it is not successful
+ # first create2 fails, but first calls ok. the account is not
+ # removed on cancun therefore with the second create2 it is not
+ # successful
sendall_destination_balance += first_call_value
else:
# first create2 fails, first calls totally removes the account
@@ -396,8 +423,9 @@ def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
if call_create2_contract_at_the_end:
sendall_destination_balance += second_create2_value
else:
- # if no account in the state, first create2 successful, first call successful and removes
- # because it is removed in the next transaction second create2 successful
+ # if no account in the state, first create2 successful, first call
+ # successful and removes because it is removed in the next transaction
+ # second create2 successful
sendall_destination_balance = first_create2_value + first_call_value
if call_create2_contract_at_the_end:
sendall_destination_balance += second_create2_value
@@ -448,20 +476,28 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
blockchain_test: BlockchainTestFiller,
):
"""
- Dynamic Create2->Suicide->Create2 collision scenario over multiple transactions.
+ Dynamic Create2->Suicide->Create2 collision scenario over multiple
+ transactions.
- Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys,
- then on a different call, in the same or different tx but same block, perform a self-destruct.
+ Perform a CREATE2, make sure that the initcode sets at least a couple of
+ storage keys, then on a different call, in the same or different tx but
+ same block, perform a self-destruct.
Then:
- a) on the same tx, attempt to recreate the contract
- b) on a different tx, attempt to recreate the contract
- Perform a CREATE2, make sure that the initcode sets at least a couple of storage keys,
- then in a different tx, perform a self-destruct.
+ a) on the same tx, attempt to recreate the contract
+ b) on a different tx, attempt to recreate the contract
+
+ Perform a CREATE2, make sure that the initcode sets at least a
+ couple of storage keys, then in a different tx, perform a self-destruct.
+
Then:
- a) on the same tx, attempt to recreate the contract <=== Covered in this test
- b) on a different tx, attempt to recreate the contract <=== Covered in this test
- Verify that the test case described in
- https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/22156575/2024-01-06+Mainnet+Halting+Event
+ a) on the same tx, attempt to recreate the contract
+ -> Covered in this test
+ b) on a different tx, attempt to recreate the contract
+ -> Covered in this test
+
+ Check the test case described in
+ https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/
+ 22156575/2024-01-06+Mainnet+Halting+Event
"""
if recreate_on_first_tx:
assert selfdestruct_on_first_tx, "invalid test"
@@ -477,7 +513,8 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
create2_salt = 1
# Create EOA for sendall destination (receives selfdestruct funds)
- sendall_destination = pre.fund_eoa(0) # Will be funded by selfdestruct calls
+ # Will be funded by selfdestruct calls
+ sendall_destination = pre.fund_eoa(0)
# Create storage contract that will be called during initialization
address_create2_storage = pre.deploy_contract(
@@ -540,7 +577,8 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
if recreate_on_first_tx:
first_tx_code += (
- # Make a subcall that do CREATE2 collision and returns its address as the result
+ # Make a subcall that does a CREATE2 collision and returns its
+ # address as the result
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
@@ -551,7 +589,8 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
else:
second_tx_code += (
- # Make a subcall that do CREATE2 collision and returns its address as the result
+ # Make a subcall that does a CREATE2 collision and returns its
+ # address as the result
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
@@ -566,7 +605,8 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
first_tx_code += Op.SSTORE(part_1_worked, 1)
second_tx_code += Op.SSTORE(part_2_worked, 1)
- # Create the main contract that uses conditional logic to handle both transactions
+ # Create the main contract that uses conditional logic to handle both
+ # transactions
address_to = pre.deploy_contract(
code=Conditional(
# Depending on the tx, execute the first or second tx code
@@ -585,8 +625,9 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
# Create2 address only exists if it was pre-existing and after cancun
account_will_exist_with_code = not selfdestruct_on_first_tx and fork >= Cancun
- # If the contract is self-destructed and we also attempt to recreate it on the first tx,
- # the second call on the second tx will only place balance in the account
+ # If the contract is self-destructed and we also attempt to recreate it on
+ # the first tx, the second call on the second tx will only place balance in
+ # the account
account_will_exist_with_balance = selfdestruct_on_first_tx and recreate_on_first_tx
post[create2_address] = (
@@ -609,14 +650,16 @@ def test_dynamic_create2_selfdestruct_collision_multi_tx(
part_2_worked: 0x01,
# First create2 always works
first_create2_result: create2_address,
- # Second create2 only works if we successfully self-destructed on the first tx
+ # Second create2 only works if we successfully self-destructed on
+ # the first tx
second_create2_result: (
create2_address if selfdestruct_on_first_tx and not recreate_on_first_tx else 0x00
),
}
)
- # Calculate the destination account expected balance for the selfdestruct/sendall calls
+ # Calculate the destination account expected balance for the
+ # selfdestruct/sendall calls
sendall_destination_balance = first_create2_value + first_call_value
if not account_will_exist_with_balance:
diff --git a/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py b/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py
index 9a851c6b26c..36d13853c46 100644
--- a/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py
+++ b/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py
@@ -1,7 +1,4 @@
-"""
-Suicide scenario requested test
-https://github.com/ethereum/tests/issues/1325.
-"""
+"""Suicide scenario requested test https://github.com/ethereum/tests/issues/1325."""
from typing import SupportsBytes
@@ -163,12 +160,14 @@ def test_reentrancy_selfdestruct_revert(
if first_suicide in [Op.CALLCODE, Op.DELEGATECALL]:
if fork >= Cancun:
- # On Cancun even callcode/delegatecall does not remove the account, so the value remain
+ # On Cancun even callcode/delegatecall does not remove the account,
+ # so the value remains
post[executor_contract_address] = Account(
storage={
0x01: 0x01, # First call to contract S->suicide success
0x02: 0x00, # Second call to contract S->suicide reverted
- 0x03: 16, # Reverted value to check that revert really worked
+ # Reverted value to check that revert really worked
+ 0x03: 16,
},
)
else:
@@ -184,7 +183,8 @@ def test_reentrancy_selfdestruct_revert(
balance=executor_contract_init_balance,
)
- # On Cancun suicide no longer destroys the account from state, just cleans the balance
+ # On Cancun suicide no longer destroys the account from state, just cleans
+ # the balance
if first_suicide in [Op.CALL]:
post[executor_contract_address] = Account(
storage={
@@ -194,7 +194,8 @@ def test_reentrancy_selfdestruct_revert(
},
)
if fork >= Cancun:
- # On Cancun suicide does not remove the account, just sends the balance
+ # On Cancun suicide does not remove the account, just sends the
+ # balance
post[selfdestruct_contract_address] = Account(
balance=0, code=selfdestruct_contract_bytecode, storage={}
)
diff --git a/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py b/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py
index b8396261625..79daddd8d25 100644
--- a/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py
+++ b/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py
@@ -1,8 +1,8 @@
"""
-abstract: Tests [EIP-6780: SELFDESTRUCT only in same transaction](https://eips.ethereum.org/EIPS/eip-6780)
- Tests for [EIP-6780: SELFDESTRUCT only in same transaction](https://eips.ethereum.org/EIPS/eip-6780).
+SELFDESTRUCT only in same transaction tests.
-""" # noqa: E501
+Tests for [EIP-6780: SELFDESTRUCT only in same transaction](https://eips.ethereum.org/EIPS/eip-6780).
+"""
from itertools import cycle
from typing import Dict, List
@@ -37,8 +37,9 @@
Address of a pre-existing contract that self-destructs.
"""
-# Sentinel value to indicate that the self-destructing contract address should be used, only for
-# use in `pytest.mark.parametrize`, not for use within the test method itself.
+# Sentinel value to indicate that the self-destructing contract address should
+# be used, only for use in `pytest.mark.parametrize`, not for use within the
+# test method itself.
SELF_ADDRESS = Address(0x01)
# Sentinel value to indicate that the contract should not self-destruct.
NO_SELFDESTRUCT = Address(0x00)
@@ -59,9 +60,11 @@ def sendall_recipient_addresses(request: pytest.FixtureRequest, pre: Alloc) -> L
"""
List of addresses that receive the SENDALL operation in any test.
- If the test case requires a pre-existing contract, it will be deployed here.
+ If the test case requires a pre-existing contract, it will be deployed
+ here.
- By default the list is a single pre-deployed contract that unconditionally sets storage.
+ By default the list is a single pre-deployed contract that unconditionally
+ sets storage.
"""
address_list = getattr(request, "param", [PRE_DEPLOY_CONTRACT_1])
deployed_contracts: Dict[str, Address] = {}
@@ -88,8 +91,8 @@ def selfdestruct_code_preset(
bytecode = Op.SSTORE(0, Op.ADD(Op.SLOAD(0), 1))
if len(sendall_recipient_addresses) != 1:
- # Load the recipient address from calldata, each test case needs to pass the addresses as
- # calldata
+ # Load the recipient address from calldata, each test case needs to
+ # pass the addresses as calldata
bytecode += Conditional(
# We avoid having the caller to give us our own address by checking
# against a constant that is a magic number
@@ -119,8 +122,8 @@ def selfdestruct_code(
sendall_recipient_addresses: List[Address],
) -> Bytecode:
"""
- Create default self-destructing bytecode,
- which can be modified by each test if necessary.
+ Create default self-destructing bytecode, which can be modified by each
+ test if necessary.
"""
return selfdestruct_code_preset(sendall_recipient_addresses=sendall_recipient_addresses)
@@ -190,19 +193,21 @@ def test_create_selfdestruct_same_tx(
selfdestruct_contract_initial_balance: int,
):
"""
- Use CREATE or CREATE2 to create a self-destructing contract, and call it in the same
- transaction.
+ Use CREATE or CREATE2 to create a self-destructing contract, and call it in
+ the same transaction.
Behavior should be the same before and after EIP-6780.
Test using:
- - Different send-all recipient addresses: single, multiple, including self
+ - Different send-all recipient addresses: single, multiple,
+ including self
- Different initial balances for the self-destructing contract
- Different opcodes: CREATE, CREATE2
"""
selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
initcode_copy_from_address = pre.deploy_contract(selfdestruct_contract_initcode)
- # Our entry point is an initcode that in turn creates a self-destructing contract
+ # Our entry point is an initcode that in turn creates a self-destructing
+ # contract
entry_code_storage = Storage()
# Bytecode used to create the contract, can be CREATE or CREATE2
@@ -225,9 +230,11 @@ def test_create_selfdestruct_same_tx(
)
selfdestruct_contract_current_balance = selfdestruct_contract_initial_balance
- # Entry code that will be executed, creates the contract and then calls it in the same tx
+ # Entry code that will be executed, creates the contract and then calls it
+ # in the same tx
entry_code = (
- # Initcode is already deployed at `initcode_copy_from_address`, so just copy it
+ # Initcode is already deployed at `initcode_copy_from_address`, so just
+ # copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
@@ -252,8 +259,8 @@ def test_create_selfdestruct_same_tx(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Call the self-destructing contract multiple times as required, increasing the wei sent each
- # time
+ # Call the self-destructing contract multiple times as required, increasing
+ # the wei sent each time
entry_code_balance = 0
for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)):
entry_code += Op.MSTORE(0, sendall_recipient)
@@ -276,8 +283,9 @@ def test_create_selfdestruct_same_tx(
if sendall_recipient != selfdestruct_contract_address:
sendall_final_balances[sendall_recipient] += selfdestruct_contract_current_balance
- # Self-destructing contract must always have zero balance after the call because the
- # self-destruct always happens in the same transaction in this test
+ # Self-destructing contract must always have zero balance after the
+ # call because the self-destruct always happens in the same transaction
+ # in this test
selfdestruct_contract_current_balance = 0
entry_code += Op.SSTORE(
@@ -296,8 +304,8 @@ def test_create_selfdestruct_same_tx(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(max(len(selfdestruct_contract_initcode), 32), 1)
tx = Transaction(
@@ -336,7 +344,8 @@ def test_self_destructing_initcode(
selfdestruct_code: Bytecode,
sendall_recipient_addresses: List[Address],
create_opcode: Op,
- call_times: int, # Number of times to call the self-destructing contract in the same tx
+ # Number of times to call the self-destructing contract in the same tx
+ call_times: int,
selfdestruct_contract_initial_balance: int,
):
"""
@@ -347,10 +356,12 @@ def test_self_destructing_initcode(
Test using:
- Different initial balances for the self-destructing contract
- Different opcodes: CREATE, CREATE2
- - Different number of calls to the self-destructing contract in the same tx
+ - Different number of calls to the self-destructing contract in
+ the same tx
"""
initcode_copy_from_address = pre.deploy_contract(selfdestruct_code)
- # Our entry point is an initcode that in turn creates a self-destructing contract
+ # Our entry point is an initcode that in turn creates a self-destructing
+ # contract
entry_code_storage = Storage()
sendall_amount = 0
@@ -364,9 +375,11 @@ def test_self_destructing_initcode(
opcode=create_opcode,
)
- # Entry code that will be executed, creates the contract and then calls it in the same tx
+ # Entry code that will be executed, creates the contract and then calls it
+ # in the same tx
entry_code = (
- # Initcode is already deployed at `initcode_copy_from_address`, so just copy it
+ # Initcode is already deployed at `initcode_copy_from_address`, so just
+ # copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
@@ -391,8 +404,8 @@ def test_self_destructing_initcode(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Call the self-destructing contract multiple times as required, increasing the wei sent each
- # time
+ # Call the self-destructing contract multiple times as required, increasing
+ # the wei sent each time
entry_code_balance = 0
for i in range(call_times):
entry_code += Op.SSTORE(
@@ -414,8 +427,8 @@ def test_self_destructing_initcode(
Op.BALANCE(selfdestruct_contract_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(max(len(selfdestruct_code), 32), 1)
if selfdestruct_contract_initial_balance > 0:
@@ -463,8 +476,8 @@ def test_self_destructing_initcode_create_tx(
Behavior should be the same before and after EIP-6780.
Test using:
- - Different initial balances for the self-destructing contract
- - Different transaction value amounts
+ - Different initial balances for the self-destructing contract
+ - Different transaction value amounts
"""
tx = Transaction(
sender=sender,
@@ -476,7 +489,8 @@ def test_self_destructing_initcode_create_tx(
selfdestruct_contract_address = tx.created_contract
pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
- # Our entry point is an initcode that in turn creates a self-destructing contract
+ # Our entry point is an initcode that in turn creates a self-destructing
+ # contract
sendall_amount = selfdestruct_contract_initial_balance + tx_value
post: Dict[Address, Account] = {
@@ -487,7 +501,8 @@ def test_self_destructing_initcode_create_tx(
state_test(pre=pre, post=post, tx=tx)
-@pytest.mark.parametrize("create_opcode", [Op.CREATE2]) # Can only recreate using CREATE2
+# Can only recreate using CREATE2
+@pytest.mark.parametrize("create_opcode", [Op.CREATE2])
@pytest.mark.parametrize(
"sendall_recipient_addresses",
[
@@ -514,18 +529,20 @@ def test_recreate_self_destructed_contract_different_txs(
selfdestruct_contract_initial_balance: int,
sendall_recipient_addresses: List[Address],
create_opcode: Op,
- recreate_times: int, # Number of times to recreate the contract in different transactions
- call_times: int, # Number of times to call the self-destructing contract in the same tx
+ # Number of times to recreate the contract in different transactions
+ recreate_times: int,
+ # Number of times to call the self-destructing contract in the same tx
+ call_times: int,
):
"""
- Test that a contract can be recreated after it has self-destructed, over the lapse
- of multiple transactions.
+ Test that a contract can be recreated after it has self-destructed, over
+ the lapse of multiple transactions.
Behavior should be the same before and after EIP-6780.
Test using:
- - Different initial balances for the self-destructing contract
- - Contract creating opcodes that are not CREATE
+ - Different initial balances for the self-destructing contract
+ - Contract creating opcodes that are not CREATE
"""
selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
initcode_copy_from_address = pre.deploy_contract(selfdestruct_contract_initcode)
@@ -538,7 +555,8 @@ def test_recreate_self_destructed_contract_different_txs(
# Entry code that will be executed, creates the contract and then calls it
entry_code = (
- # Initcode is already deployed at initcode_copy_from_address, so just copy it
+ # Initcode is already deployed at initcode_copy_from_address, so just
+ # copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
@@ -663,15 +681,17 @@ def test_selfdestruct_pre_existing(
call_times: int,
):
"""
- Test calling a previously created account that contains a selfdestruct, and verify its balance
- is sent to the destination address.
+ Test calling a previously created account that contains a selfdestruct, and
+ verify its balance is sent to the destination address.
- After EIP-6780, the balance should be sent to the send-all recipient address, similar to
- the behavior before the EIP, but the account is not deleted.
+ After EIP-6780, the balance should be sent to the send-all recipient
+ address, similar to the behavior before the EIP, but the account is not
+ deleted.
Test using:
- - Different send-all recipient addresses: single, multiple, including self
- - Different initial balances for the self-destructing contract
+ - Different send-all recipient addresses: single, multiple,
+ including self
+ - Different initial balances for the self-destructing contract
"""
selfdestruct_contract_address = pre.deploy_contract(
selfdestruct_code, balance=selfdestruct_contract_initial_balance
@@ -688,12 +708,12 @@ def test_selfdestruct_pre_existing(
)
selfdestruct_contract_current_balance = selfdestruct_contract_initial_balance
- # Entry code in this case will simply call the pre-existing self-destructing contract,
- # as many times as required
+ # Entry code in this case will simply call the pre-existing
+ # self-destructing contract, as many times as required
entry_code = Bytecode()
- # Call the self-destructing contract multiple times as required, increasing the wei sent each
- # time
+ # Call the self-destructing contract multiple times as required, increasing
+ # the wei sent each time
entry_code_balance = 0
for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)):
entry_code += Op.MSTORE(0, sendall_recipient)
@@ -716,8 +736,9 @@ def test_selfdestruct_pre_existing(
if sendall_recipient != selfdestruct_contract_address:
sendall_final_balances[sendall_recipient] += selfdestruct_contract_current_balance
- # Balance is only kept by the self-destructing contract if we are sending to self and the
- # EIP is activated, otherwise the balance is destroyed
+ # Balance is only kept by the self-destructing contract if we are
+ # sending to self and the EIP is activated, otherwise the balance is
+ # destroyed
if sendall_recipient != selfdestruct_contract_address or not eip_enabled:
selfdestruct_contract_current_balance = 0
@@ -737,8 +758,8 @@ def test_selfdestruct_pre_existing(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(32, 1)
tx = Transaction(
@@ -787,8 +808,9 @@ def test_selfdestruct_created_same_block_different_tx(
call_times: int,
):
"""
- Test that if an account created in the same block that contains a selfdestruct is
- called, its balance is sent to the send-all address, but the account is not deleted.
+ Test that if an account created in the same block that contains a
+ selfdestruct is called, its balance is sent to the send-all address, but
+ the account is not deleted.
"""
selfdestruct_code = selfdestruct_code_preset(
sendall_recipient_addresses=sendall_recipient_addresses,
@@ -800,11 +822,11 @@ def test_selfdestruct_created_same_block_different_tx(
sendall_amount = selfdestruct_contract_initial_balance
entry_code = Bytecode()
- # Entry code in this case will simply call the pre-existing self-destructing contract,
- # as many times as required
+ # Entry code in this case will simply call the pre-existing
+ # self-destructing contract, as many times as required
- # Call the self-destructing contract multiple times as required, increasing the wei sent each
- # time
+ # Call the self-destructing contract multiple times as required, increasing
+ # the wei sent each time
entry_code_balance = 0
for i in range(call_times):
entry_code += Op.SSTORE(
@@ -838,8 +860,8 @@ def test_selfdestruct_created_same_block_different_tx(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(32, 1)
post: Dict[Address, Account] = {
@@ -890,15 +912,17 @@ def test_calling_from_new_contract_to_pre_existing_contract(
selfdestruct_contract_initial_balance: int,
):
"""
- Test that if an account created in the current transaction delegate-call a previously created
- account that executes self-destruct, the calling account is deleted.
+ Test that if an account created in the current transaction delegate-call a
+ previously created account that executes self-destruct, the calling account
+ is deleted.
"""
pre_existing_selfdestruct_address = pre.deploy_contract(
selfdestruct_code_preset(
sendall_recipient_addresses=sendall_recipient_addresses,
),
)
- # Our entry point is an initcode that in turn creates a self-destructing contract
+ # Our entry point is an initcode that in turn creates a self-destructing
+ # contract
entry_code_storage = Storage()
sendall_amount = 0
@@ -915,9 +939,11 @@ def test_calling_from_new_contract_to_pre_existing_contract(
# Bytecode used to create the contract, can be CREATE or CREATE2
create_bytecode = create_opcode(size=len(selfdestruct_contract_initcode))
- # Entry code that will be executed, creates the contract and then calls it in the same tx
+ # Entry code that will be executed, creates the contract and then calls it
+ # in the same tx
entry_code = (
- # Initcode is already deployed at `initcode_copy_from_address`, so just copy it
+ # Initcode is already deployed at `initcode_copy_from_address`, so just
+ # copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
@@ -942,8 +968,8 @@ def test_calling_from_new_contract_to_pre_existing_contract(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Call the self-destructing contract multiple times as required, increasing the wei sent each
- # time
+ # Call the self-destructing contract multiple times as required, increasing
+ # the wei sent each time
entry_code_balance = 0
for i in range(call_times):
entry_code += Op.SSTORE(
@@ -977,8 +1003,8 @@ def test_calling_from_new_contract_to_pre_existing_contract(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(max(len(selfdestruct_contract_initcode), 32), 1)
if selfdestruct_contract_initial_balance > 0:
@@ -1025,9 +1051,9 @@ def test_calling_from_pre_existing_contract_to_new_contract(
pre_existing_contract_initial_balance: int,
):
"""
- Test that if an account created in the current transaction contains a self-destruct and is
- delegate-called by an account created before the current transaction, the calling account
- is not deleted.
+ Test that if an account created in the current transaction contains a
+ self-destruct and is delegate-called by an account created before the
+ current transaction, the calling account is not deleted.
"""
selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
initcode_copy_from_address = pre.deploy_contract(
@@ -1051,13 +1077,16 @@ def test_calling_from_pre_existing_contract_to_new_contract(
balance=pre_existing_contract_initial_balance,
)
- # Our entry point is an initcode that in turn creates a self-destructing contract
+ # Our entry point is an initcode that in turn creates a self-destructing
+ # contract
entry_code_storage = Storage()
sendall_amount = pre_existing_contract_initial_balance
- # Entry code that will be executed, creates the contract and then calls it in the same tx
+ # Entry code that will be executed, creates the contract and then calls it
+ # in the same tx
entry_code = (
- # Initcode is already deployed at `initcode_copy_from_address`, so just copy it
+ # Initcode is already deployed at `initcode_copy_from_address`, so just
+ # copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
@@ -1085,8 +1114,9 @@ def test_calling_from_pre_existing_contract_to_new_contract(
Op.EXTCODEHASH(caller_address),
)
- # Now instead of calling the newly created contract directly, we delegate call to it
- # from a pre-existing contract, and the contract must not self-destruct
+ # Now instead of calling the newly created contract directly, we
+ # delegate-call to it from a pre-existing contract, and the contract
+ # must not self-destruct
entry_code_balance = selfdestruct_contract_initial_balance
for i in range(call_times):
entry_code += Op.SSTORE(
@@ -1120,8 +1150,8 @@ def test_calling_from_pre_existing_contract_to_new_contract(
Op.EXTCODEHASH(caller_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(max(len(selfdestruct_contract_initcode), 32), 1)
tx = Transaction(
@@ -1177,8 +1207,9 @@ def test_create_selfdestruct_same_tx_increased_nonce(
selfdestruct_contract_initial_balance: int,
):
"""
- Verify that a contract can self-destruct if it was created in the same transaction, even when
- its nonce has been increased due to contract creation.
+ Verify that a contract can self-destruct if it was created in the same
+ transaction, even when its nonce has been increased due to contract
+ creation.
"""
initcode = Op.RETURN(0, 1)
selfdestruct_pre_bytecode = Op.MSTORE(0, Op.PUSH32(bytes(initcode))) + Op.POP(
@@ -1196,7 +1227,8 @@ def test_create_selfdestruct_same_tx_increased_nonce(
)
if selfdestruct_contract_initial_balance > 0:
pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
- # Our entry point is an initcode that in turn creates a self-destructing contract
+ # Our entry point is an initcode that in turn creates a self-destructing
+ # contract
entry_code_storage = Storage()
# Create a dict to record the expected final balances
@@ -1208,9 +1240,11 @@ def test_create_selfdestruct_same_tx_increased_nonce(
# Bytecode used to create the contract, can be CREATE or CREATE2
create_bytecode = create_opcode(size=len(selfdestruct_contract_initcode))
- # Entry code that will be executed, creates the contract and then calls it in the same tx
+ # Entry code that will be executed, creates the contract and then calls it
+ # in the same tx
entry_code = (
- # Initcode is already deployed at `initcode_copy_from_address`, so just copy it
+ # Initcode is already deployed at `initcode_copy_from_address`, so just
+ # copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
@@ -1235,8 +1269,8 @@ def test_create_selfdestruct_same_tx_increased_nonce(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Call the self-destructing contract multiple times as required, increasing the wei sent each
- # time
+ # Call the self-destructing contract multiple times as required, increasing
+ # the wei sent each time
entry_code_balance = 0
for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)):
entry_code += Op.MSTORE(0, sendall_recipient)
@@ -1259,8 +1293,9 @@ def test_create_selfdestruct_same_tx_increased_nonce(
if sendall_recipient != selfdestruct_contract_address:
sendall_final_balances[sendall_recipient] += selfdestruct_contract_current_balance
- # Self-destructing contract must always have zero balance after the call because the
- # self-destruct always happens in the same transaction in this test
+ # Self-destructing contract must always have zero balance after the
+ # call because the self-destruct always happens in the same transaction
+ # in this test
selfdestruct_contract_current_balance = 0
entry_code += Op.SSTORE(
@@ -1279,8 +1314,8 @@ def test_create_selfdestruct_same_tx_increased_nonce(
Op.EXTCODEHASH(selfdestruct_contract_address),
)
- # Lastly return zero so the entry point contract is created and we can retain the stored
- # values for verification.
+ # Lastly return zero so the entry point contract is created and we can
+ # retain the stored values for verification.
entry_code += Op.RETURN(max(len(selfdestruct_contract_initcode), 32), 1)
tx = Transaction(
@@ -1307,7 +1342,8 @@ def test_create_selfdestruct_same_tx_increased_nonce(
for address, balance in sendall_final_balances.items():
post[address] = Account(balance=balance, storage={0: 1})
- # Check the new contracts created from the self-destructing contract were correctly created.
+ # Check the new contracts created from the self-destructing contract were
+ # correctly created.
for address in [
compute_create_address(address=selfdestruct_contract_address, nonce=i + 1)
for i in range(call_times)
diff --git a/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py b/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py
index c13dfb85266..e42f98c6d1a 100644
--- a/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py
+++ b/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py
@@ -1,4 +1,4 @@
-"""tests for selfdestruct interaction with revert."""
+"""Tests for selfdestruct interaction with revert."""
from typing import Dict
@@ -64,9 +64,10 @@ def recursive_revert_contract_code(
) -> Bytecode:
"""
Contract code that:
- Given selfdestructable contract A, transfer value to A and call A.selfdestruct.
- Then, recurse into a new call which transfers value to A,
- call A.selfdestruct, and reverts.
+ Given selfdestructable contract A, transfer value to A
+ and call A.selfdestruct.
+ Then, recurse into a new call which transfers value to A,
+ call A.selfdestruct, and reverts.
"""
# Common prefix for all three cases:
# case 1: selfdestruct_on_outer_call=1
@@ -244,7 +245,9 @@ def selfdestruct_with_transfer_contract_address(
selfdestruct_with_transfer_contract_code: Bytecode,
same_tx: bool,
) -> Address:
- """Contract address for contract that can selfdestruct and receive value."""
+ """
+ Contract address for contract that can selfdestruct and receive value.
+ """
if same_tx:
return compute_create_address(address=entry_code_address, nonce=1)
# We need to deploy the contract before.
@@ -303,7 +306,9 @@ def selfdestruct_with_transfer_initcode_copy_from_address(
pre: Alloc,
selfdestruct_with_transfer_contract_initcode: Bytecode,
) -> Address:
- """Address of a pre-existing contract we use to simply copy initcode from."""
+ """
+ Address of a pre-existing contract we use to simply copy initcode from.
+ """
addr = pre.deploy_contract(selfdestruct_with_transfer_contract_initcode)
return addr
@@ -340,11 +345,13 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200
):
"""
Given:
- Contract A which has methods to receive balance and selfdestruct, and was created in current tx
+ Contract A which has methods to receive balance and selfdestruct,
+ and was created in current tx.
+
Test the following call sequence:
- Transfer value to A and call A.selfdestruct.
- Recurse into a new call from transfers value to A, calls A.selfdestruct, and reverts.
- """ # noqa: E501
+ Transfer value to A and call A.selfdestruct. Recurse into a new call
+ that transfers value to A, calls A.selfdestruct, and reverts.
+ """
entry_code = Op.EXTCODECOPY(
selfdestruct_with_transfer_initcode_copy_from_address,
0,
@@ -357,7 +364,8 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200
Op.CREATE(
0,
0,
- len(bytes(selfdestruct_with_transfer_contract_initcode)), # Value # Offset
+ # Value Offset
+ len(bytes(selfdestruct_with_transfer_contract_initcode)),
),
)
@@ -400,7 +408,8 @@ def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200
code=selfdestruct_with_transfer_contract_code,
storage=Storage(
{
- # 2 value transfers (1 in outer call, 1 in reverted inner call)
+ # 2 value transfers (1 in outer call, 1 in reverted inner
+ # call)
0: 1, # type: ignore
# 1 selfdestruct in reverted inner call
1: 0, # type: ignore
@@ -454,8 +463,8 @@ def test_selfdestruct_not_created_in_same_tx_with_revert(
recursive_revert_contract_code: Bytecode,
):
"""
- Same test as selfdestruct_created_in_same_tx_with_revert except selfdestructable contract
- is pre-existing.
+ Same test as selfdestruct_created_in_same_tx_with_revert except
+ selfdestructable contract is pre-existing.
"""
entry_code = Op.CALL(
Op.GASLIMIT(),
@@ -477,7 +486,8 @@ def test_selfdestruct_not_created_in_same_tx_with_revert(
code=selfdestruct_with_transfer_contract_code,
storage=Storage(
{
- # 2 value transfers: 1 in outer call, 1 in reverted inner call
+ # 2 value transfers: 1 in outer call, 1 in reverted inner
+ # call
0: 1, # type: ignore
# 1 selfdestruct in reverted inner call
1: 1, # type: ignore
@@ -493,9 +503,11 @@ def test_selfdestruct_not_created_in_same_tx_with_revert(
code=selfdestruct_with_transfer_contract_code,
storage=Storage(
{
- # 2 value transfers: 1 in outer call, 1 in reverted inner call
+ # 2 value transfers:
+ # 1 in outer call, 1 in reverted inner call
0: 1, # type: ignore
- # 2 selfdestructs: 1 in outer call, 1 in reverted inner call # noqa SC100
+ # 2 selfdestructs:
+ # 1 in outer call, 1 in reverted inner call
1: 0, # type: ignore
}
),
diff --git a/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py b/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py
index 6f1f170ed80..8fc048309b6 100644
--- a/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py
+++ b/tests/cancun/eip7516_blobgasfee/test_blobgasfee_opcode.py
@@ -1,8 +1,8 @@
"""
-abstract: Tests [EIP-7516: BLOBBASEFEE opcode](https://eips.ethereum.org/EIPS/eip-7516)
- Test BLOBGASFEE opcode [EIP-7516: BLOBBASEFEE opcode](https://eips.ethereum.org/EIPS/eip-7516).
+BLOBBASEFEE opcode tests.
-""" # noqa: E501
+Tests for [EIP-7516: BLOBBASEFEE opcode](https://eips.ethereum.org/EIPS/eip-7516).
+"""
from itertools import count
@@ -57,13 +57,19 @@ def caller_code(
@pytest.fixture
def caller_pre_storage() -> Storage:
- """Storage of the account containing the bytecode that calls the test contract."""
+ """
+ Storage of the account containing the bytecode that calls the test
+ contract.
+ """
return Storage()
@pytest.fixture
def caller_address(pre: Alloc, caller_code: Bytecode, caller_pre_storage) -> Address:
- """Address of the account containing the bytecode that calls the test contract."""
+ """
+ Address of the account containing the bytecode that calls the test
+ contract.
+ """
return pre.deploy_contract(caller_code)
@@ -96,7 +102,10 @@ def test_blobbasefee_stack_overflow(
tx: Transaction,
call_fails: bool,
):
- """Tests that the BLOBBASEFEE opcode produces a stack overflow by using it repeatedly."""
+ """
+ Tests that the BLOBBASEFEE opcode produces a stack overflow by using it
+ repeatedly.
+ """
post = {
caller_address: Account(
storage={1: 0 if call_fails else 1},
@@ -155,7 +164,10 @@ def test_blobbasefee_before_fork(
callee_address: Address,
tx: Transaction,
):
- """Tests that the BLOBBASEFEE opcode results on exception when called before the fork."""
+ """
+ Tests that the BLOBBASEFEE opcode results on exception when called before
+ the fork.
+ """
# Fork happens at timestamp 15_000
timestamp = 7_500
post = {
@@ -193,8 +205,8 @@ def test_blobbasefee_during_fork(
tx: Transaction,
):
"""
- Tests that the BLOBBASEFEE opcode results on exception when called before the fork and
- succeeds when called after the fork.
+ Tests that the BLOBBASEFEE opcode results on exception when called before
+ the fork and succeeds when called after the fork.
"""
code_caller_post_storage = Storage()
diff --git a/tests/constantinople/eip1014_create2/__init__.py b/tests/constantinople/eip1014_create2/__init__.py
index c63c990d8b0..724704b1812 100644
--- a/tests/constantinople/eip1014_create2/__init__.py
+++ b/tests/constantinople/eip1014_create2/__init__.py
@@ -1,5 +1,3 @@
"""
-abstract: Test [EIP-1014: Skinny CREATE2](https://eips.ethereum.org/EIPS/eip-1014).
-
- Tests for [EIP-1014: Skinny CREATE2](https://eips.ethereum.org/EIPS/eip-1014).
+Tests for [EIP-1014: Skinny CREATE2](https://eips.ethereum.org/EIPS/eip-1014).
"""
diff --git a/tests/constantinople/eip1014_create2/test_create_returndata.py b/tests/constantinople/eip1014_create2/test_create_returndata.py
index 3e64af7dae6..99b6690d053 100644
--- a/tests/constantinople/eip1014_create2/test_create_returndata.py
+++ b/tests/constantinople/eip1014_create2/test_create_returndata.py
@@ -1,7 +1,7 @@
"""
-Return data management around create2
-Port call_outsize_then_create2_successful_then_returndatasizeFiller.json test
-Port call_then_create2_successful_then_returndatasizeFiller.json test.
+Return data management around create2. Ports the
+call_outsize_then_create2_successful_then_returndatasizeFiller.json and
+call_then_create2_successful_then_returndatasizeFiller.json tests.
"""
import pytest
@@ -36,7 +36,10 @@ def test_create2_return_data(
pre: Alloc,
state_test: StateTestFiller,
):
- """Validate that create2 return data does not interfere with previously existing memory."""
+ """
+ Validate that create2 return data does not interfere with previously
+ existing memory.
+ """
# Storage vars
slot_returndatasize_before_create = 0
slot_returndatasize_after_create = 1
@@ -104,7 +107,8 @@ def test_create2_return_data(
slot_returndatacopy_before_create: expected_returndatacopy,
slot_returndatacopy_before_create_2: 0,
#
- # the actual bytes returned by returndatacopy opcode after create
+ # the actual bytes returned by returndatacopy opcode after
+ # create
slot_returndatacopy_after_create: (
return_data_in_create if return_type_in_create == Op.REVERT else 0
),
@@ -122,7 +126,8 @@ def test_create2_return_data(
else keccak256(int.to_bytes(return_data_in_create, 32, byteorder="big"))
),
#
- # check that create 2 didn't mess up with initial memory space declared for return
+ # check that create 2 didn't mess up with initial memory space
+ # declared for return
slot_begin_memory_after_create: expected_returndatacopy,
} # type: ignore
)
diff --git a/tests/constantinople/eip1014_create2/test_recreate.py b/tests/constantinople/eip1014_create2/test_recreate.py
index 702eca9bb77..116bbeb4527 100644
--- a/tests/constantinople/eip1014_create2/test_recreate.py
+++ b/tests/constantinople/eip1014_create2/test_recreate.py
@@ -30,8 +30,8 @@ def test_recreate(
recreate_on_separate_block: bool,
):
"""
- Test that the storage is cleared when a contract is first destructed then re-created using
- CREATE2.
+ Test that the storage is cleared when a contract is first destructed then
+ re-created using CREATE2.
"""
creator_contract_code = Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE) + Op.CREATE2(
0, 0, Op.CALLDATASIZE, 0
diff --git a/tests/constantinople/eip145_bitwise_shift/__init__.py b/tests/constantinople/eip145_bitwise_shift/__init__.py
index 5fc43725bb7..bdca58cbe60 100644
--- a/tests/constantinople/eip145_bitwise_shift/__init__.py
+++ b/tests/constantinople/eip145_bitwise_shift/__init__.py
@@ -1,5 +1,3 @@
"""
-abstract: Test [EIP-145: Bitwise shifting instructions in EVM](https://eips.ethereum.org/EIPS/eip-145).
-
- Tests for [EIP-145: Bitwise shifting instructions in EVM](https://eips.ethereum.org/EIPS/eip-145).
+Tests for [EIP-145: Bitwise shifting instructions in EVM](https://eips.ethereum.org/EIPS/eip-145).
"""
diff --git a/tests/constantinople/eip145_bitwise_shift/spec.py b/tests/constantinople/eip145_bitwise_shift/spec.py
index 5b272a9e22f..b8649ae2adf 100644
--- a/tests/constantinople/eip145_bitwise_shift/spec.py
+++ b/tests/constantinople/eip145_bitwise_shift/spec.py
@@ -21,8 +21,8 @@ class Spec:
https://eips.ethereum.org/EIPS/eip-145.
"""
- # Below are GPT o4-mini-high implementation of shift functions
- # It can contain bugs, treat it with caution and refer to EVM implementations
+ # Below is a GPT o4-mini-high implementation of the shift functions. It
+ # can contain bugs; treat it with caution and refer to EVM implementations
@staticmethod
def sar(shift: int, value: int) -> int:
"""
diff --git a/tests/frontier/create/test_create_one_byte.py b/tests/frontier/create/test_create_one_byte.py
index 0cd90521e11..2c3a1312f49 100644
--- a/tests/frontier/create/test_create_one_byte.py
+++ b/tests/frontier/create/test_create_one_byte.py
@@ -1,6 +1,6 @@
"""
-The test calls CREATE in a loop deploying 1-byte contracts with all possible byte values,
-records in storage the values that failed to deploy.
+The test calls CREATE in a loop deploying 1-byte contracts with all possible
+byte values and records in storage the values that failed to deploy.
"""
import pytest
diff --git a/tests/frontier/identity_precompile/common.py b/tests/frontier/identity_precompile/common.py
index 6aef16f982c..be7dc17c42f 100644
--- a/tests/frontier/identity_precompile/common.py
+++ b/tests/frontier/identity_precompile/common.py
@@ -39,17 +39,18 @@ def generate_identity_call_bytecode(
call_succeeds: bool,
) -> Bytecode:
"""
- Generate bytecode for calling the identity precompile with given memory values.
+ Generate bytecode for calling the identity precompile with given memory
+ values.
Args:
- storage (Storage): The storage object to use for storing values.
- call_type (Op): The type of call opcode (CALL or CALLCODE).
- memory_values (Tuple[int, ...]): Values to store in memory before the call.
- call_args (CallArgs): Arguments for the CALL opcode.
- call_succeeds (bool): Whether the call should succeed or not.
+ storage (Storage): The storage object to use for storing values.
+ call_type (Op): The type of call opcode (CALL or CALLCODE).
+ memory_values (Tuple[int, ...]): Values to store in memory before
+ the call.
+ call_args (CallArgs): Arguments for the CALL opcode.
+ call_succeeds (bool): Whether the call should succeed or not.
- Returns:
- Bytecode: The generated bytecode for the identity precompile call.
+ Returns: The generated Bytecode for the identity precompile call.
"""
code = Bytecode()
@@ -65,7 +66,8 @@ def generate_identity_call_bytecode(
if mstore_count > i + 1:
mstore_offset += 0x20
- # Call the identity precompile, then check that the last value in memory has not changed
+ # Call the identity precompile, then check that the last value in memory
+ # has not changed
code += (
Op.SSTORE(
storage.store_next(call_succeeds),
diff --git a/tests/frontier/identity_precompile/test_identity.py b/tests/frontier/identity_precompile/test_identity.py
index dc3222cfaf8..653c763564c 100644
--- a/tests/frontier/identity_precompile/test_identity.py
+++ b/tests/frontier/identity_precompile/test_identity.py
@@ -1,4 +1,4 @@
-"""abstract: Test identity precompile output size."""
+"""Test identity precompile output size."""
from typing import Tuple
@@ -118,7 +118,10 @@ def test_call_identity_precompile(
tx_gas_limit: int,
contract_balance: int,
):
- """Test identity precompile RETURNDATA is sized correctly based on the input size."""
+ """
+ Test identity precompile RETURNDATA is sized correctly based on the input
+ size.
+ """
env = Environment()
storage = Storage()
diff --git a/tests/frontier/identity_precompile/test_identity_returndatasize.py b/tests/frontier/identity_precompile/test_identity_returndatasize.py
index 45538b0c944..6e352664dc2 100644
--- a/tests/frontier/identity_precompile/test_identity_returndatasize.py
+++ b/tests/frontier/identity_precompile/test_identity_returndatasize.py
@@ -1,4 +1,4 @@
-"""abstract: Test identity precompile output size."""
+"""Test identity precompile output size."""
import pytest
@@ -37,7 +37,10 @@ def test_identity_precompile_returndata(
output_size: int,
expected_returndatasize: int,
):
- """Test identity precompile RETURNDATA is sized correctly based on the input size."""
+ """
+ Test identity precompile RETURNDATA is sized correctly based on the input
+ size.
+ """
env = Environment()
storage = Storage()
diff --git a/tests/frontier/opcodes/test_all_opcodes.py b/tests/frontier/opcodes/test_all_opcodes.py
index 458c332dd30..1d0301c7cef 100644
--- a/tests/frontier/opcodes/test_all_opcodes.py
+++ b/tests/frontier/opcodes/test_all_opcodes.py
@@ -1,6 +1,6 @@
"""
-Call every possible opcode and test that the subcall is successful
-if the opcode is supported by the fork supports and fails otherwise.
+Call every possible opcode and test that the subcall is successful if the
+opcode is supported by the fork and fails otherwise.
"""
from typing import Dict
@@ -56,9 +56,9 @@ def prepare_suffix(opcode: Opcode) -> Bytecode:
@pytest.mark.valid_from("Frontier")
def test_all_opcodes(state_test: StateTestFiller, pre: Alloc, fork: Fork):
"""
- Test each possible opcode on the fork with a single contract that
- calls each opcode in succession. Check that each subcall passes
- if the opcode is supported and fails otherwise.
+ Test each possible opcode on the fork with a single contract that calls
+ each opcode in succession. Check that each subcall passes if the opcode is
+ supported and fails otherwise.
"""
code_worked = 1000
@@ -75,8 +75,8 @@ def test_all_opcodes(state_test: StateTestFiller, pre: Alloc, fork: Fork):
code=sum(
Op.SSTORE(
Op.PUSH1(opcode.int()),
- # Limit gas to limit the gas consumed by the exceptional aborts in each
- # subcall that uses an undefined opcode.
+ # Limit gas to limit the gas consumed by the exceptional aborts
+ # in each subcall that uses an undefined opcode.
Op.CALL(35_000, opcode_address, 0, 0, 0, 0, 0),
)
for opcode, opcode_address in code_contract.items()
diff --git a/tests/frontier/opcodes/test_call.py b/tests/frontier/opcodes/test_call.py
index e7feb662db5..e5fead6786c 100644
--- a/tests/frontier/opcodes/test_call.py
+++ b/tests/frontier/opcodes/test_call.py
@@ -14,8 +14,9 @@
from ethereum_test_vm import Opcodes as Op
-# TODO: There's an issue with gas definitions on forks previous to Berlin, remove this when fixed.
-# https://github.com/ethereum/execution-spec-tests/pull/1952#discussion_r2237634275
+# TODO: There's an issue with gas definitions on forks previous to Berlin,
+# remove this when fixed. See:
+# https://github.com/ethereum/execution-spec-tests/pull/1952#discussion_r2237634275
@pytest.mark.valid_from("Berlin")
def test_call_large_offset_mstore(
state_test: StateTestFiller,
@@ -23,10 +24,11 @@ def test_call_large_offset_mstore(
fork: Fork,
):
"""
- CALL with ret_offset larger than memory size and ret_size zero
- Then do an MSTORE in that offset to see if memory was expanded in CALL.
+ CALL with ret_offset larger than memory size and ret_size zero. Then do
+ an MSTORE in that offset to see if memory was expanded in CALL.
- This is for bug in a faulty EVM implementation where memory is expanded when it shouldn't.
+ This is for bug in a faulty EVM implementation where memory is expanded
+ when it shouldn't.
"""
sender = pre.fund_eoa()
@@ -35,14 +37,16 @@ def test_call_large_offset_mstore(
call_measure = CodeGasMeasure(
code=Op.CALL(gas=0, ret_offset=mem_offset, ret_size=0),
- overhead_cost=gsc.G_VERY_LOW * len(Op.CALL.kwargs), # Cost of pushing CALL args
+ # Cost of pushing CALL args
+ overhead_cost=gsc.G_VERY_LOW * len(Op.CALL.kwargs),
extra_stack_items=1, # Because CALL pushes 1 item to the stack
sstore_key=0,
stop=False, # Because it's the first CodeGasMeasure
)
mstore_measure = CodeGasMeasure(
code=Op.MSTORE(offset=mem_offset, value=1),
- overhead_cost=gsc.G_VERY_LOW * len(Op.MSTORE.kwargs), # Cost of pushing MSTORE args
+ # Cost of pushing MSTORE args
+ overhead_cost=gsc.G_VERY_LOW * len(Op.MSTORE.kwargs),
extra_stack_items=0,
sstore_key=1,
)
@@ -77,8 +81,9 @@ def test_call_large_offset_mstore(
)
-# TODO: There's an issue with gas definitions on forks previous to Berlin, remove this when fixed.
-# https://github.com/ethereum/execution-spec-tests/pull/1952#discussion_r2237634275
+# TODO: There's an issue with gas definitions on forks previous to Berlin,
+# remove this when fixed. See:
+# https://github.com/ethereum/execution-spec-tests/pull/1952#discussion_r2237634275
@pytest.mark.valid_from("Berlin")
def test_call_memory_expands_on_early_revert(
state_test: StateTestFiller,
@@ -87,27 +92,33 @@ def test_call_memory_expands_on_early_revert(
):
"""
When CALL reverts early (e.g. because of not enough balance by the sender),
- memory should be expanded anyway.
- We check this with an MSTORE.
+ memory should be expanded anyway. We check this with an MSTORE.
- This is for a bug in an EVM implementation where memory is expanded after executing a CALL, but
- not when an early revert happens.
+ This is for a bug in an EVM implementation where memory is expanded after
+ executing a CALL, but not when an early revert happens.
"""
sender = pre.fund_eoa()
gsc = fork.gas_costs()
- ret_size = 128 # arbitrary number, greater than memory size to trigger an expansion
+ # arbitrary number, greater than memory size to trigger an expansion
+ ret_size = 128
call_measure = CodeGasMeasure(
- code=Op.CALL(gas=0, value=100, ret_size=ret_size), # CALL with value
- overhead_cost=gsc.G_VERY_LOW * len(Op.CALL.kwargs), # Cost of pushing CALL args
- extra_stack_items=1, # Because CALL pushes 1 item to the stack
+ # CALL with value
+ code=Op.CALL(gas=0, value=100, ret_size=ret_size),
+ # Cost of pushing CALL args
+ overhead_cost=gsc.G_VERY_LOW * len(Op.CALL.kwargs),
+ # Because CALL pushes 1 item to the stack
+ extra_stack_items=1,
sstore_key=0,
- stop=False, # Because it's the first CodeGasMeasure
+ # Because it's the first CodeGasMeasure
+ stop=False,
)
mstore_measure = CodeGasMeasure(
- code=Op.MSTORE(offset=ret_size // 2, value=1), # Low offset for not expanding memory
- overhead_cost=gsc.G_VERY_LOW * len(Op.MSTORE.kwargs), # Cost of pushing MSTORE args
+ # Low offset for not expanding memory
+ code=Op.MSTORE(offset=ret_size // 2, value=1),
+ # Cost of pushing MSTORE args
+ overhead_cost=gsc.G_VERY_LOW * len(Op.MSTORE.kwargs),
extra_stack_items=0,
sstore_key=1,
)
@@ -123,7 +134,8 @@ def test_call_memory_expands_on_early_revert(
)
memory_expansion_gas_calc = fork.memory_expansion_gas_calculator()
- # call cost: address_access_cost + new_acc_cost + memory_expansion_cost + value - stipend
+ # call cost:
+ # address_access_cost+new_acc_cost+memory_expansion_cost+value-stipend
call_cost = (
gsc.G_COLD_ACCOUNT_ACCESS
+ gsc.G_NEW_ACCOUNT
@@ -132,7 +144,8 @@ def test_call_memory_expands_on_early_revert(
- gsc.G_CALL_STIPEND
)
- # mstore cost: base cost. No memory expansion cost needed, it was expanded on CALL.
+ # mstore cost: base cost. No memory expansion cost needed, it was expanded
+ # on CALL.
mstore_cost = gsc.G_MEMORY
state_test(
env=Environment(),
@@ -149,8 +162,9 @@ def test_call_memory_expands_on_early_revert(
)
-# TODO: There's an issue with gas definitions on forks previous to Berlin, remove this when fixed.
-# https://github.com/ethereum/execution-spec-tests/pull/1952#discussion_r2237634275
+# TODO: There's an issue with gas definitions on forks previous to Berlin,
+# remove this when fixed. See:
+# https://github.com/ethereum/execution-spec-tests/pull/1952#discussion_r2237634275
@pytest.mark.with_all_call_opcodes
@pytest.mark.valid_from("Berlin")
def test_call_large_args_offset_size_zero(
@@ -170,7 +184,8 @@ def test_call_large_args_offset_size_zero(
call_measure = CodeGasMeasure(
code=call_opcode(gas=0, args_offset=very_large_offset, args_size=0),
- overhead_cost=gsc.G_VERY_LOW * len(call_opcode.kwargs), # Cost of pushing xCALL args
+ # Cost of pushing xCALL args
+ overhead_cost=gsc.G_VERY_LOW * len(call_opcode.kwargs),
extra_stack_items=1, # Because xCALL pushes 1 item to the stack
sstore_key=0,
)
diff --git a/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py b/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py
index 740253dbb57..61ac377bacc 100644
--- a/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py
+++ b/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py
@@ -1,26 +1,33 @@
"""
-abstract: Tests the nested CALL/CALLCODE opcode gas consumption with a positive value transfer.
- This test is designed to investigate an issue identified in EthereumJS, as reported in:
- https://github.com/ethereumjs/ethereumjs-monorepo/issues/3194.
-
- The issue pertains to the incorrect gas calculation for CALL/CALLCODE operations with a
- positive value transfer, due to the pre-addition of the gas stipend (2300) to the currently
- available gas instead of adding it to the new call frame. This bug was specific to the case
- where insufficient gas was provided for the CALL/CALLCODE operation. Due to the pre-addition
- of the stipend to the currently available gas, the case for insufficient gas was not properly
- failing with an out-of-gas error.
-
- Test setup: Given two smart contract accounts, 0x0A (caller) and 0x0B (callee):
- 1) An arbitrary transaction calls into the contract 0x0A.
- 2) Contract 0x0A executes a CALL to contract 0x0B with a specific gas limit (X).
- 3) Contract 0x0B then attempts a CALL/CALLCODE to a non-existent contract 0x0C,
- with a positive value transfer (activating the gas stipend).
- 4) If the gas X provided by contract 0x0A to 0x0B is sufficient, contract 0x0B
- will push 0x01 onto the stack after returning to the call frame in 0x0A. Otherwise, it
- should push 0x00, indicating the insufficiency of gas X (for the bug in EthereumJS, the
- CALL/CALLCODE operation would return 0x01 due to the pre-addition of the gas stipend).
- 5) The resulting stack value is saved into contract 0x0A's storage, allowing us to
- verify whether the provided gas was sufficient or insufficient.
+Tests nested CALL/CALLCODE gas usage with positive value transfer.
+
+This test investigates an issue identified in EthereumJS, as reported in:
+https://github.com/ethereumjs/ethereumjs-monorepo/issues/3194.
+
+The issue pertains to the incorrect gas calculation for CALL/CALLCODE
+operations with a positive value transfer, due to the pre-addition of the
+gas stipend (2300) to the currently available gas instead of adding it to
+the new call frame. This bug was specific to the case where insufficient
+gas was provided for the CALL/CALLCODE operation. Due to the pre-addition
+of the stipend to the currently available gas, the case for insufficient
+gas was not properly failing with an out-of-gas error.
+
+Test setup:
+
+Given two smart contract accounts, 0x0A (caller) and 0x0B (callee):
+1. An arbitrary transaction calls into the contract 0x0A.
+2. Contract 0x0A executes a CALL to contract 0x0B with a specific gas
+   limit (X).
+3. Contract 0x0B then attempts a CALL/CALLCODE to a non-existent contract
+   0x0C, with a positive value transfer (activating the gas stipend).
+4. If the gas X provided by contract 0x0A to 0x0B is sufficient, contract
+   0x0B will push 0x01 onto the stack after returning to the call frame in
+   0x0A. Otherwise, it should push 0x00, indicating the insufficiency of
+   gas X (for the bug in EthereumJS, the CALL/CALLCODE operation would
+   return 0x01 due to the pre-addition of the gas stipend).
+5. The resulting stack value is saved into contract 0x0A's storage,
+   allowing us to verify whether the provided gas was sufficient or
+   insufficient.
"""
from typing import Dict
@@ -41,13 +48,15 @@
"""
PUSH opcode cost is 3, GAS opcode cost is 2.
-We need 6 PUSH's and one GAS to fill the stack for both CALL & CALLCODE, in the callee contract.
+We need 6 PUSH's and one GAS to fill the stack for both CALL & CALLCODE,
+in the callee contract.
"""
CALLEE_INIT_STACK_GAS = 6 * 3 + 2
"""
CALL gas breakdowns: (https://www.evm.codes/#f1)
-memory_exp_cost + code_exec_cost + address_access_cost + positive_value_cost + empty_account_cost
+memory_exp_cost + code_exec_cost + address_access_cost +
+positive_value_cost + empty_account_cost
= 0 + 0 + 2600 + 9000 + 25000 = 36600
"""
CALL_GAS = 36600
@@ -55,8 +64,8 @@
"""
CALLCODE gas breakdowns: (https://www.evm.codes/#f2)
-memory_exp_cost + code_exec_cost + address_access_cost + positive_value_cost
-= 0 + 0 + 2600 + 9000 = 11600
+memory_exp_cost + code_exec_cost + address_access_cost +
+positive_value_cost = 0 + 0 + 2600 + 9000 = 11600
"""
CALLCODE_GAS = 11600
CALLCODE_SUFFICIENT_GAS = CALLCODE_GAS + CALLEE_INIT_STACK_GAS
@@ -66,14 +75,14 @@
def callee_code(pre: Alloc, callee_opcode: Op) -> Bytecode:
"""
Code called by the caller contract:
- PUSH1 0x00 * 4
- PUSH1 0x01 <- for positive value transfer
- PUSH2 Contract.nonexistent
- GAS <- value doesn't matter
- CALL/CALLCODE.
+ PUSH1 0x00 * 4
+ PUSH1 0x01 <- for positive value transfer
+ PUSH2 Contract.nonexistent
+ GAS <- value doesn't matter
+ CALL/CALLCODE.
"""
- # The address needs to be empty and different for each execution of the fixture,
- # otherwise the calculations (empty_account_cost) are incorrect.
+ # The address needs to be empty and different for each execution of the
+ # fixture, otherwise the calculations (empty_account_cost) are incorrect.
return callee_opcode(Op.GAS(), pre.empty_account(), 1, 0, 0, 0, 0)
@@ -93,12 +102,12 @@ def callee_address(pre: Alloc, callee_code: Bytecode) -> Address:
def caller_code(caller_gas_limit: int, callee_address: Address) -> Bytecode:
"""
Code to CALL the callee contract:
- PUSH1 0x00 * 5
- PUSH2 Contract.callee
- PUSH2 caller_gas <- gas limit set for CALL to callee contract
- CALL
- PUSH1 0x00
- SSTORE.
+ PUSH1 0x00 * 5
+ PUSH2 Contract.callee
+ PUSH2 caller_gas <- gas limit set for CALL to callee contract
+ CALL
+ PUSH1 0x00
+ SSTORE.
"""
return Op.SSTORE(0, Op.CALL(caller_gas_limit, callee_address, 0, 0, 0, 0, 0))
@@ -107,12 +116,12 @@ def caller_code(caller_gas_limit: int, callee_address: Address) -> Bytecode:
def caller_address(pre: Alloc, caller_code: Bytecode) -> Address:
"""
Code to CALL the callee contract:
- PUSH1 0x00 * 5
- PUSH2 Contract.callee
- PUSH2 caller_gas <- gas limit set for CALL to callee contract
- CALL
- PUSH1 0x00
- SSTORE.
+ PUSH1 0x00 * 5
+ PUSH2 Contract.callee
+ PUSH2 caller_gas <- gas limit set for CALL to callee contract
+ CALL
+ PUSH1 0x00
+ SSTORE.
"""
return pre.deploy_contract(caller_code, balance=0x03)
@@ -151,5 +160,8 @@ def test_value_transfer_gas_calculation(
caller_tx: Transaction,
post: Dict[str, Account],
):
- """Tests the nested CALL/CALLCODE opcode gas consumption with a positive value transfer."""
+ """
+ Tests the nested CALL/CALLCODE opcode gas consumption with a positive value
+ transfer.
+ """
state_test(env=Environment(), pre=pre, post=post, tx=caller_tx)
diff --git a/tests/frontier/opcodes/test_calldataload.py b/tests/frontier/opcodes/test_calldataload.py
index 41f03f12847..9776e66e054 100644
--- a/tests/frontier/opcodes/test_calldataload.py
+++ b/tests/frontier/opcodes/test_calldataload.py
@@ -53,10 +53,15 @@ def test_calldataload(
Test `CALLDATALOAD` opcode.
Tests two scenarios:
- - calldata_source is "contract": CALLDATALOAD reads from calldata passed by another contract
- - calldata_source is "tx": CALLDATALOAD reads directly from transaction calldata
+ - calldata_source is "contract": CALLDATALOAD reads from calldata
+ passed by another contract
+ - calldata_source is "tx": CALLDATALOAD reads directly from
+ transaction calldata
- Based on https://github.com/ethereum/tests/blob/ae4791077e8fcf716136e70fe8392f1a1f1495fb/src/GeneralStateTestsFiller/VMTests/vmTests/calldatacopyFiller.yml
+    Based on the ethereum/tests repo, file
+    src/GeneralStateTestsFiller/VMTests/vmTests/calldatacopyFiller.yml
+    at commit
+    ae4791077e8fcf716136e70fe8392f1a1f1495fb.
"""
contract_address = pre.deploy_contract(
Op.SSTORE(0, Op.CALLDATALOAD(offset=calldata_offset)) + Op.STOP,
diff --git a/tests/frontier/opcodes/test_calldatasize.py b/tests/frontier/opcodes/test_calldatasize.py
index d06a7330da6..2106524015d 100644
--- a/tests/frontier/opcodes/test_calldatasize.py
+++ b/tests/frontier/opcodes/test_calldatasize.py
@@ -30,10 +30,15 @@ def test_calldatasize(
Test `CALLDATASIZE` opcode.
Tests two scenarios:
- - calldata_source is "contract": CALLDATASIZE reads from calldata passed by another contract
- - calldata_source is "tx": CALLDATASIZE reads directly from transaction calldata
+ - calldata_source is "contract": CALLDATASIZE reads from calldata
+ passed by another contract
+ - calldata_source is "tx": CALLDATASIZE reads directly from
+ transaction calldata
- Based on https://github.com/ethereum/tests/blob/81862e4848585a438d64f911a19b3825f0f4cd95/src/GeneralStateTestsFiller/VMTests/vmTests/calldatasizeFiller.yml
+    Based on the ethereum/tests repo, file
+    src/GeneralStateTestsFiller/VMTests/vmTests/calldatasizeFiller.yml
+    at commit
+    81862e4848585a438d64f911a19b3825f0f4cd95.
"""
contract_address = pre.deploy_contract(Op.SSTORE(key=0x0, value=Op.CALLDATASIZE))
calldata = b"\x01" * args_size
diff --git a/tests/frontier/opcodes/test_dup.py b/tests/frontier/opcodes/test_dup.py
index a4d3d3e85f5..ee8cbd3c141 100644
--- a/tests/frontier/opcodes/test_dup.py
+++ b/tests/frontier/opcodes/test_dup.py
@@ -1,8 +1,4 @@
-"""
-abstract: Test DUP
- Test the DUP opcodes.
-
-"""
+"""Test the DUP opcodes."""
import pytest
@@ -43,9 +39,13 @@ def test_dup(
"""
Test the DUP1-DUP16 opcodes.
- Note: Test case ported from [ethereum/tests](https://github.com/ethereum/tests)
- Test ported from [ethereum/tests/GeneralStateTests/VMTests/vmTests/dup.json](https://github.com/ethereum/tests/blob/v14.0/GeneralStateTests/VMTests/vmTests/dup.json) by Ori Pomerantz.
- """ # noqa: E501
+ Note: Test case ported from
+ [ethereum/tests](https://github.com/ethereum/tests).
+
+    Test ported from ethereum/tests, file
+    GeneralStateTests/VMTests/vmTests/dup.json (tag v14.0),
+    by Ori Pomerantz.
+ """
env = Environment()
sender = pre.fund_eoa()
post = {}
diff --git a/tests/frontier/opcodes/test_push.py b/tests/frontier/opcodes/test_push.py
index a97abaa982c..fc407f9c4d0 100644
--- a/tests/frontier/opcodes/test_push.py
+++ b/tests/frontier/opcodes/test_push.py
@@ -1,7 +1,11 @@
"""
A State test for the set of `PUSH*` opcodes.
-Ported from: https://github.com/ethereum/tests/blob/4f65a0a7cbecf4442415c226c65e089acaaf6a8b/src/GeneralStateTestsFiller/VMTests/vmTests/pushFiller.yml.
-""" # noqa: E501
+
+Ported from the ethereum/tests repo, file
+src/GeneralStateTestsFiller/VMTests/vmTests/pushFiller.yml
+at commit
+4f65a0a7cbecf4442415c226c65e089acaaf6a8b.
+"""
import pytest
@@ -30,7 +34,8 @@ def get_input_for_push_opcode(opcode: Op) -> bytes:
)
@pytest.mark.parametrize(
"push_opcode",
- [getattr(Op, f"PUSH{i}") for i in range(1, 33)], # Dynamically parametrize PUSH opcodes
+ # Dynamically parametrize PUSH opcodes
+ [getattr(Op, f"PUSH{i}") for i in range(1, 33)],
ids=lambda op: str(op),
)
@pytest.mark.valid_from("Frontier")
@@ -38,9 +43,8 @@ def test_push(state_test: StateTestFiller, fork: Fork, pre: Alloc, push_opcode:
"""
The set of `PUSH*` opcodes pushes data onto the stack.
- In this test, we ensure that the set of `PUSH*` opcodes writes
- a portion of an excerpt from the Ethereum yellow paper to
- storage.
+ In this test, we ensure that the set of `PUSH*` opcodes writes a portion of
+ an excerpt from the Ethereum yellow paper to storage.
"""
# Input used to test the `PUSH*` opcode.
excerpt = get_input_for_push_opcode(push_opcode)
@@ -90,19 +94,24 @@ def test_push(state_test: StateTestFiller, fork: Fork, pre: Alloc, push_opcode:
def test_stack_overflow(
state_test: StateTestFiller, fork: Fork, pre: Alloc, push_opcode: Op, stack_height: int
):
- """A test to ensure that the stack overflows when the stack limit of 1024 is exceeded."""
+ """
+    Test that the stack overflows when the stack limit of 1024 is exceeded.
+ """
env = Environment()
# Input used to test the `PUSH*` opcode.
excerpt = get_input_for_push_opcode(push_opcode)
"""
- Essentially write a n-byte message to storage by pushing [1024,1025] times to stack. This
- simulates a "jump" over the stack limit of 1024.
- The message is UTF-8 encoding of excerpt (say 0x45 for PUSH1). Within the stack limit,
- the message is written to the to the storage at the same offset (0x45 for PUSH1).
- The last iteration will overflow the stack and the storage slot will be empty.
+
+    Essentially write an n-byte message to storage by pushing [1024,1025]
+    times to stack. This simulates a "jump" over the stack limit of 1024.
+
+ The message is UTF-8 encoding of excerpt (say 0x45 for PUSH1). Within the
+    stack limit, the message is written to the storage at the same
+ offset (0x45 for PUSH1). The last iteration will overflow the stack and the
+ storage slot will be empty.
** Bytecode explanation **
+---------------------------------------------------+
@@ -115,7 +124,8 @@ def test_stack_overflow(
"""
contract_code: Bytecode = Bytecode()
for _ in range(stack_height - 2):
- contract_code += Op.PUSH1(0) # mostly push 0 to avoid contract size limit exceeded
+ # mostly push 0 to avoid contract size limit exceeded
+ contract_code += Op.PUSH1(0)
contract_code += push_opcode(excerpt) * 2 + Op.SSTORE
contract = pre.deploy_contract(contract_code)
diff --git a/tests/frontier/opcodes/test_selfdestruct.py b/tests/frontier/opcodes/test_selfdestruct.py
index e165684a0d8..bedddbf679b 100644
--- a/tests/frontier/opcodes/test_selfdestruct.py
+++ b/tests/frontier/opcodes/test_selfdestruct.py
@@ -10,9 +10,9 @@
@pytest.mark.valid_until("Homestead")
def test_double_kill(blockchain_test: BlockchainTestFiller, pre: Alloc):
"""
- Test that when two transactions attempt to destruct a contract,
- the second transaction actually resurrects the contract as an empty account (prior to Spurious
- Dragon).
+ Test that when two transactions attempt to destruct a contract, the second
+ transaction actually resurrects the contract as an empty account (prior to
+ Spurious Dragon).
"""
sender = pre.fund_eoa()
diff --git a/tests/frontier/precompiles/test_precompile_absence.py b/tests/frontier/precompiles/test_precompile_absence.py
index 1982ad2f217..46b9dd44ece 100644
--- a/tests/frontier/precompiles/test_precompile_absence.py
+++ b/tests/frontier/precompiles/test_precompile_absence.py
@@ -1,4 +1,4 @@
-"""abstract: Test Calling Precompile Range (close to zero)."""
+"""Test Calling Precompile Range (close to zero)."""
import pytest
@@ -33,7 +33,10 @@ def test_precompile_absence(
fork: Fork,
calldata_size: int,
):
- """Test that addresses close to zero are not precompiles unless active in the fork."""
+ """
+ Test that addresses close to zero are not precompiles unless active in the
+ fork.
+ """
active_precompiles = fork.precompiles()
storage = Storage()
call_code = Bytecode()
diff --git a/tests/frontier/precompiles/test_precompiles.py b/tests/frontier/precompiles/test_precompiles.py
index 24ef1b00457..b7ba6b91428 100644
--- a/tests/frontier/precompiles/test_precompiles.py
+++ b/tests/frontier/precompiles/test_precompiles.py
@@ -18,14 +18,16 @@
def precompile_addresses(fork: Fork) -> Iterator[Tuple[Address, bool]]:
"""
- Yield the addresses of precompiled contracts and their support status for a given fork.
+ Yield the addresses of precompiled contracts and their support status for a
+ given fork.
Args:
- fork (Fork): The fork instance containing precompiled contract information.
+ fork (Fork): The fork instance containing precompiled
+ contract information.
- Yields:
- Iterator[Tuple[str, bool]]: A tuple containing the address in hexadecimal format and a
- boolean indicating whether the address is a supported precompile.
+    Yields:
+        Tuple[Address, bool]: The address and a boolean indicating whether
+        the address is a supported precompile.
"""
supported_precompiles = fork.precompiles()
@@ -60,17 +62,19 @@ def test_precompiles(
Tests the behavior of precompiled contracts in the Ethereum state test.
Args:
- state_test (StateTestFiller): The state test filler object used to run the test.
- address (str): The address of the precompiled contract to test.
- precompile_exists (bool): A flag indicating whether the precompiled contract exists at the
- given address.
- pre (Alloc): The allocation object used to deploy the contract and set up the initial
- state.
-
- This test deploys a contract that performs two CALL operations to the specified address and a
- fixed address (0x10000), measuring the gas used for each call. It then stores the difference
- in gas usage in storage slot 0. The test verifies the expected storage value based on
- whether the precompiled contract exists at the given address.
+ state_test (StateTestFiller): The state test filler object used to
+ run the test.
+ address (str): The address of the precompiled contract to test.
+ precompile_exists (bool): A flag indicating whether the precompiled
+ contract exists at the given address.
+ pre (Alloc): The allocation object used to deploy the contract and
+ set up the initial state.
+
+ This test deploys a contract that performs two CALL operations to the
+ specified address and a fixed address (0x10000), measuring the gas used for
+ each call. It then stores the difference in gas usage in storage slot 0.
+ The test verifies the expected storage value based on whether the
+ precompiled contract exists at the given address.
"""
env = Environment()
diff --git a/tests/frontier/scenarios/common.py b/tests/frontier/scenarios/common.py
index 7142ca685e5..1d7a95296c7 100644
--- a/tests/frontier/scenarios/common.py
+++ b/tests/frontier/scenarios/common.py
@@ -30,9 +30,9 @@ class ScenarioExpectOpcode(Enum):
@dataclass
class ScenarioEnvironment:
"""
- Scenario evm environment
- Each scenario must define an environment on which program is executed
- This is so post state verification could check results of evm opcodes.
+    Scenario EVM environment. Each scenario must define an environment on
+    which a program is executed, so that post-state verification can check
+    the results of EVM opcodes.
"""
code_address: Address # Op.ADDRESS, address scope for program
@@ -63,9 +63,10 @@ class ProgramResult:
Describe expected result of a program.
Attributes:
- result (int | ScenarioExpectOpcode): The result of the program
- from_fork (Fork): The result is only valid from this fork (default: Frontier)
- static_support (bool): Can be verified in static context (default: True)
+ result (int | ScenarioExpectOpcode): The result of the program
+ from_fork (Fork): The result is only valid from this fork
+ (default: Frontier)
+ static_support (bool): Can be verified in static context (default: True)
"""
@@ -79,8 +80,8 @@ def translate_result(
self, env: ScenarioEnvironment, exec_env: ExecutionEnvironment
) -> int | Address:
"""
- Translate expected program result code into concrete value,
- given the scenario evm environment and test execution environment.
+ Translate expected program result code into concrete value, given the
+ scenario evm environment and test execution environment.
"""
if exec_env.fork < self.from_fork:
return 0
@@ -154,10 +155,10 @@ class ScenarioGeneratorInput:
Parameters for the scenario generator function.
Attributes:
- fork (Fork): Fork for which we ask to generate scenarios
- pre (Alloc): Access to the state to be able to deploy contracts into pre
- operation (Bytecode): Evm bytecode program that will be tested
- external_address (Address): Static external address for ext opcodes
+ fork (Fork): Fork for which we ask to generate scenarios
+        pre (Alloc): Access to the state to deploy contracts into pre
+ operation (Bytecode): Evm bytecode program that will be tested
+ external_address (Address): Static external address for ext opcodes
"""
@@ -172,11 +173,12 @@ class Scenario:
Describe test scenario that will be run in test for each program.
Attributes:
- category (str): Scenario category name
- name (str): Scenario name for the test vector
- code (Address): Address that is an entry point for scenario code
- env (ScenarioEnvironment): Evm values for ScenarioExpectAddress map
- reverting (bool): If scenario reverts program execution, making result 0 (default: False)
+ category (str): Scenario category name
+ name (str): Scenario name for the test vector
+ code (Address): Address that is an entry point for scenario code
+ env (ScenarioEnvironment): Evm values for ScenarioExpectAddress map
+ reverting (bool): If scenario reverts program execution,
+ making result 0 (default: False)
"""
@@ -189,10 +191,10 @@ class Scenario:
def make_gas_hash_contract(pre: Alloc) -> Address:
"""
- Contract that spends unique amount of gas based on input
- Used for the values we can't predict, can be gas consuming on high values
- So that if we can't check exact value in expect section,
- we at least could spend unique gas amount.
+    Contract that spends a unique amount of gas based on its input.
+    Used for values we can't predict; it can be gas consuming on high
+    values. So even if we can't check the exact value in the expect
+    section, we at least spend a unique amount of gas.
"""
gas_hash_address = pre.deploy_contract(
code=Op.MSTORE(0, 0)
@@ -215,8 +217,9 @@ def make_gas_hash_contract(pre: Alloc) -> Address:
def make_invalid_opcode_contract(pre: Alloc, fork: Fork) -> Address:
"""
- Deploy a contract that will execute any asked byte as an opcode from calldataload
- Deploy 20 empty stack elements. Jump to opcode instruction. if worked, return 0.
+    Deploy a contract that will execute any requested byte as an opcode
+    from calldataload. Deploy 20 empty stack elements, then jump to the
+    opcode instruction; if it worked, return 0.
"""
invalid_opcode_caller = pre.deploy_contract(
code=Op.PUSH1(0) * 20
diff --git a/tests/frontier/scenarios/programs/all_frontier_opcodes.py b/tests/frontier/scenarios/programs/all_frontier_opcodes.py
index 83978c7509a..72d8fa459a4 100644
--- a/tests/frontier/scenarios/programs/all_frontier_opcodes.py
+++ b/tests/frontier/scenarios/programs/all_frontier_opcodes.py
@@ -1,4 +1,7 @@
-"""Define a program for scenario test that executes all frontier opcodes and entangles it's result.""" # noqa: E501
+"""
+Define a program for scenario test that executes all frontier opcodes and
+entangles its result.
+"""
from functools import cached_property
@@ -15,7 +18,9 @@
def make_all_opcode_program() -> Bytecode:
- """Make a program that call each Frontier opcode and verifies it's result."""
+ """
+    Make a program that calls each Frontier opcode and verifies its result.
+ """
code: Bytecode = (
# Test opcode 01 - ADD
Conditional(
diff --git a/tests/frontier/scenarios/programs/context_calls.py b/tests/frontier/scenarios/programs/context_calls.py
index 0693d181102..ea4d8938389 100644
--- a/tests/frontier/scenarios/programs/context_calls.py
+++ b/tests/frontier/scenarios/programs/context_calls.py
@@ -15,7 +15,9 @@
class ProgramAddress(ScenarioTestProgram):
- """Check that ADDRESS is really the code execution address in all scenarios."""
+ """
+ Check that ADDRESS is really the code execution address in all scenarios.
+ """
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
diff --git a/tests/frontier/scenarios/scenarios/call_combinations.py b/tests/frontier/scenarios/scenarios/call_combinations.py
index e8e091307ce..318c48d08e3 100644
--- a/tests/frontier/scenarios/scenarios/call_combinations.py
+++ b/tests/frontier/scenarios/scenarios/call_combinations.py
@@ -55,15 +55,17 @@ def __init__(self, scenario_input: ScenarioGeneratorInput):
def generate(self) -> List[Scenario]:
"""
- Generate Scenarios for call combinations
- We take code that we want to test at scenario_input.operation_contract
- and put it in the context of call combinations.
+        Generate Scenarios for call combinations. We take the code we want
+        to test at scenario_input.operation_contract and put it in the
+        context of call combinations.
Example:
- root_contract -> call -> scenario_contract -> first_call -> sub_contract
- sub_contact -> second_call -> code
- We assume that code always returns it's result
- That we pass as return value in scenario_contract for the post state verification
+        root_contract -> call -> scenario_contract -> first_call ->
+        sub_contract; sub_contract -> second_call -> code
+
+ We assume that code always returns its result.
+ That we pass as return value in scenario_contract for the
+ post state verification.
"""
scenarios_list: List[Scenario] = []
@@ -80,8 +82,8 @@ def generate(self) -> List[Scenario]:
def _generate_one_call_scenario(self, first_call: Opcode) -> Scenario:
"""
- Generate scenario for only one call
- root_contract -(CALL)-> scenario_contract -(first_call)-> operation_contract.
+        Generate scenario for only one call: root_contract -(CALL)->
+        scenario_contract -(first_call)-> operation_contract.
"""
scenario_input: ScenarioGeneratorInput = self.scenario_input
pre: Alloc = scenario_input.pre
@@ -162,15 +164,16 @@ def _generate_one_call_scenario(self, first_call: Opcode) -> Scenario:
def _generate_two_call_scenario(self, first_call: Opcode, second_call: Opcode) -> Scenario:
"""
- Generate scenario for two types of calls combination
- root_contract -(CALL)-> scenario_contract -(first_call)-> sub_contract
- sub_contract -(second_call) -> operation_contract.
+        Generate scenario for a combination of two call types: root_contract
+        -(CALL)-> scenario_contract -(first_call)-> sub_contract;
+        sub_contract -(second_call)-> operation_contract.
"""
def _compute_code_caller() -> Address:
"""
- Calculate who is the code caller in program_contract's code in given sequence
- root -CALL-> scenario_contract -(first_call)-> sub_contract -(second_call)-> program.
+ Calculate who is the code caller in program_contract's code in
+            the given sequence: root -CALL-> scenario_contract -(first_call)
+ -> sub_contract -(second_call)-> program.
"""
code_caller: Address = root_contract
if first_call == Op.DELEGATECALL:
@@ -188,8 +191,9 @@ def _compute_code_caller() -> Address:
def _compute_selfbalance() -> int:
"""
- Calculate the result of Op.SELFBALANCE in program scope in given sequence
- root -CALL-> scenario_contract -(first_call)-> sub_contract -(second_call)-> program.
+            Calculate the result of Op.SELFBALANCE in program scope in the
+            given sequence: root -CALL-> scenario_contract -(first_call)->
+ sub_contract -(second_call)-> program.
"""
selfbalance: int = 0
if second_call in [Op.CALL]:
@@ -214,7 +218,8 @@ def _compute_selfbalance() -> int:
def _compute_callvalue() -> int:
"""
Calculate the expected callvalue in program scope given sequence:
- root -CALL-> scenario_contract -(first_call)-> sub_contract -(second_call)-> program.
+ root -CALL-> scenario_contract -(first_call)-> sub_contract
+ -(second_call)-> program.
"""
if second_call == Op.STATICCALL:
return 0
diff --git a/tests/frontier/scenarios/scenarios/create_combinations.py b/tests/frontier/scenarios/scenarios/create_combinations.py
index d5603913246..0a5040a8735 100644
--- a/tests/frontier/scenarios/scenarios/create_combinations.py
+++ b/tests/frontier/scenarios/scenarios/create_combinations.py
@@ -27,7 +27,10 @@ def scenarios_create_combinations(scenario_input: ScenarioGeneratorInput) -> Lis
"""Generate Scenarios for create combinations."""
def _compute_selfbalance() -> int:
- """Compute selfbalance opcode for root -> call -> scenario -> create | [call*] -> program.""" # noqa: E501
+ """
+ Compute selfbalance opcode for root -> call -> scenario ->
+ create | [call*] -> program.
+ """
if call in [Op.DELEGATECALL, Op.CALLCODE]:
return (
balance.scenario_contract_balance + balance.root_call_value - balance.create_value
@@ -51,7 +54,8 @@ def _compute_selfbalance() -> int:
salt = [0] if create == Op.CREATE2 else []
operation_contract = scenario_input.pre.deploy_contract(code=scenario_input.operation_code)
- # the code result in init code will be actually code of a deployed contract
+ # the code result in init code will be actually code of a deployed
+ # contract
scenario_contract = scenario_input.pre.deploy_contract(
balance=3,
code=Op.EXTCODECOPY(operation_contract, 0, 0, Op.EXTCODESIZE(operation_contract))
diff --git a/tests/frontier/scenarios/test_scenarios.py b/tests/frontier/scenarios/test_scenarios.py
index 585aa6aab73..bb820d81d04 100644
--- a/tests/frontier/scenarios/test_scenarios.py
+++ b/tests/frontier/scenarios/test_scenarios.py
@@ -1,6 +1,6 @@
"""
-Call every possible opcode and test that the subcall is successful
-if the opcode is supported by the fork and fails otherwise.
+Call every possible opcode and test that the subcall is successful if the
+opcode is supported by the fork and fails otherwise.
"""
from typing import List
@@ -76,7 +76,10 @@
@pytest.fixture
def scenarios(fork: Fork, pre: Alloc, test_program: ScenarioTestProgram) -> List[Scenario]:
- """Define fixture vectors of all possible scenarios, given the current pre state input."""
+ """
+ Define fixture vectors of all possible scenarios, given the current pre
+ state input.
+ """
scenarios_list: List[Scenario] = []
scenario_input = ScenarioGeneratorInput(
@@ -118,9 +121,13 @@ def scenarios(fork: Fork, pre: Alloc, test_program: ScenarioTestProgram) -> List
@pytest.mark.valid_from("Frontier")
@pytest.mark.parametrize(
# select program to debug ("program_id","scenario_name")
- # program="" select all programs
- # scenario_name="" select all scenarios
- # Example: [ScenarioDebug(program_id=ProgramSstoreSload().id, scenario_name="scenario_CALL_CALL")], # noqa: E501
+    # program="" selects all programs
+    # scenario_name="" selects all scenarios
+    # (leave both empty to run everything)
+ #
+ # Example:
+ # [ScenarioDebug(program_id=ProgramSstoreSload().id,
+ # scenario_name="scenario_CALL_CALL")]
"debug",
[
ScenarioDebug(
@@ -178,13 +185,13 @@ def test_scenarios(
scenarios,
):
"""
- Test given operation in different scenarios
- Verify that it's return value equal to expected result on every scenario,
- that is valid for the given fork.
+    Test given operation in different scenarios. Verify that its return
+    value equals the expected result on every scenario that is valid for
+    the given fork.
- Note: Don't use pytest parametrize for scenario production, because scenarios will be complex
- Generate one test file for [each operation] * [each scenario] to save space
- As well as operations will be complex too
+    Note: Don't use pytest parametrize for scenario production, because
+    scenarios will be complex. Generate one test file for [each operation] *
+    [each scenario] to save space, as operations will be complex too.
"""
tx_env = Environment()
tx_origin: Address = pre.fund_eoa()
@@ -213,7 +220,8 @@ def test_scenarios(
fork=fork,
origin=tx_origin,
gasprice=tx_gasprice,
- timestamp=tx_env.timestamp, # we can't know timestamp before head, use gas hash
+ timestamp=tx_env.timestamp, # we can't know timestamp before head,
+ # use gas hash
number=len(blocks) + 1,
gaslimit=tx_env.gas_limit,
coinbase=tx_env.fee_recipient,
diff --git a/tests/homestead/coverage/test_coverage.py b/tests/homestead/coverage/test_coverage.py
index b0120d113fb..52743076429 100644
--- a/tests/homestead/coverage/test_coverage.py
+++ b/tests/homestead/coverage/test_coverage.py
@@ -1,4 +1,7 @@
-"""Tests that address coverage gaps that result from updating `ethereum/tests` into EEST tests."""
+"""
+Tests that address coverage gaps that result from updating `ethereum/tests`
+into EEST tests.
+"""
import pytest
@@ -17,11 +20,11 @@ def test_coverage(
fork: Fork,
):
"""
- Cover gaps that result from transforming Yul code into
- our Python opcode wrapper bytecode.
+ Cover gaps that result from transforming Yul code into our Python opcode
+ wrapper bytecode.
- E.g. Yul tends to optimize stack items by using `SWAP1` and `DUP1` opcodes, which are not
- regularly used in python code.
+ E.g. Yul tends to optimize stack items by using `SWAP1` and `DUP1` opcodes,
+ which are not regularly used in python code.
Modify this test to cover more Yul code if required in the future.
"""
diff --git a/tests/istanbul/eip1344_chainid/__init__.py b/tests/istanbul/eip1344_chainid/__init__.py
index bb41fcd2e72..3669063cbee 100644
--- a/tests/istanbul/eip1344_chainid/__init__.py
+++ b/tests/istanbul/eip1344_chainid/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-1344: ChainID Opcode](https://eips.ethereum.org/EIPS/eip-1344)
- Test cases for [EIP-1344: ChainID Opcode](https://eips.ethereum.org/EIPS/eip-1344).
+Tests for [EIP-1344: ChainID Opcode](https://eips.ethereum.org/EIPS/eip-1344).
"""
diff --git a/tests/istanbul/eip1344_chainid/test_chainid.py b/tests/istanbul/eip1344_chainid/test_chainid.py
index 72c670268cf..da878691b23 100644
--- a/tests/istanbul/eip1344_chainid/test_chainid.py
+++ b/tests/istanbul/eip1344_chainid/test_chainid.py
@@ -1,6 +1,5 @@
"""
-abstract: Tests [EIP-1344: CHAINID opcode](https://eips.ethereum.org/EIPS/eip-1344)
- Test cases for [EIP-1344: CHAINID opcode](https://eips.ethereum.org/EIPS/eip-1344).
+Tests [EIP-1344: CHAINID opcode](https://eips.ethereum.org/EIPS/eip-1344).
"""
import pytest
diff --git a/tests/istanbul/eip152_blake2/__init__.py b/tests/istanbul/eip152_blake2/__init__.py
index 1f034e456fb..040419e930f 100644
--- a/tests/istanbul/eip152_blake2/__init__.py
+++ b/tests/istanbul/eip152_blake2/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-152: BLAKE2 compression precompile](https://eips.ethereum.org/EIPS/eip-152)
- Test cases for [EIP-152: BLAKE2 compression precompile](https://eips.ethereum.org/EIPS/eip-152).
+Tests [EIP-152: BLAKE2 compression precompile](https://eips.ethereum.org/EIPS/eip-152).
"""
diff --git a/tests/istanbul/eip152_blake2/common.py b/tests/istanbul/eip152_blake2/common.py
index 7081a31a2ec..2f0ab7ba7cb 100644
--- a/tests/istanbul/eip152_blake2/common.py
+++ b/tests/istanbul/eip152_blake2/common.py
@@ -14,15 +14,19 @@ class Blake2bInput(TestParameterGroup):
call data from them. Returns all inputs encoded as bytes.
Attributes:
- rounds_length (int): An optional integer representing the bytes length
- for the number of rounds. Defaults to the expected length of 4.
- rounds (int | str): A hex string or integer value representing the number of rounds.
- h (str): A hex string that represents the state vector.
- m (str): A hex string that represents the message block vector.
- t_0 (int | str): A hex string or integer value that represents the first offset counter.
- t_1 (int | str): A hex string or integer value that represents the second offset counter.
- f (bool): An optional boolean that represents the final block indicator flag.
- Defaults to True.
+ rounds_length (int): An optional integer representing the bytes
+ length for the number of rounds.
+ Defaults to the expected length of 4.
+ rounds (int | str): A hex string or integer value representing the number
+ of rounds.
+ h (str): A hex string that represents the state vector.
+ m (str): A hex string that represents the message block vector.
+ t_0 (int | str): A hex string or integer value that represents
+ the first offset counter.
+        t_1 (int | str): A hex string or integer value that represents the second
+ offset counter.
+ f (bool): An optional boolean that represents the final block indicator
+ flag. Defaults to True.
"""
@@ -58,10 +62,10 @@ class ExpectedOutput(TestParameterGroup):
Expected test result.
Attributes:
- call_succeeds (str | bool): A hex string or boolean to indicate whether the call was
- successful or not.
- data_1 (str): String value of the first updated state vector.
- data_2 (str): String value of the second updated state vector.
+ call_succeeds (str | bool): A hex string or boolean to indicate
+ whether the call was successful or not.
+ data_1 (str): String value of the first updated state vector.
+ data_2 (str): String value of the second updated state vector.
"""
diff --git a/tests/istanbul/eip152_blake2/conftest.py b/tests/istanbul/eip152_blake2/conftest.py
index ed544ec2911..a6fbc26be1c 100644
--- a/tests/istanbul/eip152_blake2/conftest.py
+++ b/tests/istanbul/eip152_blake2/conftest.py
@@ -11,8 +11,8 @@
@pytest.fixture
def blake2b_contract_bytecode(call_opcode: Op) -> Bytecode:
"""
- Contract code that performs the provided opcode (CALL or CALLCODE) to the BLAKE2b precompile
- and stores the result.
+ Contract code that performs the provided opcode (CALL or CALLCODE) to the
+ BLAKE2b precompile and stores the result.
"""
return (
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
diff --git a/tests/istanbul/eip152_blake2/spec.py b/tests/istanbul/eip152_blake2/spec.py
index 38413b71898..021c94a28d0 100644
--- a/tests/istanbul/eip152_blake2/spec.py
+++ b/tests/istanbul/eip152_blake2/spec.py
@@ -35,7 +35,8 @@ class Spec:
BLAKE2_PRECOMPILE_T_1_LENGTH = 8
BLAKE2_PRECOMPILE_F_LENGTH = 1
- # Constants for BLAKE2b and BLAKE2s spec defined at https://datatracker.ietf.org/doc/html/rfc7693#section-3.2
+ # Constants for BLAKE2b and BLAKE2s spec defined at
+ # https://datatracker.ietf.org/doc/html/rfc7693#section-3.2
BLAKE2B_PRECOMPILE_ROUNDS = 12
BLAKE2B_PRECOMPILE_H_LENGTH = 64
@@ -47,8 +48,19 @@ class SpecTestVectors:
"""Defines common test parameters for the BLAKE2b precompile."""
# The following constants are used to define common test parameters
- # Origin of vectors defined at https://datatracker.ietf.org/doc/html/rfc7693.html#appendix-A
- BLAKE2_STATE_VECTOR = "48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b" # noqa:E501
- BLAKE2_MESSAGE_BLOCK_VECTOR = "6162630000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" # noqa:E501
+ # Origin of vectors defined at
+ # https://datatracker.ietf.org/doc/html/rfc7693.html#appendix-A
+ BLAKE2_STATE_VECTOR = (
+ "48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1"
+ "361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f"
+ "79217e1319cde05b"
+ )
+ BLAKE2_MESSAGE_BLOCK_VECTOR = (
+ "616263000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000"
+ )
BLAKE2_OFFSET_COUNTER_0 = 3
BLAKE2_OFFSET_COUNTER_1 = 0
diff --git a/tests/istanbul/eip152_blake2/test_blake2.py b/tests/istanbul/eip152_blake2/test_blake2.py
index dab5c9e6ba3..6e575947c83 100644
--- a/tests/istanbul/eip152_blake2/test_blake2.py
+++ b/tests/istanbul/eip152_blake2/test_blake2.py
@@ -1,6 +1,5 @@
"""
-abstract: Tests [EIP-152: BLAKE2b compression precompile](https://eips.ethereum.org/EIPS/eip-152)
- Test cases for [EIP-152: BLAKE2b compression precompile](https://eips.ethereum.org/EIPS/eip-152).
+Tests [EIP-152: BLAKE2b compression precompile](https://eips.ethereum.org/EIPS/eip-152).
"""
from typing import List
diff --git a/tests/istanbul/eip152_blake2/test_blake2_delegatecall.py b/tests/istanbul/eip152_blake2/test_blake2_delegatecall.py
index b7ed40ba4a3..79ecfb664bd 100644
--- a/tests/istanbul/eip152_blake2/test_blake2_delegatecall.py
+++ b/tests/istanbul/eip152_blake2/test_blake2_delegatecall.py
@@ -1,4 +1,6 @@
-"""abstract: Test delegatecall to Blake2B Precompile before and after it was added."""
+"""
+Test delegatecall to Blake2B Precompile before and after it was added.
+"""
import pytest
@@ -21,7 +23,10 @@
@pytest.mark.valid_from("ConstantinopleFix")
def test_blake2_precompile_delegatecall(state_test: StateTestFiller, pre: Alloc, fork: Fork):
- """Test delegatecall consumes specified gas for the Blake2B precompile when it exists."""
+ """
+ Test delegatecall consumes specified gas for the Blake2B precompile when it
+ exists.
+ """
env = Environment()
account = pre.deploy_contract(
@@ -43,7 +48,8 @@ def test_blake2_precompile_delegatecall(state_test: StateTestFiller, pre: Alloc,
protected=True,
)
- # If precompile exists, DELEGATECALL will fail, otherwise DELEGATECALL will succeed
+ # If precompile exists, DELEGATECALL will fail, otherwise DELEGATECALL will
+ # succeed
post = {
account: Account(
storage={
diff --git a/tests/osaka/__init__.py b/tests/osaka/__init__.py
index 5eceac229b0..81fc13f3537 100644
--- a/tests/osaka/__init__.py
+++ b/tests/osaka/__init__.py
@@ -1 +1,4 @@
-"""Test cases for EVM functionality introduced in Osaka, [EIP-7607: Hardfork Meta - Fusaka](https://eip.directory/eips/eip-7607).""" # noqa: E501
+"""
+Test cases for EVM functionality introduced in Osaka, [EIP-7607: Hardfork Meta
+- Fusaka](https://eip.directory/eips/eip-7607).
+"""
diff --git a/tests/osaka/eip7594_peerdas/__init__.py b/tests/osaka/eip7594_peerdas/__init__.py
index 3b5d0449782..43cd5354b74 100644
--- a/tests/osaka/eip7594_peerdas/__init__.py
+++ b/tests/osaka/eip7594_peerdas/__init__.py
@@ -1,4 +1,5 @@
"""
Test suite for
-[EIP-7594: PeerDAS - Peer Data Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
+[EIP-7594: PeerDAS - Peer Data Availability
+Sampling](https://eips.ethereum.org/EIPS/eip-7594).
"""
diff --git a/tests/osaka/eip7594_peerdas/test_get_blobs.py b/tests/osaka/eip7594_peerdas/test_get_blobs.py
index 2d349fd93b3..a38fcf36c51 100644
--- a/tests/osaka/eip7594_peerdas/test_get_blobs.py
+++ b/tests/osaka/eip7594_peerdas/test_get_blobs.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests get blobs engine endpoint for [EIP-7594: PeerDAS - Peer Data Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594)
- Test get blobs engine endpoint for [EIP-7594: PeerDAS - Peer Data Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
-""" # noqa: E501
+Get blobs engine endpoint tests.
+
+Tests for get blobs engine endpoint in [EIP-7594: PeerDAS - Peer Data
+Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
+"""
from hashlib import sha256
from typing import List, Optional
@@ -92,7 +94,8 @@ def excess_blob_gas(
block_base_fee_per_gas: int,
) -> int | None:
"""
- Calculate the excess blob gas of the block under test from the parent block.
+ Calculate the excess blob gas of the block under test from the parent
+ block.
Value can be overloaded by a test case to provide a custom excess blob gas.
"""
@@ -148,10 +151,10 @@ def tx_max_fee_per_blob_gas( # noqa: D103
@pytest.fixture
def tx_error() -> Optional[TransactionException]:
"""
- Even though the final block we are producing in each of these tests is invalid, and some of the
- transactions will be invalid due to the format in the final block, none of the transactions
- should be rejected by the transition tool because they are being sent to it with the correct
- format.
+ Even though the final block we are producing in each of these tests is
+ invalid, and some of the transactions will be invalid due to the format in
+ the final block, none of the transactions should be rejected by the
+ transition tool because they are being sent to it with the correct format.
"""
return None
@@ -319,8 +322,8 @@ def test_get_blobs(
txs: List[NetworkWrappedTransaction | Transaction],
):
"""
- Test valid blob combinations where one or more txs in the block
- serialized version contain a full blob (network version) tx.
+ Test valid blob combinations where one or more txs in the block serialized
+ version contain a full blob (network version) tx.
"""
blobs_test(pre=pre, txs=txs)
@@ -336,6 +339,9 @@ def test_get_blobs_nonexisting(
pre: Alloc,
txs: List[NetworkWrappedTransaction | Transaction],
):
- """Test that ensures clients respond with 'null' when at least one requested blob is not available.""" # noqa: E501
+ """
+ Test that ensures clients respond with 'null' when at least one requested
+ blob is not available.
+ """
nonexisting_blob_hashes = [Hash(sha256(str(i).encode()).digest()) for i in range(5)]
blobs_test(pre=pre, txs=txs, nonexisting_blob_hashes=nonexisting_blob_hashes)
diff --git a/tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py b/tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py
index c9d0114738d..4dcf4ce2050 100644
--- a/tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py
+++ b/tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests `MAX_BLOBS_PER_TX` limit for [EIP-7594: PeerDAS - Peer Data Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594)
- Tests `MAX_BLOBS_PER_TX` limit for [EIP-7594: PeerDAS - Peer Data Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
-""" # noqa: E501
+MAX_BLOBS_PER_TX limit tests.
+
+Tests for `MAX_BLOBS_PER_TX` limit in [EIP-7594: PeerDAS - Peer Data
+Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
+"""
import pytest
@@ -88,9 +90,9 @@ def test_valid_max_blobs_per_tx(
tx: Transaction,
):
"""
- Test that transactions with blob count from 1 to MAX_BLOBS_PER_TX are accepted.
- Verifies that individual transactions can contain up to the maximum allowed
- number of blobs per transaction.
+ Test that transactions with blob count from 1 to MAX_BLOBS_PER_TX are
+ accepted. Verifies that individual transactions can contain up to the
+ maximum allowed number of blobs per transaction.
"""
state_test(
env=env,
@@ -120,10 +122,10 @@ def test_invalid_max_blobs_per_tx(
blob_count: int,
):
"""
- Test that transactions exceeding MAX_BLOBS_PER_TX are rejected.
- Verifies that individual transactions cannot contain more than the maximum
- allowed number of blobs per transaction, even if the total would be within
- the block limit.
+ Test that transactions exceeding MAX_BLOBS_PER_TX are rejected. Verifies
+ that individual transactions cannot contain more than the maximum allowed
+ number of blobs per transaction, even if the total would be within the
+ block limit.
"""
state_test(
env=env,
diff --git a/tests/osaka/eip7823_modexp_upper_bounds/__init__.py b/tests/osaka/eip7823_modexp_upper_bounds/__init__.py
index 20ebfaec013..b41a84cc6c0 100644
--- a/tests/osaka/eip7823_modexp_upper_bounds/__init__.py
+++ b/tests/osaka/eip7823_modexp_upper_bounds/__init__.py
@@ -1,4 +1,5 @@
"""
-abstract: Tests [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823)
- Test cases for [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823).
+Tests [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823).
+
+Test cases for EIP-7823: Set upper bounds for MODEXP.
"""
diff --git a/tests/osaka/eip7823_modexp_upper_bounds/conftest.py b/tests/osaka/eip7823_modexp_upper_bounds/conftest.py
index 9a89e91db31..663cd5456d3 100644
--- a/tests/osaka/eip7823_modexp_upper_bounds/conftest.py
+++ b/tests/osaka/eip7823_modexp_upper_bounds/conftest.py
@@ -16,8 +16,8 @@
@pytest.fixture
def call_contract_post_storage() -> Storage:
"""
- Storage of the test contract after the transaction is executed.
- Note: Fixture `call_contract_code` fills the actual expected storage values.
+ Storage of the test contract after the transaction is executed. Note:
+ Fixture `call_contract_code` fills the actual expected storage values.
"""
return Storage()
@@ -27,8 +27,8 @@ def call_succeeds(
total_gas_used: int, fork: Fork, env: Environment, modexp_input: ModExpInput
) -> bool:
"""
- By default, depending on the expected output, we can deduce if the call is expected to succeed
- or fail.
+ By default, depending on the expected output, we can deduce if the call is
+ expected to succeed or fail.
"""
# Transaction gas limit exceeded
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
@@ -57,14 +57,15 @@ def gas_measure_contract(
call_succeeds: bool,
) -> Address:
"""
- Deploys a contract that measures ModExp gas consumption and execution result.
+ Deploys a contract that measures ModExp gas consumption and execution
+ result.
Always stored:
- storage[0]: precompile call success
- storage[1]: return data length from precompile
+ storage[0]: precompile call success
+ storage[1]: return data length from precompile
Only if the precompile call succeeds:
- storage[2]: gas consumed by precompile
- storage[3]: hash of return data from precompile
+ storage[2]: gas consumed by precompile
+ storage[3]: hash of return data from precompile
"""
call_code = Op.CALL(
precompile_gas,
@@ -122,14 +123,17 @@ def gas_measure_contract(
@pytest.fixture
def precompile_gas(fork: Fork, modexp_input: ModExpInput) -> int:
- """Calculate gas cost for the ModExp precompile and verify it matches expected gas."""
+ """
+ Calculate gas cost for the ModExp precompile and verify it matches expected
+ gas.
+ """
spec = Spec if fork < Osaka else Spec7883
try:
calculated_gas = spec.calculate_gas_cost(modexp_input)
return calculated_gas
except Exception:
- # Used for `test_modexp_invalid_inputs` we expect the call to not succeed.
- # Return is for completeness.
+ # Used for `test_modexp_invalid_inputs` we expect the call to not
+ # succeed. Return is for completeness.
return 500 if fork >= Osaka else 200
@@ -153,7 +157,9 @@ def tx(
def total_gas_used(
fork: Fork, modexp_expected: bytes, modexp_input: ModExpInput, precompile_gas: int
) -> int:
- """Transaction gas limit used for the test (Can be overridden in the test)."""
+ """
+ Transaction gas limit used for the test (Can be overridden in the test).
+ """
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 500_000
@@ -170,7 +176,9 @@ def total_gas_used(
@pytest.fixture
def tx_gas_limit(total_gas_used: int, fork: Fork, env: Environment) -> int:
- """Transaction gas limit used for the test (Can be overridden in the test)."""
+ """
+ Transaction gas limit used for the test (Can be overridden in the test).
+ """
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
return min(tx_gas_limit_cap, total_gas_used)
diff --git a/tests/osaka/eip7823_modexp_upper_bounds/test_modexp_upper_bounds.py b/tests/osaka/eip7823_modexp_upper_bounds/test_modexp_upper_bounds.py
index fca1e181272..d91c4d3c711 100644
--- a/tests/osaka/eip7823_modexp_upper_bounds/test_modexp_upper_bounds.py
+++ b/tests/osaka/eip7823_modexp_upper_bounds/test_modexp_upper_bounds.py
@@ -1,6 +1,5 @@
"""
-abstract: Test [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823)
- Tests upper bounds of the MODEXP precompile.
+Test [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823).
"""
from typing import Dict
@@ -76,7 +75,8 @@
pytest.param(
ModExpInput(
base=b"",
- # Non-zero exponent is cancelled with zero multiplication complexity pre EIP-7823.
+ # Non-zero exponent is cancelled with zero multiplication
+ # complexity pre EIP-7823.
exponent=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"",
),
@@ -293,7 +293,10 @@ def test_modexp_upper_bounds_fork_transition(
modexp_input: ModExpInput,
modexp_expected: bytes,
):
- """Test MODEXP upper bounds enforcement transition from before to after Osaka hard fork."""
+ """
+ Test MODEXP upper bounds enforcement transition from before to after Osaka
+ hard fork.
+ """
call_code = Op.CALL(
address=Spec.MODEXP_ADDRESS,
args_size=Op.CALLDATASIZE,
diff --git a/tests/osaka/eip7825_transaction_gas_limit_cap/__init__.py b/tests/osaka/eip7825_transaction_gas_limit_cap/__init__.py
index ecc57bb22ba..6ad1209d939 100644
--- a/tests/osaka/eip7825_transaction_gas_limit_cap/__init__.py
+++ b/tests/osaka/eip7825_transaction_gas_limit_cap/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-7825: Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825).
- Test cases for [EIP-7825: Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825).
+Tests [EIP-7825: Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825).
"""
diff --git a/tests/osaka/eip7825_transaction_gas_limit_cap/spec.py b/tests/osaka/eip7825_transaction_gas_limit_cap/spec.py
index 58a93203a49..319c70e3988 100644
--- a/tests/osaka/eip7825_transaction_gas_limit_cap/spec.py
+++ b/tests/osaka/eip7825_transaction_gas_limit_cap/spec.py
@@ -17,7 +17,9 @@ class ReferenceSpec:
@dataclass(frozen=True)
class Spec:
- """Constants and helpers for the EIP-7825 Transaction Gas Limit Cap tests."""
+ """
+ Constants and helpers for the EIP-7825 Transaction Gas Limit Cap tests.
+ """
# Gas limit constants
tx_gas_limit_cap = 2**24 # 16,777,216
diff --git a/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit.py b/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit.py
index 254cf62decf..5c23a5571b9 100644
--- a/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit.py
+++ b/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit.py
@@ -1,6 +1,8 @@
"""
-abstract: Tests [EIP-7825 Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825)
- Test cases for [EIP-7825 Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825)].
+Transaction gas limit cap tests.
+
+Tests for transaction gas limit cap in [EIP-7825: Transaction Gas Limit
+Cap](https://eips.ethereum.org/EIPS/eip-7825).
"""
from typing import List
@@ -37,12 +39,13 @@
def tx_gas_limit_cap_tests(fork: Fork) -> List[ParameterSet]:
"""
- Return a list of tests for transaction gas limit cap parametrized for each different
- fork.
+ Return a list of tests for transaction gas limit cap parametrized for each
+ different fork.
"""
fork_tx_gas_limit_cap = fork.transaction_gas_limit_cap()
if fork_tx_gas_limit_cap is None:
- # Use a default value for forks that don't have a transaction gas limit cap
+ # Use a default value for forks that don't have a transaction gas limit
+ # cap
return [
pytest.param(Spec.tx_gas_limit_cap + 1, None, id="tx_gas_limit_cap_none"),
]
@@ -69,7 +72,9 @@ def test_transaction_gas_limit_cap(
error: TransactionException | None,
tx_type: int,
):
- """Test the transaction gas limit cap behavior for all transaction types."""
+ """
+ Test the transaction gas limit cap behavior for all transaction types.
+ """
env = Environment()
sender = pre.fund_eoa()
@@ -179,7 +184,9 @@ def test_tx_gas_larger_than_block_gas_limit(
fork: Fork,
exceed_block_gas_limit: bool,
):
- """Test multiple transactions with total gas larger than the block gas limit."""
+ """
+ Test multiple transactions with total gas larger than the block gas limit.
+ """
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
@@ -302,7 +309,8 @@ def test_tx_gas_limit_cap_full_calldata(
num_of_bytes += int(exceed_tx_gas_limit)
- # Gas cost calculation based on EIP-7623: (https://eips.ethereum.org/EIPS/eip-7623)
+ # Gas cost calculation based on EIP-7623:
+ # (https://eips.ethereum.org/EIPS/eip-7623)
#
# Simplified in this test case:
# - No execution gas used (no opcodes are executed)
@@ -377,12 +385,12 @@ def test_tx_gas_limit_cap_contract_creation(
code = Op.JUMPDEST * num_of_bytes
- # Craft a contract creation transaction that exceeds the transaction gas limit cap
+ # Craft a contract creation transaction that exceeds the transaction gas
+ # limit cap
#
# Total cost =
# intrinsic cost (base tx cost + contract creation cost)
- # + calldata cost
- # + init code execution cost
+ # + calldata cost + init code execution cost
#
# The contract body is filled with JUMPDEST instructions, so:
# total cost = intrinsic cost + calldata cost + (num_of_jumpdest * 1 gas)
@@ -424,7 +432,10 @@ def test_tx_gas_limit_cap_access_list_with_diff_keys(
pre: Alloc,
fork: Fork,
):
- """Test the transaction gas limit cap behavior for access list with different storage keys."""
+ """
+ Test the transaction gas limit cap behavior for access list with different
+ storage keys.
+ """
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
@@ -500,7 +511,10 @@ def test_tx_gas_limit_cap_access_list_with_diff_addr(
exceed_tx_gas_limit: bool,
correct_intrinsic_cost_in_transaction_gas_limit: bool,
):
- """Test the transaction gas limit cap behavior for access list with different addresses."""
+ """
+ Test the transaction gas limit cap behavior for access list with different
+ addresses.
+ """
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
@@ -588,14 +602,13 @@ def test_tx_gas_limit_cap_authorized_tx(
)
# EIP-7702 authorization transaction cost:
+ # 21000 + 16 * non-zero calldata bytes + 4 * zero calldata bytes + 1900 *
+ # access list storage key count + 2400 * access list address count +
+ # PER_EMPTY_ACCOUNT_COST * authorization list length
#
- # 21000 + 16 * non-zero calldata bytes + 4 * zero calldata bytes
- # + 1900 * access list storage key count
- # + 2400 * access list address count
- # + PER_EMPTY_ACCOUNT_COST * authorization list length
+ # There is no calldata and no storage keys in this test case and the access
+ # address list count is equal to the authorization list length
#
- # There is no calldata and no storage keys in this test case
- # and the access address list count is equal to the authorization list length
# total cost = 21000 + (2400 + 25_000) * auth_list_length
auth_address = pre.deploy_contract(code=Op.STOP)
diff --git a/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit_transition_fork.py b/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit_transition_fork.py
index eb992e8ded1..c28cd62dbcf 100644
--- a/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit_transition_fork.py
+++ b/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit_transition_fork.py
@@ -1,6 +1,8 @@
"""
-abstract: Tests [EIP-7825 Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825)
- Test cases for [EIP-7825 Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825)].
+Transaction gas limit cap fork transition tests.
+
+Tests for fork transition behavior in [EIP-7825: Transaction Gas Limit
+Cap](https://eips.ethereum.org/EIPS/eip-7825).
"""
import pytest
@@ -44,8 +46,8 @@ def test_transaction_gas_limit_cap_at_transition(
"""
Test transaction gas limit cap behavior at the Osaka transition.
- Before timestamp 15000: No gas limit cap (transactions with gas > 2^24 are valid)
- At/after timestamp 15000: Gas limit cap of 2^24 is enforced
+    Before timestamp 15000: no gas limit cap (transactions with gas > 2^24 are
+    valid). At/after timestamp 15000: the gas limit cap of 2^24 is enforced.
"""
contract_address = pre.deploy_contract(
code=Op.SSTORE(Op.TIMESTAMP, Op.ADD(Op.SLOAD(Op.TIMESTAMP), 1)) + Op.STOP,
@@ -58,7 +60,8 @@ def test_transaction_gas_limit_cap_at_transition(
# Test boundary: cap + 1 should fail after fork activation
above_cap = tx_gas_cap + 1
- # Before fork activation: both cap and above_cap transactions should succeed
+ # Before fork activation: both cap and above_cap transactions should
+ # succeed
at_cap_tx_before_fork = Transaction(
ty=0, # Legacy transaction
to=contract_address,
@@ -86,7 +89,8 @@ def test_transaction_gas_limit_cap_at_transition(
blocks = []
- # Before transition (timestamp < 15000): both cap and above_cap transactions should succeed
+ # Before transition (timestamp < 15000): both cap and above_cap
+ # transactions should succeed
blocks.append(
Block(
timestamp=14_999,
diff --git a/tests/osaka/eip7883_modexp_gas_increase/__init__.py b/tests/osaka/eip7883_modexp_gas_increase/__init__.py
index 4b43f2c9ed4..7987a367c26 100644
--- a/tests/osaka/eip7883_modexp_gas_increase/__init__.py
+++ b/tests/osaka/eip7883_modexp_gas_increase/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883)
- Test cases for [EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883).
+Tests for [EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883).
"""
diff --git a/tests/osaka/eip7883_modexp_gas_increase/conftest.py b/tests/osaka/eip7883_modexp_gas_increase/conftest.py
index 1a685531549..2740800c663 100644
--- a/tests/osaka/eip7883_modexp_gas_increase/conftest.py
+++ b/tests/osaka/eip7883_modexp_gas_increase/conftest.py
@@ -42,8 +42,8 @@ def call_opcode() -> Op:
@pytest.fixture
def call_contract_post_storage() -> Storage:
"""
- Storage of the test contract after the transaction is executed.
- Note: Fixture `call_contract_code` fills the actual expected storage values.
+ Storage of the test contract after the transaction is executed. Note:
+ Fixture `call_contract_code` fills the actual expected storage values.
"""
return Storage()
@@ -83,9 +83,10 @@ def expected_tx_cap_fail() -> bool:
@pytest.fixture
def call_succeeds(exceeds_tx_gas_cap: bool, expected_tx_cap_fail: bool) -> bool:
"""
- Determine whether the ModExp precompile call should succeed or fail.
- By default, depending on the expected output, we assume it succeeds.
- Under EIP-7825, transactions requiring more gas than the cap should fail only if unexpected.
+ Determine whether the ModExp precompile call should succeed or fail. By
+ default, depending on the expected output, we assume it succeeds. Under
+ EIP-7825, transactions requiring more gas than the cap should fail only if
+ unexpected.
"""
if exceeds_tx_gas_cap and not expected_tx_cap_fail:
pytest.fail(
@@ -107,14 +108,16 @@ def gas_measure_contract(
call_succeeds: bool,
) -> Address:
"""
- Deploys a contract that measures ModExp gas consumption and execution result.
+ Deploys a contract that measures ModExp gas consumption and execution
+ result.
Always stored:
- storage[0]: precompile call success
- storage[1]: return data length from precompile
+ storage[0]: precompile call success
+ storage[1]: return data length from precompile
+
Only if the precompile call succeeds:
- storage[2]: gas consumed by precompile
- storage[3]: hash of return data from precompile
+ storage[2]: gas consumed by precompile
+ storage[3]: hash of return data from precompile
"""
assert call_opcode in [Op.CALL, Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL]
value = [0] if call_opcode in [Op.CALL, Op.CALLCODE] else []
@@ -183,7 +186,10 @@ def gas_measure_contract(
def precompile_gas(
fork: Fork, modexp_input: ModExpInput, gas_old: int | None, gas_new: int | None
) -> int:
- """Calculate gas cost for the ModExp precompile and verify it matches expected gas."""
+ """
+ Calculate gas cost for the ModExp precompile and verify it matches expected
+ gas.
+ """
spec = Spec if fork < Osaka else Spec7883
try:
calculated_gas = spec.calculate_gas_cost(modexp_input)
@@ -199,8 +205,8 @@ def precompile_gas(
)
return calculated_gas
except Exception:
- # Used for `test_modexp_invalid_inputs` we expect the call to not succeed.
- # Return is for completeness.
+ # Used for `test_modexp_invalid_inputs` we expect the call to not
+ # succeed. Return is for completeness.
return 500 if fork >= Osaka else 200
@@ -228,7 +234,9 @@ def tx(
@pytest.fixture
def tx_gas_limit(total_tx_gas_needed: int, fork: Fork, env: Environment) -> int:
- """Transaction gas limit used for the test (Can be overridden in the test)."""
+ """
+ Transaction gas limit used for the test (Can be overridden in the test).
+ """
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
return min(tx_gas_limit_cap, total_tx_gas_needed)
diff --git a/tests/osaka/eip7883_modexp_gas_increase/helpers.py b/tests/osaka/eip7883_modexp_gas_increase/helpers.py
index 62819fea4be..dc1e05536fb 100644
--- a/tests/osaka/eip7883_modexp_gas_increase/helpers.py
+++ b/tests/osaka/eip7883_modexp_gas_increase/helpers.py
@@ -31,7 +31,10 @@ class Vector(BaseModel):
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self):
- """Convert the test vector to a tuple that can be used as a parameter in a pytest test."""
+ """
+ Convert the test vector to a tuple that can be used as a parameter in a
+ pytest test.
+ """
return pytest.param(
self.modexp_input, self.modexp_expected, self.gas_old, self.gas_new, id=self.name
)
diff --git a/tests/osaka/eip7883_modexp_gas_increase/spec.py b/tests/osaka/eip7883_modexp_gas_increase/spec.py
index 74908e60b9e..bd399595aff 100644
--- a/tests/osaka/eip7883_modexp_gas_increase/spec.py
+++ b/tests/osaka/eip7883_modexp_gas_increase/spec.py
@@ -18,8 +18,8 @@ class ReferenceSpec:
def ceiling_division(a: int, b: int) -> int:
"""
- Calculate the ceil without using floating point.
- Used by many of the EVM's formulas.
+ Calculate the ceil without using floating point. Used by many of the EVM's
+ formulas.
"""
return -(a // -b)
@@ -63,9 +63,9 @@ def calculate_multiplication_complexity(cls, base_length: int, modulus_length: i
@classmethod
def calculate_iteration_count(cls, modexp_input: ModExpInput) -> int:
"""
- Calculate the iteration count of the ModExp precompile.
- This handles length mismatch cases by using declared lengths from the raw input
- and only the first 32 bytes of exponent data for iteration calculation.
+ Calculate the iteration count of the ModExp precompile. This handles
+ length mismatch cases by using declared lengths from the raw input and
+ only the first 32 bytes of exponent data for iteration calculation.
"""
_, exponent_length, _ = modexp_input.get_declared_lengths()
exponent_head = modexp_input.get_exponent_head()
@@ -83,8 +83,9 @@ def calculate_iteration_count(cls, modexp_input: ModExpInput) -> int:
@classmethod
def calculate_gas_cost(cls, modexp_input: ModExpInput) -> int:
"""
- Calculate the ModExp gas cost according to EIP-2565 specification, overridden by the
- constants within `Spec7883` when calculating for the EIP-7883 specification.
+ Calculate the ModExp gas cost according to EIP-2565 specification,
+ overridden by the constants within `Spec7883` when calculating for the
+ EIP-7883 specification.
"""
base_length, _, modulus_length = modexp_input.get_declared_lengths()
multiplication_complexity = cls.calculate_multiplication_complexity(
@@ -97,8 +98,8 @@ def calculate_gas_cost(cls, modexp_input: ModExpInput) -> int:
@dataclass(frozen=True)
class Spec7883(Spec):
"""
- Constants and helpers for the ModExp gas cost increase EIP.
- These override the original Spec class variables for EIP-7883.
+ Constants and helpers for the ModExp gas cost increase EIP. These override
+ the original Spec class variables for EIP-7883.
"""
MODEXP_ADDRESS = 0x05
@@ -110,7 +111,10 @@ class Spec7883(Spec):
@classmethod
def calculate_multiplication_complexity(cls, base_length: int, modulus_length: int) -> int:
- """Calculate the multiplication complexity of the ModExp precompile for EIP-7883."""
+ """
+ Calculate the multiplication complexity of the ModExp precompile for
+ EIP-7883.
+ """
max_length = max(base_length, modulus_length)
words = ceiling_division(max_length, cls.WORD_SIZE)
complexity = 16
diff --git a/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds.py b/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds.py
index 4132bbfdf60..9264d3db7f7 100644
--- a/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds.py
+++ b/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds.py
@@ -1,6 +1,8 @@
"""
-abstract: Tests [EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883)
- Test cases for [EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883).
+EIP-7883 ModExp gas cost increase tests.
+
+Tests for ModExp gas cost increase in
+[EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883).
"""
from typing import Dict
@@ -75,8 +77,8 @@ def test_vectors_from_legacy_tests(
@pytest.mark.parametrize(
"modexp_input,",
[
- # These invalid inputs are from EIP-7823.
- # Ref: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7823.md#analysis
+ # These invalid inputs are from EIP-7823. Ref:
+ # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7823.md#analysis
pytest.param(
bytes.fromhex("9e5faafc"),
id="invalid-case-1",
@@ -254,7 +256,10 @@ def test_modexp_gas_usage_contract_wrapper(
tx: Transaction,
post: Dict,
):
- """Test ModExp gas cost with different gas modifiers using contract wrapper calls."""
+ """
+ Test ModExp gas cost with different gas modifiers using contract wrapper
+ calls.
+ """
state_test(pre=pre, tx=tx, post=post)
@@ -306,7 +311,10 @@ def test_modexp_used_in_transaction_entry_points(
tx_gas_limit: int,
call_values: int,
):
- """Test ModExp using in transaction entry points with different precompile gas modifiers."""
+ """
+ Test ModExp using in transaction entry points with different precompile gas
+ modifiers.
+ """
tx = Transaction(
to=Spec.MODEXP_ADDRESS,
sender=pre.fund_eoa(),
@@ -459,11 +467,10 @@ def create_modexp_variable_gas_test_cases():
"""
Create test cases for ModExp variable gas cost testing.
- Returns:
- List of pytest.param objects for the test cases
-
+ Returns: List of pytest.param objects for the test cases
"""
- # Test case definitions: (base, exponent, modulus, expected_result, gas_usage, test_id)
+ # Test case definitions: (base, exponent, modulus, expected_result,
+ # gas_usage, test_id)
test_cases = [
("", "", "", "", 500, "Z0"),
("01" * 32, "00" * 32, "", "", 500, "Z1"),
@@ -540,12 +547,15 @@ def create_modexp_variable_gas_test_cases():
# Gas calculation parameters:
#
- # Please refer to EIP-7883 for details of each function in the gas calculation.
+ # Please refer to EIP-7883 for details of each function in the gas
+ # calculation.
# Link: https://eips.ethereum.org/EIPS/eip-7883
#
# - calculate_multiplication_complexity:
- # - Comp: if max_length <= 32 bytes, it is Small (S), otherwise it is Large (L)
- # - Rel (Length Relation): base < modulus (<), base = modulus (=), base > modulus (>)
+ # - Comp: if max_length <= 32 bytes, it is Small (S), otherwise it is
+ # Large (L)
+ # - Rel (Length Relation): base < modulus (<), base = modulus (=),
+ # base > modulus (>)
#
# - calculate_iteration_count
# - Iter (Iteration Case):
@@ -555,68 +565,72 @@ def create_modexp_variable_gas_test_cases():
# - D: exp>32 and low256≠0
#
# - calculate_gas_cost
- # - Clamp: True if raw gas < 500 (clamped to 500), False if raw gas ≥ 500 (no clamping)
-
- # Test case coverage table:
- # ┌─────┬──────┬─────┬──────┬───────┬─────────┬───────────────────────────────────────────────┐
- # │ ID │ Comp │ Rel │ Iter │ Clamp │ Gas │ Description │
- # ├─────┼──────┼─────┼──────┼───────┼─────────┼───────────────────────────────────────────────┤
- # │ Z0 │ - │ - │ - │ - │ 500 │ Zero case – empty inputs │
- # │ Z1 │ S │ - │ A │ True │ 500 │ Non-zero base, zero exp, empty modulus │
- # │ Z2 │ L │ - │ A │ False │ 32768 │ Large base (1024B), zero exp, empty modulus │
- # │ Z3 │ S │ - │ C │ False |253952 │ Base, large zero exp (1024B), empty modulus │
- # │ Z4 │ S │ - │ D │ False │253952 │ Base, large exp (last byte=1), empty modulus │
- # │ Z5 │ S │ < │ A │ True │ 500 │ Empty base/exp, non-zero modulus only │
- # │ Z6 │ S │ < │ B │ False │ 3968 │ Empty base, non-zero exp and modulus │
- # │ Z7 │ L │ < │ B │ False │ 32768 │ Empty base, small exp, large modulus │
- # │ S0 │ S │ = │ A │ True │ 500 │ Small, equal, zero exp, clamped │
- # │ S1 │ S │ = │ B │ True │ 500 │ Small, equal, small exp, clamped │
- # │ S2 │ S │ = │ B │ False │ 4080 │ Small, equal, large exp, unclamped │
- # │ S3 │ S │ = │ C │ False │ 2048 │ Small, equal, large exp + zero low256 │
- # │ S4 │ S │ = │ D │ False │ 2048 │ Small, equal, large exp + non-zero low256 │
- # │ S5 │ S │ > │ A │ True │ 500 │ Small, base > mod, zero exp, clamped │
- # │ S6 │ S │ < │ B │ True │ 500 │ Small, base < mod, small exp, clamped │
- # │ L0 │ L │ = │ A │ True │ 500 │ Large, equal, zero exp, clamped │
- # │ L1 │ L │ = │ B │ False │ 12750 │ Large, equal, large exp, unclamped │
- # │ L2 │ L │ = │ C │ False │ 6400 │ Large, equal, large exp + zero low256 │
- # │ L3 │ L │ = │ D │ False │ 6400 │ Large, equal, large exp + non-zero low256 │
- # │ L4 │ L │ > │ B │ True │ 500 │ Large, base > mod, small exp, clamped │
- # │ L5 │ L │ < │ C │ False │ 9216 │ Large, base < mod, large exp + zero low256 │
- # │ B1 │ L │ < │ B │ True │ 500 │ Cross 32-byte boundary (31/33) │
- # │ B2 │ L │ > │ B │ True │ 500 │ Cross 32-byte boundary (33/31) │
- # │ B4 │ L │ = │ B │ True │ 500 │ Just over 32-byte boundary │
- # │ Z8 │ S │ = │ A │ True │ 500 │ All zeros except modulus │
- # │ Z9 │ S │ = │ A │ True │ 500 │ Zero modulus special case │
- # │ Z10 │ S │ = │ B │ False │ 3968 │ Zero base, large exponent │
- # │ Z11 │ S │ = │ C │ True │ 500 │ Zero base, 33B zero exp, non-zero modulus │
- # │ Z12 │ S │ = │ C │ False |253952 │ Zero base, large zero exp, non-zero modulus │
- # │ Z13 │ L │ > │ A │ False │ 32768 │ Large zero base, zero exp, non-zero modulus │
- # │ Z14 │ S │ = │ C │ False |253952 │ Base, large zero exp, zero modulus │
- # │ Z15 │ L │ < │ B │ False │ 32768 │ Base, small exp, large zero modulus │
- # │ Z16 │ L │ < │ C │ False │520060928│ Zero base, zero exp, large modulus (gas cap) |
- # │ M1 │ L │ = │ D │ False │ 98176 │ Maximum values stress test │
- # │ M2 │ S │ = │ B │ True │ 500 │ Max base/mod, small exponent │
- # │ M3 │ L │ < │ D │ False │ 98176 │ Small base, max exponent/mod │
- # │ T2 │ S │ = │ B │ True │ 500 │ Tiny maximum values │
- # │ P2 │ S │ = │ B │ False │ 4080 │ High bit in exponent │
- # │ P3 │ L │ = │ D │ False │ 1150 │ Specific bit pattern in large exponent │
- # │ A1 │ L │ < │ C │ False │ 65536 │ Asymmetric: tiny base, large exp/mod │
- # │ A2 │ L │ > │ B │ True │ 500 │ Asymmetric: large base, tiny exp/mod │
- # │ A3 │ L │ > │ C │ False │ 65536 │ Asymmetric: large base/exp, tiny modulus │
- # │ W2 │ S │ = │ B │ True │ 500 │ Exactly 8-byte words │
- # │ E1 │ S │ = │ D │ True │ 500 │ Exponent exactly 33 bytes │
- # │ E2 │ S │ = │ B │ False │ 4080 │ High bit in exponent first byte │
- # │ E3 │ S │ = │ B │ True │ 500 │ High bit in exponent last byte │
- # │ E4 │ S │ = │ B │ False │ 4064 │ Maximum 32-byte exponent │
- # │ IC1 │ L │ = │ B │ True │ 500 │ Bit shift vs multiplication @ 33 bytes │
- # │ IC3 │ S │ = │ B │ True │ 500 │ Ceiling division at 7 bytes │
- # │ IC4 │ S │ = │ B │ True │ 500 │ Ceiling division at 9 bytes │
- # │ IC5 │ S │ = │ B │ False │ 2160 │ Bit counting in middle of exponent │
- # │ IC6 │ L │ = │ B │ True │ 500 │ Native library even byte optimization │
- # │ IC7 │ L │ = │ B │ True │ 500 │ Vector optimization 128-bit boundary │
- # │ IC9 │ S │ = │ B │ N/A │ N/A │ Zero modulus handling │
- # │ IC10│ S │ = │ B │ False │ 4080 │ Power-of-2 boundary with high bit │
- # └─────┴──────┴─────┴──────┴───────┴─────────┴───────────────────────────────────────────────┘
+ # - Clamp: True if raw gas < 500 (clamped to 500), False if raw gas ≥ 500
+ # (no clamping)
+
+ """
+ Test case coverage table:
+
+ ┌─────┬──────┬─────┬──────┬───────┬─────────┬───────────────────────────────────────────────┐
+ │ ID │ Comp │ Rel │ Iter │ Clamp │ Gas │ Description │
+ ├─────┼──────┼─────┼──────┼───────┼─────────┼───────────────────────────────────────────────┤
+ │ Z0 │ - │ - │ - │ - │ 500 │ Zero case – empty inputs │
+ │ Z1 │ S │ - │ A │ True │ 500 │ Non-zero base, zero exp, empty modulus │
+ │ Z2 │ L │ - │ A │ False │ 32768 │ Large base (1024B), zero exp, empty modulus │
+ │ Z3 │ S │ - │ C │ False |253952 │ Base, large zero exp (1024B), empty modulus │
+ │ Z4 │ S │ - │ D │ False │253952 │ Base, large exp (last byte=1), empty modulus │
+ │ Z5 │ S │ < │ A │ True │ 500 │ Empty base/exp, non-zero modulus only │
+ │ Z6 │ S │ < │ B │ False │ 3968 │ Empty base, non-zero exp and modulus │
+ │ Z7 │ L │ < │ B │ False │ 32768 │ Empty base, small exp, large modulus │
+ │ S0 │ S │ = │ A │ True │ 500 │ Small, equal, zero exp, clamped │
+ │ S1 │ S │ = │ B │ True │ 500 │ Small, equal, small exp, clamped │
+ │ S2 │ S │ = │ B │ False │ 4080 │ Small, equal, large exp, unclamped │
+ │ S3 │ S │ = │ C │ False │ 2048 │ Small, equal, large exp + zero low256 │
+ │ S4 │ S │ = │ D │ False │ 2048 │ Small, equal, large exp + non-zero low256 │
+ │ S5 │ S │ > │ A │ True │ 500 │ Small, base > mod, zero exp, clamped │
+ │ S6 │ S │ < │ B │ True │ 500 │ Small, base < mod, small exp, clamped │
+ │ L0 │ L │ = │ A │ True │ 500 │ Large, equal, zero exp, clamped │
+ │ L1 │ L │ = │ B │ False │ 12750 │ Large, equal, large exp, unclamped │
+ │ L2 │ L │ = │ C │ False │ 6400 │ Large, equal, large exp + zero low256 │
+ │ L3 │ L │ = │ D │ False │ 6400 │ Large, equal, large exp + non-zero low256 │
+ │ L4 │ L │ > │ B │ True │ 500 │ Large, base > mod, small exp, clamped │
+ │ L5 │ L │ < │ C │ False │ 9216 │ Large, base < mod, large exp + zero low256 │
+ │ B1 │ L │ < │ B │ True │ 500 │ Cross 32-byte boundary (31/33) │
+ │ B2 │ L │ > │ B │ True │ 500 │ Cross 32-byte boundary (33/31) │
+ │ B4 │ L │ = │ B │ True │ 500 │ Just over 32-byte boundary │
+ │ Z8 │ S │ = │ A │ True │ 500 │ All zeros except modulus │
+ │ Z9 │ S │ = │ A │ True │ 500 │ Zero modulus special case │
+ │ Z10 │ S │ = │ B │ False │ 3968 │ Zero base, large exponent │
+ │ Z11 │ S │ = │ C │ True │ 500 │ Zero base, 33B zero exp, non-zero modulus │
+ │ Z12 │ S │ = │ C │ False |253952 │ Zero base, large zero exp, non-zero modulus │
+ │ Z13 │ L │ > │ A │ False │ 32768 │ Large zero base, zero exp, non-zero modulus │
+ │ Z14 │ S │ = │ C │ False |253952 │ Base, large zero exp, zero modulus │
+ │ Z15 │ L │ < │ B │ False │ 32768 │ Base, small exp, large zero modulus │
+ │ Z16 │ L │ < │ C │ False │520060928│ Zero base, zero exp, large modulus (gas cap) |
+ │ M1 │ L │ = │ D │ False │ 98176 │ Maximum values stress test │
+ │ M2 │ S │ = │ B │ True │ 500 │ Max base/mod, small exponent │
+ │ M3 │ L │ < │ D │ False │ 98176 │ Small base, max exponent/mod │
+ │ T2 │ S │ = │ B │ True │ 500 │ Tiny maximum values │
+ │ P2 │ S │ = │ B │ False │ 4080 │ High bit in exponent │
+ │ P3 │ L │ = │ D │ False │ 1150 │ Specific bit pattern in large exponent │
+ │ A1 │ L │ < │ C │ False │ 65536 │ Asymmetric: tiny base, large exp/mod │
+ │ A2 │ L │ > │ B │ True │ 500 │ Asymmetric: large base, tiny exp/mod │
+ │ A3 │ L │ > │ C │ False │ 65536 │ Asymmetric: large base/exp, tiny modulus │
+ │ W2 │ S │ = │ B │ True │ 500 │ Exactly 8-byte words │
+ │ E1 │ S │ = │ D │ True │ 500 │ Exponent exactly 33 bytes │
+ │ E2 │ S │ = │ B │ False │ 4080 │ High bit in exponent first byte │
+ │ E3 │ S │ = │ B │ True │ 500 │ High bit in exponent last byte │
+ │ E4 │ S │ = │ B │ False │ 4064 │ Maximum 32-byte exponent │
+ │ IC1 │ L │ = │ B │ True │ 500 │ Bit shift vs multiplication @ 33 bytes │
+ │ IC3 │ S │ = │ B │ True │ 500 │ Ceiling division at 7 bytes │
+ │ IC4 │ S │ = │ B │ True │ 500 │ Ceiling division at 9 bytes │
+ │ IC5 │ S │ = │ B │ False │ 2160 │ Bit counting in middle of exponent │
+ │ IC6 │ L │ = │ B │ True │ 500 │ Native library even byte optimization │
+ │ IC7 │ L │ = │ B │ True │ 500 │ Vector optimization 128-bit boundary │
+ │ IC9 │ S │ = │ B │ N/A │ N/A │ Zero modulus handling │
+ │ IC10│ S │ = │ B │ False │ 4080 │ Power-of-2 boundary with high bit │
+ └─────┴──────┴─────┴──────┴───────┴─────────┴───────────────────────────────────────────────┘
+ """ # noqa: W505
for base, exponent, modulus, expected_result, gas_usage, test_id in test_cases:
yield pytest.param(
ModExpInput(base=base, exponent=exponent, modulus=modulus),
@@ -662,13 +676,14 @@ def test_modexp_variable_gas_cost(
@pytest.mark.valid_from("Berlin")
def test_modexp_variable_gas_cost_exceed_tx_gas_cap(state_test, pre, tx, post):
"""
- Test ModExp variable gas cost.
- Inputs with an expected gas cost over the EIP-7825 tx gas cap.
- """
- # Test case coverage table (gas cap):
- # ┌─────┬──────┬─────┬──────┬───────┬─────────┬───────────────────────────────────────────────┐
- # │ ID │ Comp │ Rel │ Iter │ Clamp │ Gas │ Description │
- # ├─────┼──────┼─────┼──────┼───────┼─────────┼───────────────────────────────────────────────┤
- # │ Z16 │ L │ < │ C │ False │520060928│ Zero base, zero exp, large modulus (gas cap) |
- # └─────┴──────┴─────┴──────┴───────┴─────────┴───────────────────────────────────────────────┘
+ Test ModExp variable gas cost. Inputs with an expected gas cost over the
+ EIP-7825 tx gas cap.
+
+ Test case coverage table (gas cap):
+ ┌─────┬──────┬─────┬──────┬───────┬─────────┬───────────────────────────────────────────────┐
+ │ ID │ Comp │ Rel │ Iter │ Clamp │ Gas │ Description │
+ ├─────┼──────┼─────┼──────┼───────┼─────────┼───────────────────────────────────────────────┤
+ │ Z16 │ L │ < │ C │ False │520060928│ Zero base, zero exp, large modulus (gas cap) |
+ └─────┴──────┴─────┴──────┴───────┴─────────┴───────────────────────────────────────────────┘
+ """ # noqa: W505
state_test(pre=pre, tx=tx, post=post)
diff --git a/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds_transition.py b/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds_transition.py
index 5c4499ace62..5d779ae627c 100644
--- a/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds_transition.py
+++ b/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds_transition.py
@@ -1,4 +1,6 @@
-"""Test ModExp gas cost transition from EIP-7883 before and after the Osaka hard fork."""
+"""
+Test ModExp gas cost transition from EIP-7883 before & after the Osaka fork.
+"""
import pytest
@@ -38,7 +40,10 @@ def test_modexp_fork_transition(
modexp_input: ModExpInput,
modexp_expected: bytes,
):
- """Test ModExp gas cost transition from EIP-7883 before and after the Osaka hard fork."""
+ """
+ Test ModExp gas cost transition from EIP-7883 before and after the Osaka
+ hard fork.
+ """
call_code = Op.CALL(
address=Spec.MODEXP_ADDRESS,
args_size=Op.CALLDATASIZE,
diff --git a/tests/osaka/eip7918_blob_reserve_price/conftest.py b/tests/osaka/eip7918_blob_reserve_price/conftest.py
index 027cf971349..4f5f9d81bd5 100644
--- a/tests/osaka/eip7918_blob_reserve_price/conftest.py
+++ b/tests/osaka/eip7918_blob_reserve_price/conftest.py
@@ -56,7 +56,9 @@ def parent_excess_blob_gas(
parent_excess_blobs: int | None,
blob_gas_per_blob: int,
) -> int | None:
- """Calculate the excess blob gas of the parent block from the excess blobs."""
+ """
+ Calculate the excess blob gas of the parent block from the excess blobs.
+ """
if parent_excess_blobs is None:
return None
assert parent_excess_blobs >= 0
@@ -68,7 +70,8 @@ def blobs_per_tx() -> int:
"""
Total number of blobs per transaction.
- Can be overloaded by a test case to provide a custom blobs per transaction count.
+ Can be overloaded by a test case to provide a custom blobs per transaction
+ count.
"""
return 1
@@ -85,7 +88,10 @@ def block_base_fee_per_gas(
parent_excess_blobs: int | None,
block_base_fee_per_gas_delta: int,
) -> int:
- """Block base fee per gas. Default is 7 unless a delta is provided or overloaded."""
+ """
+ Block base fee per gas. Default is 7 unless a delta is provided or
+ overloaded.
+ """
if block_base_fee_per_gas_delta != 0:
if parent_excess_blobs is None:
blob_base_fee = 1
@@ -106,7 +112,8 @@ def excess_blob_gas(
block_base_fee_per_gas: int,
) -> int | None:
"""
- Calculate the excess blob gas of the block under test from the parent block.
+ Calculate the excess blob gas of the block under test from the parent
+ block.
Value can be overloaded by a test case to provide a custom excess blob gas.
"""
@@ -127,7 +134,8 @@ def correct_excess_blob_gas(
block_base_fee_per_gas: int,
) -> int:
"""
- Calculate the correct excess blob gas of the block under test from the parent block.
+ Calculate the correct excess blob gas of the block under test from the
+ parent block.
Should not be overloaded by a test case.
"""
@@ -193,7 +201,9 @@ def env(
block_base_fee_per_gas: int,
genesis_excess_blob_gas: int,
) -> Environment:
- """Prepare the environment of the genesis block for all blockchain tests."""
+ """
+ Prepare the environment of the genesis block for all blockchain tests.
+ """
return Environment(
excess_blob_gas=genesis_excess_blob_gas,
blob_gas_used=0,
diff --git a/tests/osaka/eip7918_blob_reserve_price/spec.py b/tests/osaka/eip7918_blob_reserve_price/spec.py
index 32c162bb45b..60d480da5af 100644
--- a/tests/osaka/eip7918_blob_reserve_price/spec.py
+++ b/tests/osaka/eip7918_blob_reserve_price/spec.py
@@ -20,8 +20,8 @@ class ReferenceSpec:
@dataclass(frozen=True)
class Spec(EIP4844Spec):
"""
- Parameters from the EIP-7918 specifications.
- Extends EIP-4844 spec with the new reserve price constant and functionality.
+ Parameters from the EIP-7918 specifications. Extends EIP-4844 spec with the
+ new reserve price constant and functionality.
"""
BLOB_BASE_COST = 2**13
@@ -50,6 +50,8 @@ def calc_effective_blob_base_fee(
base_fee_per_gas: int,
blob_base_fee: int,
) -> int:
- """Calculate the effective blob base fee considering the reserve price."""
+ """
+ Calculate the effective blob base fee considering the reserve price.
+ """
reserve_price = cls.get_reserve_price(base_fee_per_gas)
return max(reserve_price, blob_base_fee)
diff --git a/tests/osaka/eip7918_blob_reserve_price/test_blob_base_fee.py b/tests/osaka/eip7918_blob_reserve_price/test_blob_base_fee.py
index 1b1d902b316..820f19b54a4 100644
--- a/tests/osaka/eip7918_blob_reserve_price/test_blob_base_fee.py
+++ b/tests/osaka/eip7918_blob_reserve_price/test_blob_base_fee.py
@@ -1,8 +1,9 @@
"""
-abstract: [EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918)
- Test the blob base fee reserve price mechanism for [EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918).
-""" # noqa: E501
+Test the blob base fee reserve price mechanism.
+
+[EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918).
+"""
from typing import Dict, List
@@ -145,7 +146,10 @@ def test_reserve_price_various_base_fee_scenarios(
block: Block,
post: Dict[Address, Account],
):
- """Test reserve price mechanism across various block base fee and excess blob gas scenarios."""
+ """
+ Test reserve price mechanism across various block base fee and excess blob
+ gas scenarios.
+ """
blockchain_test(
pre=pre,
post=post,
@@ -156,7 +160,8 @@ def test_reserve_price_various_base_fee_scenarios(
@pytest.mark.parametrize_by_fork(
"parent_excess_blobs",
- # Keep max assuming this will be greater than 20 in the future, to test a blob fee of > 1 :)
+ # Keep max assuming this will be greater than 20 in the future, to test a
+ # blob fee of > 1 :)
lambda fork: [0, 3, fork.target_blobs_per_block(), fork.max_blobs_per_block()],
)
@pytest.mark.parametrize("block_base_fee_per_gas_delta", [-2, -1, 0, 1, 10, 100])
@@ -168,17 +173,30 @@ def test_reserve_price_boundary(
post: Dict[Address, Account],
):
"""
- Tests the reserve price boundary mechanism. Note the default block base fee per gas is 7 (delta is 0).
- With a non zero delta the block base fee per gas is set to (boundary * blob base fee) + delta.
-
- Example scenarios from parametrization, assume parent_excess_blobs = 3:
- delta=-2: blob_base_fee=1, boundary=8, block_base_fee_per_gas=8+(-2)=6, 6 < 8, reserve inactive, effective_fee=1
- delta=0: blob_base_fee=1, boundary=8, block_base_fee_per_gas=7, 7 < 8, reserve inactive, effective_fee=1
- delta=100: blob_base_fee=1, boundary=8, block_base_fee_per_gas=8+100=108, 108 > 8, reserve active, effective_fee=max(108/8, 1)=13
-
- All values give a blob base_ fee of 1 because we need a much higher excess blob gas
- to increase the blob fee. This only increases to 2 at 20 excess blobs.
- """ # noqa: E501
+ Tests the reserve price boundary mechanism. Note the default block base fee
+ per gas is 7 (delta is 0). With a non zero delta the block base fee per gas
+ is set to (boundary * blob base fee) + delta.
+
+    Example scenarios from parametrization, assuming
+    parent_excess_blobs = 3:
+
+        delta=-2:
+            blob_base_fee=1, boundary=8,
+            block_base_fee_per_gas=8+(-2)=6, 6 < 8,
+            reserve inactive, effective_fee=1
+        delta=0:
+            blob_base_fee=1, boundary=8,
+            block_base_fee_per_gas=7, 7 < 8,
+            reserve inactive, effective_fee=1
+        delta=100:
+            blob_base_fee=1, boundary=8,
+            block_base_fee_per_gas=8+100=108, 108 > 8,
+            reserve active, effective_fee=max(108/8, 1)=13
+
+    All values give a blob base fee of 1 because we need a much higher excess
+ blob gas to increase the blob fee. This only increases to 2 at 20 excess
+ blobs.
+ """
blockchain_test(
genesis_environment=env,
pre=pre,
diff --git a/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo.py b/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo.py
index 07e1d69711b..b52dce2943b 100644
--- a/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo.py
+++ b/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo.py
@@ -1,8 +1,7 @@
"""
-abstract: [EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918)
- Test the blob base fee reserve price mechanism for [EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918).
-
-""" # noqa: E501
+Test the blob base fee reserve price mechanism with BPO forks for
+[EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918).
+"""
import pytest
diff --git a/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo_transitions.py b/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo_transitions.py
index 7bf2883b265..b41b9eb298f 100644
--- a/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo_transitions.py
+++ b/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo_transitions.py
@@ -99,8 +99,8 @@ def genesis_base_fee_per_gas(
parent_base_fee_per_gas: int,
) -> int:
"""Genesis base fee per gas."""
- # Base fee always drops from genesis to block 1 because the genesis block never uses
- # any tx gas.
+ # Base fee always drops from genesis to block 1 because the genesis block
+ # never uses any tx gas.
return (parent_base_fee_per_gas * fork.base_fee_max_change_denominator()) // 7
@@ -151,10 +151,12 @@ def env(
) -> Environment:
"""Environment for the test."""
return Environment(
- # Excess blob gas always drops from genesis to block 1 because genesis uses no blob gas.
+ # Excess blob gas always drops from genesis to block 1 because genesis
+ # uses no blob gas.
excess_blob_gas=genesis_excess_blob_gas,
base_fee_per_gas=genesis_base_fee_per_gas,
- gas_limit=16_000_000, # To make it easier to reach the requirement with a single tx
+        # To make it easier to reach the requirement with a single tx.
+        gas_limit=16_000_000,
)
@@ -234,8 +236,8 @@ def parent_block_txs(
"""
Transactions included in the block prior to the fork transition fork.
- Includes blob transactions to raise the `parent_blob_gas_used` and normal transactions
- to raise/lower the base fee per gas.
+ Includes blob transactions to raise the `parent_blob_gas_used` and normal
+ transactions to raise/lower the base fee per gas.
"""
parent_block_blob_txs = get_blob_transactions(
blob_count=parent_blob_count,
@@ -386,8 +388,8 @@ def target_blob_gas_per_block(self) -> int:
def calculate_excess_blob_gas(self, parent_header: ParentHeader) -> int:
"""
- Calculate the excess blob gas for the current block based
- on the gas used in the parent block.
+ Calculate the excess blob gas for the current block based on the gas
+ used in the parent block.
"""
excess_blob_gas_calculator = self.fork.excess_blob_gas_calculator(timestamp=self.timestamp)
return excess_blob_gas_calculator(
@@ -400,8 +402,8 @@ def execution_base_fee_threshold_from_excess_blob_gas(
self, excess_blob_gas: int
) -> int | None:
"""
- Return the minimum base fee required to trigger the reserve mechanism, or None
- for blob schedules that don't have a reserve price mechanism.
+ Return the minimum base fee required to trigger the reserve mechanism,
+ or None for blob schedules that don't have a reserve price mechanism.
"""
if self.blob_base_cost is None:
return None
@@ -414,8 +416,8 @@ def execution_base_fee_threshold_from_excess_blob_gas(
def get_fork_scenarios(fork: Fork) -> Iterator[ParameterSet]:
"""
- Return the list of scenarios at the fork boundary depending on the source fork and
- transition fork properties.
+ Return the list of scenarios at the fork boundary depending on the source
+ fork and transition fork properties.
"""
source_blob_schedule = BlobSchedule(fork=fork, timestamp=0)
transition_blob_schedule = BlobSchedule(fork=fork, timestamp=15_000)
@@ -439,16 +441,16 @@ def get_fork_scenarios(fork: Fork) -> Iterator[ParameterSet]:
source_execution_threshold != transition_execution_threshold
and transition_execution_threshold is not None
):
- # The source base fee reserve threshold is different from the transition one
- # given the excess blob gas.
- # We can verify that the BPO is activated correctly by using the a setup block
- # with transition_execution_threshold to trigger the reserve.
+ # The source base fee reserve threshold is different from the
+ # transition one given the excess blob gas. We can verify that the
+ # BPO is activated correctly by using a setup block with
+ # transition_execution_threshold to trigger the reserve.
for source_blob_count in [0, source_blob_schedule.target, source_blob_schedule.max]:
- # Scenario 1: Parent base fee per gas is below the threshold at the
- # parent of the transition block, so even though the base fee increases on
- # the transition block to reach the value required to activate the reserve,
- # since the base fee per gas of the parent is used, the reserve must not be
- # activated.
+ # Scenario 1: Parent base fee per gas is below the threshold at
+ # the parent of the transition block, so even though the base
+ # fee increases on the transition block to reach the value
+ # required to activate the reserve, since the base fee per gas
+ # of the parent is used, the reserve must not be activated.
parent_base_fee = transition_execution_threshold - 1
transition_base_fee = transition_execution_threshold
parent_header = ParentHeader(
@@ -477,9 +479,9 @@ def get_fork_scenarios(fork: Fork) -> Iterator[ParameterSet]:
),
)
- # Scenario 2: Parent base fee per gas is at the threshold, so the reserve
- # is activated even though the base fee per gas decreases below the
- # threshold on the transition block.
+ # Scenario 2: Parent base fee per gas is at the threshold, so
+ # the reserve is activated even though the base fee per gas
+ # decreases below the threshold on the transition block.
parent_base_fee = transition_execution_threshold
transition_base_fee = transition_execution_threshold - 1
parent_header = ParentHeader(
@@ -509,7 +511,8 @@ def get_fork_scenarios(fork: Fork) -> Iterator[ParameterSet]:
)
if fork == BPO2ToBPO3AtTime15k:
- # Explicitly add the exact scenario that triggered the Fusaka Devnet-4 fork.
+ # Explicitly add the exact scenario that triggered the Fusaka Devnet-4
+ # fork.
yield pytest.param(
0x32,
0x125BF5F,
@@ -541,7 +544,10 @@ def test_reserve_price_at_transition(
transition_block: Block,
env: Environment,
):
- """Test reserve price mechanism across various block base fee and excess blob gas scenarios."""
+ """
+ Test reserve price mechanism across various block base fee and excess blob
+ gas scenarios.
+ """
blockchain_test(
pre=pre,
post={},
diff --git a/tests/osaka/eip7934_block_rlp_limit/__init__.py b/tests/osaka/eip7934_block_rlp_limit/__init__.py
index 5c84ada8706..f2bdbe02266 100644
--- a/tests/osaka/eip7934_block_rlp_limit/__init__.py
+++ b/tests/osaka/eip7934_block_rlp_limit/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934)
- Test cases for [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934).
+Tests for [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934).
"""
diff --git a/tests/osaka/eip7934_block_rlp_limit/test_max_block_rlp_size.py b/tests/osaka/eip7934_block_rlp_limit/test_max_block_rlp_size.py
index 575c8c002c9..8fb3550b717 100644
--- a/tests/osaka/eip7934_block_rlp_limit/test_max_block_rlp_size.py
+++ b/tests/osaka/eip7934_block_rlp_limit/test_max_block_rlp_size.py
@@ -1,6 +1,5 @@
"""
-abstract: Test [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934)
- Tests for [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934).
+Tests for [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934).
"""
from functools import lru_cache
@@ -59,7 +58,9 @@ def block_size_limit(fork: Fork) -> int:
@pytest.fixture
def block_errors() -> List[BlockException]:
- """Block exceptions expected for blocks that exceed the `MAX_RLP_BLOCK_SIZE`."""
+ """
+ Block exceptions expected for blocks that exceed the `MAX_RLP_BLOCK_SIZE`.
+ """
return [BlockException.RLP_BLOCK_LIMIT_EXCEEDED]
@@ -121,8 +122,8 @@ def exact_size_transactions(
"""
Generate transactions that fill a block to exactly the RLP size limit.
- The calculation uses caching to avoid recalculating the same block rlp for each
- fork. Calculate the block and fill with real sender for testing.
+ The calculation uses caching to avoid recalculating the same block rlp for
+ each fork. Calculate the block and fill with real sender for testing.
Args:
sender: The sender account
@@ -131,7 +132,8 @@ def exact_size_transactions(
pre: Required if emit_logs is True, used to deploy the log contract
gas_limit: The gas limit for the block
emit_logs: If True, transactions will call a contract that emits logs
- specific_transaction_to_include: If provided, this transaction will be included
+ specific_transaction_to_include: If provided, this transaction will
+ be included
"""
log_contract = None
@@ -190,8 +192,8 @@ def _exact_size_transactions_cached(
emit_logs_contract: Address | None = None,
) -> Tuple[List[Transaction], int]:
"""
- Generate transactions that fill a block to exactly the RLP size limit. Abstracted
- with hashable arguments for caching block calculations.
+ Generate transactions that fill a block to exactly the RLP size limit.
+ Abstracted with hashable arguments for caching block calculations.
"""
return _exact_size_transactions_impl(
block_size_limit,
@@ -212,8 +214,8 @@ def _exact_size_transactions_impl(
emit_logs_contract: Address | None = None,
) -> Tuple[List[Transaction], int]:
"""
- Calculate the exact size of transactions to be included. Shared by both cached and
- non-cached paths.
+ Calculate the exact size of transactions to be included. Shared by both
+ cached and non-cached paths.
"""
transactions = []
nonce = 0
@@ -224,9 +226,10 @@ def _exact_size_transactions_impl(
data_large = Bytes(b"\x00" * 500_000)
gas_limit_large = calculator(calldata=data_large)
- # block with 16 transactions + large calldata remains safely below the limit
- # add 15 generic transactions to fill the block and one typed transaction
- # if tx_type is specified, otherwise just add 16 generic transactions
+ # block with 16 transactions + large calldata remains safely below the
+ # limit. Add 15 generic transactions to fill the block and one typed
+ # transaction if tx_type is specified, otherwise just add 16 generic
+ # transactions
not_all_generic_txs = any(
kwarg is not None for kwarg in [specific_transaction_to_include, emit_logs_contract]
)
@@ -492,7 +495,10 @@ def test_block_at_rlp_limit_with_logs(
fork: Fork,
block_size_limit: int,
):
- """Test that a block at the RLP size limit is valid even when transactions emit logs."""
+ """
+ Test that a block at the RLP size limit is valid even when transactions
+ emit logs.
+ """
transactions, gas_used = exact_size_transactions(
sender,
block_size_limit,
diff --git a/tests/osaka/eip7939_count_leading_zeros/__init__.py b/tests/osaka/eip7939_count_leading_zeros/__init__.py
index 23769774cfe..1e9f1a4156c 100644
--- a/tests/osaka/eip7939_count_leading_zeros/__init__.py
+++ b/tests/osaka/eip7939_count_leading_zeros/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-7939: Count leading zeros (CLZ) opcode](https://eips.ethereum.org/EIPS/eip-7939)
- Test cases for [EIP-7939: Count leading zeros (CLZ) opcode](https://eips.ethereum.org/EIPS/eip-7939).
+Tests [EIP-7939: Count leading zeros (CLZ) opcode](https://eips.ethereum.org/EIPS/eip-7939).
"""
diff --git a/tests/osaka/eip7939_count_leading_zeros/test_count_leading_zeros.py b/tests/osaka/eip7939_count_leading_zeros/test_count_leading_zeros.py
index 04cb4f2a26e..f2592a81898 100644
--- a/tests/osaka/eip7939_count_leading_zeros/test_count_leading_zeros.py
+++ b/tests/osaka/eip7939_count_leading_zeros/test_count_leading_zeros.py
@@ -1,6 +1,5 @@
"""
-abstract: Tests [EIP-7939: Count leading zeros (CLZ) opcode](https://eips.ethereum.org/EIPS/eip-7939)
- Test cases for [EIP-7939: Count leading zeros (CLZ) opcode](https://eips.ethereum.org/EIPS/eip-7939).
+Tests [EIP-7939: Count leading zeros (CLZ)](https://eips.ethereum.org/EIPS/eip-7939).
"""
import pytest
@@ -97,7 +96,8 @@ def test_clz_opcode_scenarios(
Test CLZ opcode functionality.
Cases:
- - Format 0xb000...111: leading zeros followed by ones (2**256 - 1 >> bits)
+ - Format 0xb000...111: leading zeros followed by ones
+ (2**256 - 1 >> bits)
- Format 0xb010...000: single bit set at position (1 << bits)
Test coverage:
@@ -186,7 +186,9 @@ def test_clz_gas_cost_boundary(
@EIPChecklist.Opcode.Test.StackComplexOperations.StackHeights.Zero()
@pytest.mark.valid_from("Osaka")
def test_clz_stack_underflow(state_test: StateTestFiller, pre: Alloc):
- """Test CLZ opcode with empty stack (should revert due to stack underflow)."""
+ """
+ Test CLZ opcode with empty stack (should revert due to stack underflow).
+ """
sender = pre.fund_eoa()
callee_address = pre.deploy_contract(
code=Op.CLZ + Op.STOP, # No stack items, should underflow
@@ -330,9 +332,12 @@ def test_clz_fork_transition(blockchain_test: BlockchainTestFiller, pre: Alloc):
),
callee_address: Account(
storage={
- 14_999: "0xdeadbeef", # CLZ not valid before fork, storage unchanged
- 15_000: 155, # CLZ valid on transition block, CLZ(1 << 100) = 155
- 15_001: 155, # CLZ continues to be valid after transition
+ # CLZ not valid before fork, storage unchanged
+ 14_999: "0xdeadbeef",
+ # CLZ valid on transition block, CLZ(1 << 100) = 155
+ 15_000: 155,
+ # CLZ continues to be valid after transition
+ 15_001: 155,
}
),
},
@@ -468,7 +473,8 @@ def test_clz_code_copy_operation(state_test: StateTestFiller, pre: Alloc, bits:
address=target_address, dest_offset=0, offset=clz_code_offset, size=1
)
)
- + Op.SSTORE(storage.store_next(mload_value), Op.MLOAD(0)) # Store loaded CLZ byte
+ # Store loaded CLZ byte
+ + Op.SSTORE(storage.store_next(mload_value), Op.MLOAD(0))
),
storage={"0x00": "0xdeadbeef"},
)
@@ -505,8 +511,9 @@ def test_clz_with_memory_operation(state_test: StateTestFiller, pre: Alloc, bits
# MSTORE
#
# This sequence stores a 32-byte value in memory.
- # Later, we copy the immediate value from the PUSH32 instruction into memory
- # using CODECOPY or EXTCODECOPY, and then load it with MLOAD for the CLZ test.
+ # Later, we copy the immediate value from the PUSH32 instruction into
+ # memory using CODECOPY or EXTCODECOPY, and then load it with MLOAD for
+ # the CLZ test.
target_code = Op.PUSH32(1 << bits)
offset = 1
diff --git a/tests/osaka/eip7951_p256verify_precompiles/__init__.py b/tests/osaka/eip7951_p256verify_precompiles/__init__.py
index dc20209ead1..a5fa2c7cfc6 100644
--- a/tests/osaka/eip7951_p256verify_precompiles/__init__.py
+++ b/tests/osaka/eip7951_p256verify_precompiles/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951)
- Test cases for [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951)].
+Tests [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951).
"""
diff --git a/tests/osaka/eip7951_p256verify_precompiles/conftest.py b/tests/osaka/eip7951_p256verify_precompiles/conftest.py
index 17833d4408f..179eebd87ca 100644
--- a/tests/osaka/eip7951_p256verify_precompiles/conftest.py
+++ b/tests/osaka/eip7951_p256verify_precompiles/conftest.py
@@ -16,9 +16,11 @@ def vector_gas_value() -> int | None:
"""
Gas value from the test vector if any.
- If `None` it means that the test scenario did not come from a file, so no comparison is needed.
+ If `None` it means that the test scenario did not come from a file, so no
+ comparison is needed.
- The `vectors_from_file` function reads the gas value from the file and overwrites this fixture.
+ The `vectors_from_file` function reads the gas value from the file and
+ overwrites this fixture.
"""
return None
@@ -38,9 +40,10 @@ def precompile_gas_modifier() -> int:
"""
Modify the gas passed to the precompile, for testing purposes.
- By default the call is made with the exact gas amount required for the given opcode,
- but when this fixture is overridden, the gas amount can be modified to, e.g., test
- a lower amount and test if the precompile call fails.
+ By default the call is made with the exact gas amount required for the
+ given opcode, but when this fixture is overridden, the gas amount can be
+ modified to, e.g., test a lower amount and test if the precompile call
+ fails.
"""
return 0
@@ -59,7 +62,10 @@ def call_opcode() -> Op:
def call_contract_post_storage() -> Storage:
"""
Storage of the test contract after the transaction is executed.
- Note: Fixture `call_contract_code` fills the actual expected storage values.
+
+ Note:
+ Fixture `call_contract_code` fills the actual expected storage values.
+
"""
return Storage()
@@ -67,8 +73,8 @@ def call_contract_post_storage() -> Storage:
@pytest.fixture
def call_succeeds() -> bool:
"""
- By default, depending on the expected output, we can deduce if the call is expected to succeed
- or fail.
+ By default, depending on the expected output, we can deduce if the call is
+ expected to succeed or fail.
"""
return True
@@ -130,7 +136,9 @@ def post(call_contract_address: Address, call_contract_post_storage: Storage):
@pytest.fixture
def tx_gas_limit(fork: Fork, input_data: bytes, precompile_gas: int) -> int:
- """Transaction gas limit used for the test (Can be overridden in the test)."""
+ """
+ Transaction gas limit used for the test (Can be overridden in the test).
+ """
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 100_000
diff --git a/tests/osaka/eip7951_p256verify_precompiles/helpers.py b/tests/osaka/eip7951_p256verify_precompiles/helpers.py
index c220f0e517d..320f04e4e30 100644
--- a/tests/osaka/eip7951_p256verify_precompiles/helpers.py
+++ b/tests/osaka/eip7951_p256verify_precompiles/helpers.py
@@ -9,7 +9,10 @@
def current_python_script_directory(*args: str) -> str:
- """Get the current Python script directory, optionally appending additional path components."""
+ """
+ Get the current Python script directory, optionally appending additional
+ path components.
+ """
return os.path.join(os.path.dirname(os.path.realpath(__file__)), *args)
@@ -24,7 +27,10 @@ class Vector(BaseModel):
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self):
- """Convert the test vector to a tuple that can be used as a parameter in a pytest test."""
+ """
+ Convert the test vector to a tuple that can be used as a parameter in a
+ pytest test.
+ """
return pytest.param(self.input, self.expected, self.gas, id=self.name)
@@ -38,7 +44,10 @@ class FailVector(BaseModel):
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self):
- """Convert the test vector to a tuple that can be used as a parameter in a pytest test."""
+ """
+ Convert the test vector to a tuple that can be used as a parameter in a
+ pytest test.
+ """
return pytest.param(self.input, id=self.name)
diff --git a/tests/osaka/eip7951_p256verify_precompiles/spec.py b/tests/osaka/eip7951_p256verify_precompiles/spec.py
index 84532af2e82..eacb89633bc 100644
--- a/tests/osaka/eip7951_p256verify_precompiles/spec.py
+++ b/tests/osaka/eip7951_p256verify_precompiles/spec.py
@@ -104,7 +104,8 @@ class Spec:
INVALID_RETURN_VALUE = b""
DELEGATION_DESIGNATION = Bytes("ef0100")
- # Test constants (from https://github.com/C2SP/wycheproof/blob/4a6c2bf5dc4c0b67c770233ad33961ee653996a0/testvectors/ecdsa_secp256r1_sha256_test.json#L35)
+ # Test constants, from:
+ # https://github.com/C2SP/wycheproof/blob/4a6c2bf5dc4c0b67c770233ad33961ee653996a0/testvectors/ecdsa_secp256r1_sha256_test.json#L35
H0 = H(0xBB5A52F42F9C9261ED4361F59422A1E30036E7C32B270C8807A419FECA605023)
R0 = R(0x2BA3A8BE6B94D5EC80A6D9D1190A436EFFE50D85A1EEE859B8CC6AF9BD5C2E18)
S0 = S(0x4CD60B855D442F5B3C7B11EB6C4E0AE7525FE710FAB9AA7C77A67F79E6FADD76)
diff --git a/tests/osaka/eip7951_p256verify_precompiles/test_p256verify.py b/tests/osaka/eip7951_p256verify_precompiles/test_p256verify.py
index bd188a84ca1..e908fe23107 100644
--- a/tests/osaka/eip7951_p256verify_precompiles/test_p256verify.py
+++ b/tests/osaka/eip7951_p256verify_precompiles/test_p256verify.py
@@ -1,6 +1,5 @@
"""
-abstract: Tests [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951)
- Test cases for [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951)].
+Tests for [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951).
"""
import pytest
@@ -36,8 +35,9 @@
+ vectors_from_file("secp256r1_u1_u2.json")
+ vectors_from_file("secp256r1_k_and_s.json")
+ vectors_from_file("secp256r1_public_key.json"),
- # Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256 test suite, valid cases
- # Source: https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
+ # Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256 test
+ # suite, valid cases are from this source:
+ # https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@@ -52,15 +52,20 @@ def test_wycheproof_valid(state_test: StateTestFiller, pre: Alloc, post: dict, t
"input_data,expected_output,vector_gas_value",
vectors_from_file("secp256r1_special_case_r_s.json")
+ vectors_from_file("secp256r1_modified_r_s.json"),
- # Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256 test suite, invalid cases
- # Source: https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
+ # Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256
+ # test suite, invalid cases
+ # Source:
+ # https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@EIPChecklist.Precompile.Test.Inputs.Invalid()
@EIPChecklist.Precompile.Test.Inputs.MaxValues()
def test_wycheproof_invalid(state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction):
- """Test P256Verify precompile with Wycheproof test suite (invalid cases)."""
+ """
+ Test P256Verify precompile with Wycheproof test suite
+ (invalid cases).
+ """
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@@ -68,15 +73,19 @@ def test_wycheproof_invalid(state_test: StateTestFiller, pre: Alloc, post: dict,
"input_data,expected_output,vector_gas_value",
vectors_from_file("secp256r1_small_large_r_s.json")
+ vectors_from_file("secp256r1_special_points.json"),
- # Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256 test suite,
- # valid/invalid cases
- # Source: https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
+ # Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256
+ # test suite, valid/invalid cases
+ # Source:
+ # https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@EIPChecklist.Precompile.Test.Inputs.MaxValues()
def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction):
- """Test P256Verify precompile with Wycheproof test suite (mixed valid/invalid cases)."""
+ """
+ Test P256Verify precompile with Wycheproof test suite
+ (mixed valid/invalid cases).
+ """
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@@ -169,11 +178,14 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
id="near_field_boundary_p_minus_3",
),
pytest.param(
- # Invalid curve attack: This point satisfies y² = x³ - 3x + 1 (mod p)
- # instead of the correct P-256 equation y² = x³ - 3x + b where
- # b = 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B
- # This tests that the implementation properly validates the curve equation
- # and rejects points on different curves (CVE-2020-0601 class vulnerability)
+ # Invalid curve attack: This point satisfies y² = x³ - 3x + 1
+ # (mod p) instead of the correct P-256 equation y² = x³ - 3x + b
+ # where b =
+ # 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B
+ #
+ # This tests that the implementation properly validates the curve
+ # equation and rejects points on different curves (CVE-2020-0601
+ # class vulnerability)
Spec.H0
+ Spec.R0
+ Spec.S0
@@ -183,9 +195,12 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: Singular curve with b = 0
- # Point satisfies y² = x³ - 3x (mod p) - a singular/degenerate curve
- # Singular curves have discriminant = 0 and provide no security guarantees
- # This tests rejection of points on curves with catastrophic security failures
+ # Point satisfies y² = x³ - 3x (mod p) - a singular/degenerate
+ # curve
+ # Singular curves have discriminant = 0 and provide no security
+ # guarantees.
+ # This tests rejection of points on curves with catastrophic
+ # security failures
Spec.H0
+ Spec.R0
+ Spec.S0
@@ -196,8 +211,11 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
pytest.param(
# Invalid curve attack: Boundary value b = p-1
# Point satisfies y² = x³ - 3x + (p-1) (mod p)
- # Tests proper parameter validation at modular arithmetic boundaries
- # Ensures implementations handle field arithmetic edge cases correctly
+ #
+ # Tests proper parameter validation at
+ # modular arithmetic boundaries.
+ # Ensures implementations handle field arithmetic edge cases
+ # correctly.
Spec.H0
+ Spec.R0
+ Spec.S0
@@ -208,8 +226,11 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
pytest.param(
# Invalid curve attack: Small discriminant curve with b = 2
# Point satisfies y² = x³ - 3x + 2 (mod p)
- # Curves with small discriminants are vulnerable to specialized attacks
- # Tests rejection of cryptographically weak curve parameters
+ #
+ # Curves with small discriminants are vulnerable to specialized
+ # attacks.
+ #
+ # Tests rejection of cryptographically weak curve parameters.
Spec.H0 + Spec.R0 + Spec.S0 + X(0x1) + Y(0x0),
id="invalid_curve_attack_small_discriminant",
),
@@ -228,7 +249,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
pytest.param(
# Invalid curve attack: Composite order curve with b = -Spec.B
# Random point which satisfies y² = x³ - 3x - Spec.B (mod p)
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0xC223E1538C4D7B5BBD3EF932736826FD64F4E8B5C80250D9E07A728689D13C38)
+ R(0x0C7CB59EF6BE7539397CC979AD9A87A3B73A0DD268BBA4990A3378C6391512D5)
+ S(0xF8C943685BCFE7864C0F8485CACD732D3A9F167531CAF26B67A3CB10B641F92C)
@@ -239,7 +261,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
pytest.param(
# Invalid curve attack: Composite order curve with b = -Spec.B
# Random point which satisfies y² = x³ - 3x - Spec.B (mod p)
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0x982D25BF8E0E81FF41AC3C8033604C78ED5EF17C6EDDA977072EAB6821A7AD0A)
+ R(0x7C1996FA0EC911E4739AE7340B5345823272F494DFA32034A4FE5642C3DB91F2)
+ S(0x1E4D6CCF1AFB675D18BD27274770C8B84028D272D1D2641E70B30E1DF17AF3DC)
@@ -249,7 +272,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0)
+ R(0xD21697149F598FEAE9A750DCA86AE6D5EFA654680BA748D2DF7053115101C129)
+ S(0xEF3FD943AD1F126B3EBA1A5900D79886755DB6DAFCB6B0117D86364340CE36CC)
@@ -259,7 +283,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0)
+ R(0x52E47C5D6AAB66AB6A18A694359EB86FDD40F10E79EF5493C5469EC88BA03334)
+ S(0x7584C5BF3CA2869C7E383B1603A935EEB79D990B7F7152E055EC562E87FD715E)
@@ -269,7 +294,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0)
+ R(0x81333B13B13F362253BD536D17563A72EB575F1993F55ED40E633E503F60B864)
+ S(0xE2208C4045F5241ECCF08F825399224C4B78595A10433EC33799DCAD7B0E1F4A)
@@ -279,7 +305,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0)
+ R(0x3C593B5857D1D0EB83923D73E76A7A53EF191BB210267D8C0BE17A4E34AB2E73)
+ S(0xD022359310067882F713AFBECECE71CB80E4857368F46AB0346362DB033ED298)
@@ -289,7 +316,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0)
+ R(0x425CFFCA652791CABFC81B1E4B7712DBA196599FABCE16978E06E6AF486B1FEC)
+ S(0x58B864B5A41CD17524E4773EC353C9590D792F601DA075AD9B3F40E8E7070E8A)
@@ -299,7 +327,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0x2DA0A74BE3122AEAEF5704D0EB27881FBFB918B4A5252B660935263D0569BA92)
+ R(0x5543729CBCFD99EE6C3B422D7F245903E7177B3A6A4E3C20C0DC5F5E109795AE)
+ S(0x96403D5BB253EBD7DEF44BCBC062FCD4EA5E358B19B67C13E625EFDF6B977597)
@@ -309,7 +338,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0x1F9D9B26DB42380C85F075174DDAF158F9DE4CD10C3104190D7AF96938DD8ECD)
+ R(0x159946DBC4F1DE68CD4096862A5B10E5986ACB32229D6E68884DC83DAB70A307)
+ S(0x63D80724A4074421F7DD255630794E3AEBE635B756D72B24652AAC07D01B289C)
@@ -319,7 +349,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0xD380DA9251F1FB809ED48C70DC8F81E91C471F0E81BC95E7611C653278A5B6B4)
+ R(0xFF197EB72A9E531B17B872525247E6564B786CC014ED28B6849CE7D8C976BDF2)
+ S(0x7B0B2EFF9BB5409052B35FD3FF81DCE77D95A1F75C46989817045120DA5C3C9C)
@@ -329,7 +360,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0x4B082B60497ED87FFE570612D521E73A2CD6C832744EF8E4E2E329E30D3D5879)
+ R(0x6665A88CB3FF30D339A1975FD46CF5EF480A68A093AB778550073D3528C3B609)
+ S(0xAEAADDB235E4AC6097356DB96161E27849EA8EDF1E971F74EB51E19A1CC950A1)
@@ -339,7 +371,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0x6CC2B605CFBDB22B9E7B55EFE8C1DA0F1C5A0EC1AA8D82EEDFB5EA70E9846E88)
+ R(0x3C593B5857D1D0EB83923D73E76A7A53EF191BB210267D8C0BE17A4E34AB2E73)
+ S(0xD022359310067882F713AFBECECE71CB80E4857368F46AB0346362DB033ED298)
@@ -349,7 +382,8 @@ def test_wycheproof_extra(state_test: StateTestFiller, pre: Alloc, post: dict, t
),
pytest.param(
# Invalid curve attack: random point bytes.
- # Without the curve check in the implementation, the signature checks out.
+ # Without the curve check in the implementation,
+ # the signature checks out.
H(0x810C1D53EA96A700C93F6AF1C183197B040EA6FEAE10564877A1C78EC6074FF1)
+ R(0x34D0F0C8E14D39002B5DEA00808957963E849503DDFD626323433047D696C7C4)
+ S(0x6A7FE39C046304317F799FB900877073F2AE3C798DD4414795551A833ABCBA85)
@@ -530,10 +564,12 @@ def test_precompile_will_return_success_with_tx_value(
# This tests the modular comparison: r' ≡ r (mod N)
pytest.param(
Spec.H0
- # R: A value that when used in ECDSA verification produces an x-coordinate > N
+ # R: A value that when used in ECDSA verification produces an
+ # x-coordinate > N
+ R(0x000000000000000000000000000000004319055358E8617B0C46353D039CDAAB)
+ S(0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC63254E)
- # X, Y: Public key coordinates that will produce x-coordinate > N during verification
+ # X, Y: Public key coordinates that will produce x-coordinate > N
+ # during verification
+ X(0x0AD99500288D466940031D72A9F5445A4D43784640855BF0A69874D2DE5FE103)
+ Y(0xC5011E6EF2C42DCD50D5D3D29F99AE6EBA2C80C9244F4C5422F0979FF0C3BA5E),
Spec.SUCCESS_RETURN_VALUE,
@@ -545,7 +581,8 @@ def test_precompile_will_return_success_with_tx_value(
+ Spec.S0
+ Spec.X0
+ Spec.Y0,
- Spec.INVALID_RETURN_VALUE, # Should fail because R = 1 is not a valid signature
+ # Should fail because R = 1 is not a valid signature
+ Spec.INVALID_RETURN_VALUE,
id="r_equals_n_plus_one",
),
pytest.param(
@@ -554,7 +591,8 @@ def test_precompile_will_return_success_with_tx_value(
+ Spec.S0
+ Spec.X0
+ Spec.Y0,
- Spec.INVALID_RETURN_VALUE, # Should fail because R = 2 is not a valid signature
+ # Should fail because R = 2 is not a valid signature
+ Spec.INVALID_RETURN_VALUE,
id="r_equals_n_plus_two",
),
],
@@ -566,9 +604,9 @@ def test_modular_comparison(state_test: StateTestFiller, pre: Alloc, post: dict,
"""
Test the modular comparison condition for secp256r1 precompile.
- This tests that when the x-coordinate of R' exceeds the curve order N,
- the verification should use modular arithmetic:
- r' ≡ r (mod N) instead of direct equality r' == r.
+ This tests that when the x-coordinate of R' exceeds the curve order N, the
+ verification should use modular arithmetic: r' ≡ r (mod N) instead of
+ direct equality r' == r.
"""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
diff --git a/tests/osaka/eip7951_p256verify_precompiles/test_p256verify_before_fork.py b/tests/osaka/eip7951_p256verify_precompiles/test_p256verify_before_fork.py
index b331b55c369..9f8f692ad30 100644
--- a/tests/osaka/eip7951_p256verify_precompiles/test_p256verify_before_fork.py
+++ b/tests/osaka/eip7951_p256verify_precompiles/test_p256verify_before_fork.py
@@ -1,7 +1,11 @@
"""
-abstract: Tests P256VERIFY precompiles of [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951)
- Tests P256VERIFY precompiles of [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951)
- before the Osaka hard fork is active.
+Tests P256VERIFY precompiles of [EIP-7951: Precompile for secp256r1
+Curve Support](https://eips.ethereum.org/EIPS/eip-7951).
+
+Tests P256VERIFY precompiles of
+[EIP-7951: Precompile for secp256r1
+Curve Support](https://eips.ethereum.org/EIPS/eip-7951) before the
+Osaka hard fork is active.
"""
import pytest
diff --git a/tests/paris/security/test_selfdestruct_balance_bug.py b/tests/paris/security/test_selfdestruct_balance_bug.py
index 1122d2f460f..99bed4248a4 100644
--- a/tests/paris/security/test_selfdestruct_balance_bug.py
+++ b/tests/paris/security/test_selfdestruct_balance_bug.py
@@ -1,13 +1,15 @@
"""
-bug: Tests the Consensus Flaw During Block Processing related to SELFDESTRUCT
- Tests the consensus-vulnerability reported in
- [go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4).
+Tests the Consensus Flaw During Block Processing related to SELFDESTRUCT.
+
+Tests the consensus-vulnerability reported in go-ethereum security
+advisory GHSA-xw37-57qp-9mm4:
+https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4
To reproduce the issue with this test case:
1. Fill the test with the most recent geth evm version.
-2. Run the fixture output within a vulnerable geth version: v1.9.20 > geth >=
- v1.9.4.
+2. Run the fixture output within a vulnerable geth version:
+ v1.9.20 > geth >= v1.9.4.
"""
import pytest
@@ -33,18 +35,18 @@ def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, pre:
`0xaa` contract after executing specific transactions.
1. Start with contract `0xaa` which has initial balance of 3 wei.
- `0xaa` contract code simply performs a self-destruct to itself.
+ `0xaa` contract code simply performs a self-destruct to itself.
- 2. Send a transaction (tx 1) to invoke caller contract `0xcc` (which
- has a balance of 1 wei), which in turn invokes `0xaa` with a 1 wei call.
+ 2. Send a transaction (tx 1) to invoke caller contract `0xcc` (which has a
+ balance of 1 wei), which in turn invokes `0xaa` with a 1 wei call.
- 3. Store the balance of `0xaa` after the first transaction
- is processed. `0xaa` self-destructed. Expected outcome: 0 wei.
+ 3. Store the balance of `0xaa` after the first transaction is processed.
+ `0xaa` self-destructed. Expected outcome: 0 wei.
4. Send another transaction (tx 2) to call 0xaa with 5 wei.
- 5. Store the balance of `0xaa` after the second transaction
- is processed. No self-destruct. Expected outcome: 5 wei.
+ 5. Store the balance of `0xaa` after the second transaction is processed.
+ No self-destruct. Expected outcome: 5 wei.
6. Verify that:
- Call within tx 1 is successful, i.e `0xaa` self-destructed.
diff --git a/tests/prague/__init__.py b/tests/prague/__init__.py
index 9a0cd1ac6c8..7a26fa5de34 100644
--- a/tests/prague/__init__.py
+++ b/tests/prague/__init__.py
@@ -1,5 +1,6 @@
"""
-Test cases for EVM functionality introduced in Prague, [EIP-7600: Hardfork Meta - Pectra](https://eip.directory/eips/eip-7600).
+Test cases for EVM functionality introduced in Prague, [EIP-7600: Hardfork Meta
+- Pectra](https://eip.directory/eips/eip-7600).
Devnet Specifications:
@@ -8,4 +9,4 @@
- [ethpandaops/pectra-devnet-3](https://notes.ethereum.org/@ethpandaops/pectra-devnet-3).
- [ethpandaops/pectra-devnet-2](https://notes.ethereum.org/@ethpandaops/pectra-devnet-2).
- [ethpandaops/pectra-devnet-1](https://notes.ethereum.org/@ethpandaops/pectra-devnet-1).
-""" # noqa: E501
+"""
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/__init__.py b/tests/prague/eip2537_bls_12_381_precompiles/__init__.py
index 2cede194058..bc90dfa9752 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/__init__.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests for [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/conftest.py b/tests/prague/eip2537_bls_12_381_precompiles/conftest.py
index a4780065df7..8ae554bff0b 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/conftest.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/conftest.py
@@ -17,9 +17,11 @@ def vector_gas_value() -> int | None:
"""
Gas value from the test vector if any.
- If `None` it means that the test scenario did not come from a file, so no comparison is needed.
+ If `None` it means that the test scenario did not come from a file, so no
+ comparison is needed.
- The `vectors_from_file` function reads the gas value from the file and overwrites this fixture.
+ The `vectors_from_file` function reads the gas value from the file and
+ overwrites this fixture.
"""
return None
@@ -42,9 +44,10 @@ def precompile_gas_modifier() -> int:
"""
Modify the gas passed to the precompile, for testing purposes.
- By default the call is made with the exact gas amount required for the given opcode,
- but when this fixture is overridden, the gas amount can be modified to, e.g., test
- a lower amount and test if the precompile call fails.
+ By default the call is made with the exact gas amount required for the
+ given opcode, but when this fixture is overridden, the gas amount can be
+ modified to, e.g., test a lower amount and test if the precompile call
+ fails.
"""
return 0
@@ -63,7 +66,10 @@ def call_opcode() -> Op:
def call_contract_post_storage() -> Storage:
"""
Storage of the test contract after the transaction is executed.
- Note: Fixture `call_contract_code` fills the actual expected storage values.
+
+ Note:
+ Fixture `call_contract_code` fills the actual expected storage values.
+
"""
return Storage()
@@ -73,8 +79,8 @@ def call_succeeds(
expected_output: bytes | SupportsBytes,
) -> bool:
"""
- By default, depending on the expected output, we can deduce if the call is expected to succeed
- or fail.
+ By default, depending on the expected output, we can deduce if the call is
+ expected to succeed or fail.
"""
return len(bytes(expected_output)) > 0
@@ -93,24 +99,23 @@ def call_contract_code(
Code of the test contract.
Args:
- precompile_address:
- Address of the precompile to call.
- precompile_gas:
- Gas cost for the precompile, which is automatically calculated by the `precompile_gas`
- fixture, but can be overridden in the test.
- precompile_gas_modifier:
- Gas cost modifier for the precompile, which is automatically set to zero by the
- `precompile_gas_modifier` fixture, but can be overridden in the test.
- expected_output:
- Expected output of the precompile call. This value is used to determine if the call is
- expected to succeed or fail.
- call_succeeds:
- Boolean that indicates if the call is expected to succeed or fail.
- call_opcode:
- Type of call used to call the precompile (Op.CALL, Op.CALLCODE, Op.DELEGATECALL,
- Op.STATICCALL).
- call_contract_post_storage:
- Storage of the test contract after the transaction is executed.
+ precompile_address: Address of the precompile to call.
+ precompile_gas: Gas cost for the precompile, which is automatically
+ calculated by the `precompile_gas` fixture, but can
+ be overridden in the test.
+ precompile_gas_modifier: Gas cost modifier for the precompile, which
+ is automatically set to zero by the
+ `precompile_gas_modifier` fixture, but
+ can be overridden in the test.
+ expected_output: Expected output of the precompile call.
+ This value is used to determine if the call is
+ expected to succeed or fail.
+ call_succeeds: Boolean that indicates if the call is expected to
+ succeed or fail.
+ call_opcode: Type of call used to call the precompile (Op.CALL,
+ Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL).
+ call_contract_post_storage: Storage of the test contract after the
+ transaction is executed.
"""
expected_output = bytes(expected_output)
@@ -177,7 +182,9 @@ def post(call_contract_address: Address, call_contract_post_storage: Storage):
@pytest.fixture
def tx_gas_limit(fork: Fork, input_data: bytes, precompile_gas: int) -> int:
- """Transaction gas limit used for the test (Can be overridden in the test)."""
+ """
+ Transaction gas limit used for the test (Can be overridden in the test).
+ """
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 100_000
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/helpers.py b/tests/prague/eip2537_bls_12_381_precompiles/helpers.py
index afadc8bb601..0fe15471524 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/helpers.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/helpers.py
@@ -14,7 +14,10 @@
def current_python_script_directory(*args: str) -> str:
- """Get the current Python script directory, optionally appending additional path components."""
+ """
+ Get the current Python script directory, optionally appending additional
+ path components.
+ """
return os.path.join(os.path.dirname(os.path.realpath(__file__)), *args)
@@ -29,7 +32,10 @@ class Vector(BaseModel):
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self):
- """Convert the test vector to a tuple that can be used as a parameter in a pytest test."""
+ """
+ Convert the test vector to a tuple that can be used as a parameter in a
+ pytest test.
+ """
return pytest.param(self.input, self.expected, self.gas, id=self.name)
@@ -43,7 +49,10 @@ class FailVector(BaseModel):
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self):
- """Convert the test vector to a tuple that can be used as a parameter in a pytest test."""
+ """
+ Convert the test vector to a tuple that can be used as a parameter in a
+ pytest test.
+ """
return pytest.param(self.input, id=self.name)
@@ -71,6 +80,7 @@ def vectors_from_file(filename: str) -> List:
def add_points_g1(point_a: PointG1, point_b: PointG1) -> PointG1:
"""
Add two points in G1 using standard formulas.
+
For points P = (x, y) and Q = (u, v), compute R = P + Q.
"""
if point_a.x == 0 and point_a.y == 0:
@@ -88,7 +98,9 @@ def add_points_g1(point_a: PointG1, point_b: PointG1) -> PointG1:
def add_points_g2(point_a: PointG2, point_b: PointG2) -> PointG2:
"""
Add two points in G2 using standard formulas.
- For points P = ((x_0, x_1), (y_0, y_1)) and Q = ((u_0, u_1), (v_0, v_1)), compute R = P + Q.
+
+ For points P = ((x_0, x_1), (y_0, y_1)) and
+ Q = ((u_0, u_1), (v_0, v_1)), compute R = P + Q.
"""
if point_a.x == (0, 0) and point_a.y == (0, 0):
return point_b
@@ -112,10 +124,11 @@ class BLSPointGenerator:
- on the standard curve
- in the correct r-order subgroup or not
- on the curve or not
- - on an isomorphic curve (not standard curve) but in the correct r-order subgroup
+ - on an isomorphic curve (not standard curve) but in the correct
+ r-order subgroup
Additional resource that helped the class implementation:
- https://hackmd.io/@benjaminion/bls12-381
+ https://hackmd.io/@benjaminion/bls12-381
"""
# Constants for G1 curve equations
@@ -125,7 +138,8 @@ class BLSPointGenerator:
# This is a known parameter of the BLS12-381 curve specification
STANDARD_B_G1 = Spec.B_COEFFICIENT
- # Isomorphic G1 curve uses b=24 (can be any b value for an isomorphic curve)
+ # Isomorphic G1 curve uses b=24 (can be any b value for an isomorphic
+ # curve)
ISOMORPHIC_B_G1 = 24 # Isomorphic curve: y^2 = x^3 + 24
# Constants for G2 curve equations
@@ -192,8 +206,8 @@ def check_in_g2_subgroup(point: PointG2) -> bool:
@staticmethod
def sqrt_fq(a: FQ) -> Optional[FQ]:
"""
- Compute smallest square root of FQ element (if it exists). Used when finding valid
- y-coordinates for a given x-coordinate on the G1 curve.
+ Compute smallest square root of FQ element (if it exists). Used when
+ finding valid y-coordinates for a given x-coordinate on the G1 curve.
"""
assert field_modulus % 4 == 3, "This sqrt method requires p % 4 == 3"
candidate = a ** ((field_modulus + 1) // 4)
@@ -206,8 +220,8 @@ def sqrt_fq(a: FQ) -> Optional[FQ]:
@staticmethod
def sqrt_fq2(a: FQ2) -> Optional[FQ2]:
"""
- Compute square root of FQ2 element (if it exists). Used when finding valid
- y-coordinates for a given x-coordinate on the G2 curve.
+ Compute square root of FQ2 element (if it exists). Used when finding
+ valid y-coordinates for a given x-coordinate on the G2 curve.
"""
if a == FQ2([0, 0]):
return FQ2([0, 0])
@@ -222,8 +236,9 @@ def sqrt_fq2(a: FQ2) -> Optional[FQ2]:
@classmethod
def multiply_by_cofactor(cls, point: Any, is_g2: bool = False):
"""
- Multiply a point by the cofactor to ensure it's in the correct r-order subgroup.
- Used for creating points in the correct r-order subgroup when using isomorphic curves.
+ Multiply a point by the cofactor to ensure it's in the correct r-order
+ subgroup. Used for creating points in the correct r-order subgroup when
+ using isomorphic curves.
"""
cofactor = cls.G2_COFACTOR if is_g2 else cls.G1_COFACTOR
try:
@@ -251,8 +266,8 @@ def multiply_by_cofactor(cls, point: Any, is_g2: bool = False):
@memory.cache
def find_g1_point_by_x(cls, x_value: int, in_subgroup: bool, on_curve: bool = True) -> PointG1:
"""
- Find a G1 point with x-coordinate at or near the given value,
- with the specified subgroup membership and curve membership.
+ Find a G1 point with x-coordinate at or near the given value, with the
+ specified subgroup membership and curve membership.
"""
max_offset = 5000
isomorphic_b = cls.ISOMORPHIC_B_G1
@@ -267,7 +282,8 @@ def find_g1_point_by_x(cls, x_value: int, in_subgroup: bool, on_curve: bool = Tr
try:
x = FQ(try_x)
- # Calculate y² = x³ + b (standard curve or isomorphic curve)
+ # Calculate y² = x³ + b (standard curve or isomorphic
+ # curve)
b_value = cls.STANDARD_B_G1 if on_curve else isomorphic_b
y_squared = x**3 + FQ(b_value)
@@ -280,7 +296,8 @@ def find_g1_point_by_x(cls, x_value: int, in_subgroup: bool, on_curve: bool = Tr
raw_point = (int(x), int(y))
raw_point2 = (int(x), Spec.P - int(y))
- # For isomorphic curve points in subgroup, apply cofactor multiplication
+ # For isomorphic curve points in subgroup, apply cofactor
+ # multiplication
if not on_curve and in_subgroup:
try:
subgroup_point = cls.multiply_by_cofactor(raw_point, is_g2=False)
@@ -327,8 +344,8 @@ def find_g2_point_by_x(
cls, x_value: tuple, in_subgroup: bool, on_curve: bool = True
) -> PointG2:
"""
- Find a G2 point with x-coordinate at or near the given value,
- with the specified subgroup membership and curve membership.
+ Find a G2 point with x-coordinate at or near the given value, with the
+ specified subgroup membership and curve membership.
"""
max_offset = 5000
isomorphic_b = cls.ISOMORPHIC_B_G2
@@ -344,7 +361,8 @@ def find_g2_point_by_x(
try:
x = FQ2(try_x)
- # Calculate y² = x³ + b (standard curve or isomorphic curve)
+ # Calculate y² = x³ + b (standard curve or isomorphic
+ # curve)
b_value = cls.STANDARD_B_G2 if on_curve else isomorphic_b
y_squared = x**3 + FQ2(b_value)
@@ -363,7 +381,8 @@ def find_g2_point_by_x(
(Spec.P - int(y.coeffs[0]), Spec.P - int(y.coeffs[1])),
)
- # For isomorphic curve points in subgroup, apply cofactor multiplication
+ # For isomorphic curve points in subgroup, apply cofactor
+ # multiplication
if not on_curve and in_subgroup:
try:
subgroup_point = cls.multiply_by_cofactor(raw_point, is_g2=True)
@@ -413,26 +432,36 @@ def find_g2_point_by_x(
# G1 points by x coordinate (near or on the x value)
@classmethod
def generate_g1_point_in_subgroup_by_x(cls, x_value: int) -> PointG1:
- """G1 point that is in the r-order subgroup with x-coordinate by/on the given value."""
+ """
+ G1 point that is in the r-order subgroup with x-coordinate by/on the
+ given value.
+ """
return cls.find_g1_point_by_x(x_value, in_subgroup=True, on_curve=True)
@classmethod
def generate_g1_point_not_in_subgroup_by_x(cls, x_value: int) -> PointG1:
- """G1 point that is NOT in the r-order subgroup with x-coordinate by/on the given value."""
+ """
+ G1 point that is NOT in the r-order subgroup with x-coordinate by/on
+ the given value.
+ """
return cls.find_g1_point_by_x(x_value, in_subgroup=False, on_curve=True)
@classmethod
def generate_g1_point_not_on_curve_by_x(cls, x_value: int) -> PointG1:
- """G1 point that is NOT on the curve with x-coordinate by/on the given value."""
+ """
+ G1 point that is NOT on the curve with x-coordinate by/on the given
+ value.
+ """
return cls.find_g1_point_by_x(x_value, in_subgroup=False, on_curve=False)
@classmethod
def generate_g1_point_on_isomorphic_curve_by_x(cls, x_value: int) -> PointG1:
"""
- G1 point that is on an isomorphic curve (not standard curve)
- but in the r-order subgroup with x-coordinate by/on the given value.
+ G1 point that is on an isomorphic curve (not standard curve) but in the
+ r-order subgroup with x-coordinate by/on the given value.
- Uses cofactor multiplication to ensure the point is in the correct subgroup.
+ Uses cofactor multiplication to ensure the point is in the correct
+ subgroup.
"""
return cls.find_g1_point_by_x(x_value, in_subgroup=True, on_curve=False)
@@ -464,10 +493,11 @@ def generate_random_g1_point_not_on_curve(cls, seed: int) -> PointG1:
@classmethod
def generate_random_g1_point_on_isomorphic_curve(cls, seed: int) -> PointG1:
"""
- Generate a random G1 point that is on an isomorphic curve (not standard curve)
- but in the r-order subgroup.
+ Generate a random G1 point that is on an isomorphic curve (not standard
+ curve) but in the r-order subgroup.
- Uses cofactor multiplication to ensure the point is in the correct subgroup.
+ Uses cofactor multiplication to ensure the point is in the correct
+ subgroup.
"""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"on_isomorphic_curve").digest()
@@ -477,26 +507,36 @@ def generate_random_g1_point_on_isomorphic_curve(cls, seed: int) -> PointG1:
# G2 point generators - by x coordinate (near or on the x value)
@classmethod
def generate_g2_point_in_subgroup_by_x(cls, x_value: tuple) -> PointG2:
- """G2 point that is in the r-order subgroup with x-coordinate by/on the given value."""
+ """
+ G2 point that is in the r-order subgroup with x-coordinate by/on the
+ given value.
+ """
return cls.find_g2_point_by_x(x_value, in_subgroup=True, on_curve=True)
@classmethod
def generate_g2_point_not_in_subgroup_by_x(cls, x_value: tuple) -> PointG2:
- """G2 point that is NOT in the r-order subgroup with x-coordinate by/on the given value."""
+ """
+ G2 point that is NOT in the r-order subgroup with x-coordinate by/on
+ the given value.
+ """
return cls.find_g2_point_by_x(x_value, in_subgroup=False, on_curve=True)
@classmethod
def generate_g2_point_not_on_curve_by_x(cls, x_value: tuple) -> PointG2:
- """G2 point that is NOT on the curve with x-coordinate by/on the given value."""
+ """
+ G2 point that is NOT on the curve with x-coordinate by/on the given
+ value.
+ """
return cls.find_g2_point_by_x(x_value, in_subgroup=False, on_curve=False)
@classmethod
def generate_g2_point_on_isomorphic_curve_by_x(cls, x_value: tuple) -> PointG2:
"""
- G2 point that is on an isomorphic curve (not standard curve)
- but in the r-order subgroup with x-coordinate near the given value.
+ G2 point that is on an isomorphic curve (not standard curve) but in the
+ r-order subgroup with x-coordinate near the given value.
- Uses cofactor multiplication to ensure the point is in the correct subgroup.
+ Uses cofactor multiplication to ensure the point is in the correct
+ subgroup.
"""
return cls.find_g2_point_by_x(x_value, in_subgroup=True, on_curve=False)
@@ -537,9 +577,9 @@ def generate_random_g2_point_not_on_curve(cls, seed: int) -> PointG2:
@classmethod
def generate_random_g2_point_on_isomorphic_curve(cls, seed: int) -> PointG2:
"""
- Generate a random G2 point that is on an isomorphic curve (not standard curve)
- but in the r-order subgroup.
- Uses cofactor multiplication to ensure the point is in the correct subgroup.
+ Generate a random G2 point that is on an isomorphic curve (not standard
+ curve) but in the r-order subgroup. Uses cofactor multiplication to
+ ensure the point is in the correct subgroup.
"""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"g2_on_isomorphic_curve").digest()
@@ -553,81 +593,127 @@ def generate_random_g2_point_on_isomorphic_curve(cls, seed: int) -> PointG2:
@classmethod
def generate_g1_map_isogeny_kernel_points(cls) -> List[FP]:
"""
- Return precomputed kernel points for the BLS12-381 G1 map to curve function. These map to
- the G1 identity point `Spec.INF_G1`. They are generated using sage math externally with the
- following script as its significantly faster than using `py_ecc` (200-1000x faster).
-
- For reference we can imagine the map to curve function as a simple 2 step process, where an
- input t value is mapped to a point on the auxiliary curve via a SWU map, and then that
- point is mapped to the BLS curve via an 11-isogeny. For reference:
- - https://eips.ethereum.org/assets/eip-2537/field_to_curve
+ Return precomputed kernel points for the BLS12-381 G1 map to curve
+ function. These map to the G1 identity point `Spec.INF_G1`. They are
+ generated using sage math externally with the following script as its
+ significantly faster than using `py_ecc` (200-1000x faster).
+
+ For reference we can imagine the map to curve function as a simple 2
+ step process, where an input t value is mapped to a point on the
+ auxiliary curve via a SWU map, and then that point is mapped to the BLS
+ curve via an 11-isogeny. For reference:
+ https://eips.ethereum.org/assets/eip-2537/field_to_curve
+
+ Note we cannot use sage math directly within EEST as it is not a pure
+ python library and requires an external dependency to be installed on
+ the system machine.
+
+ Thanks to @petertdavies (Peter Miller) for the sage math script to
+ generate these points:
+ ```sage
+ q = 0x1A0111EA397FE69A4B1BA7B6434BACD764774B84F38512BF6730D2A0F6B0F6241EABFFFEB153FFFFB9FEFFFFFFFFAAAB
- Note we cannot use sage math directly within EEST as it is not a pure python library and
- requires an external dependency to be installed on the system machine.
-
- Thanks to @petertdavies (Peter Miller) for the sage math script to generate these points:
- ```sage
- q = 0x1A0111EA397FE69A4B1BA7B6434BACD764774B84F38512BF6730D2A0F6B0F6241EABFFFEB153FFFFB9FEFFFFFFFFAAAB
Fq = GF(q)
E1 = EllipticCurve(Fq, (0, 4)) # BLS12-381 curve
- ISO_11_A = Fq(0x144698A3B8E9433D693A02C96D4982B0EA985383EE66A8D8E8981AEFD881AC98936F8DA0E0F97F5CF428082D584C1D)
- ISO_11_B = Fq(0x12E2908D11688030018B12E8753EEE3B2016C1F0F24F4070A0B9C14FCEF35EF55A23215A316CEAA5D1CC48E98E172BE0)
+
+ ISO_11_A = Fq(0x144698A3B8E9433D693A02C96D4982B0EA985383EE66A8D8E8981AEFD881AC98936F8DA0E0F97F5CF428082D584C1D)
+
+ ISO_11_B = Fq(0x12E2908D11688030018B12E8753EEE3B2016C1F0F24F4070A0B9C14FCEF35EF55A23215A316CEAA5D1CC48E98E172BE0)
+
+ # hex constants kept on one line so the script runs when copy-pasted
+
ISO_11_Z = Fq(11)
+
Ei = EllipticCurve(Fq, (ISO_11_A, ISO_11_B))
- iso = EllipticCurveIsogeny(E=E1, kernel=None, codomain=Ei, degree=11).dual()
+
+ iso = EllipticCurveIsogeny(
+ E=E1,
+ kernel=None,
+ codomain=Ei,
+ degree=11).dual()
+
for (x, _) in iso.kernel_polynomial().roots():
discriminant = 1 - 4 / (ISO_11_A / ISO_11_B * x + 1)
if not discriminant.is_square():
continue
+
for sign in [1, -1]:
zt2 = (-1 + sign * discriminant.sqrt()) / 2
+
t2 = zt2 / ISO_11_Z
+
if t2.is_square():
t = t2.sqrt()
- assert x == -ISO_11_B / ISO_11_A * (1 + 1 / (ISO_11_Z**2 * t**4 + ISO_11_Z * t**2))
+ assert x == -ISO_11_B / ISO_11_A * (1 + 1 / (ISO_11_Z**2 *
+ t**4 + ISO_11_Z * t**2))
+
print(t)
```
- To reproduce, add the script contents to a file called `points.sage`, then run `sage points.sage`!
+ To reproduce, add the script contents to a file called `points.sage`,
+ then run `sage points.sage`!
- Please see the sage math installation guide to replicate:
- - https://doc.sagemath.org/html/en/installation/index.html
+ Please see the sage math installation guide to replicate:
+ - https://doc.sagemath.org/html/en/installation/index.html
- As G1 uses an 11-degree isogeny, its kernel contains exactly 11 points on the auxiliary
- curve that maps to the point at infinity on the BLS curve. This includes the point at
- infinity (doesn't concern us as the initial SWU map can never output infinity from any int
- t) and 10 other unique kernel points.
+ As G1 uses an 11-degree isogeny, its kernel contains exactly 11 points
+ on the auxiliary curve that maps to the point at infinity on the BLS
+ curve. This includes the point at infinity (doesn't concern us as the
+ initial SWU map can never output infinity from any int t) and 10 other
+ unique kernel points.
- These 10 other kernel points correspond to 5 x-coords on the curve (since each x-coord
- yields two points with y and -y). However, not all of these kernel points can be reached by
- the SWU map, which is why we only have 4 unique t values below.
+ These 10 other kernel points correspond to 5 x-coords on the curve
+ (since each x-coord yields two points with y and -y). However, not all
+ of these kernel points can be reached by the SWU map, which is why we
+ only have 4 unique t values below.
- The kernel polynomial has 5 roots (x-coords), and each root can potentially yield two
- t values that map to kernel points via the SWU function. Analysis shows that only 2 of
- these roots yield valid t values because the other 3 roots fail either the discriminant
- square check or the t^2 square check in the SWU inverse calculation. From these 2 valid
- roots, we get the 4 unique t values listed below.
+ The kernel polynomial has 5 roots (x-coords), and each root can
+ potentially yield two t values that map to kernel points via the SWU
+ function. Analysis shows that only 2 of these roots yield valid t
+ values because the other 3 roots fail either the discriminant square
+ check or the t^2 square check in the SWU inverse calculation. From
+ these 2 valid roots, we get the 4 unique t values listed below.
The roots and their properties are as follows:
- - Root 1 (x=3447232547282837364692125741875673748077489238391001187748258124039623697289612052402753422028380156396811587142615):
- Fails because its discriminant is not a square.
- - Root 2 (x=3086251397349454634226049654186198282625136597600255705376316455943570106637401671127489553534256598630507009270951):
- Fails because its discriminant is not a square.
- - Root 3 (x=2512099095366387796245759085729510986367032014959769672734622752070562589059815523018960565849753051338812932816014):
- Has a square discriminant, but both sign options yield t^2 values that are not squares.
- - Root 4 (x=2077344747421819657086473418925078480898358265217674456436079722467637536216749299440611432676849905020722484031356):
- Yields two valid t values:
- - 1731081574565817469321317449275278355306982786154072576198758675751495027640363897075486577327802192163339186341827
- - 861410691052762088300790587394810074303505896628048305535645284922135116676755956131724844456716837983264353875219
- - Root 5 (x=162902306530757011687648381458039960905879760854007434532151803806422383239905014872915974221245198317567396330740):
- Yields two valid t values:
- - 1006044755431560595281793557931171729984964515682961911911398807521437683216171091013202870577238485832047490326971
- - 1562001338336877267717400325455189014780228097985596277514975439801739125527323838522116502949589758528550231396418
-
- Additionally we also have the additive inverses of these t values, which are also valid
- kernel (non-unique) points. These are generated using the relationship:
- `(-t) mod p === (p - t) mod p`
- """ # noqa: E501
+ - Root 1
+ (x=3447232547282837364692125741875673748077489238391001187748258
+ 124039623697289612052402753422028380156396811587142615):
+ Fails because its discriminant is not a square.
+
+ - Root 2
+ (x=3086251397349454634226049654186198282625136597600255705376316
+ 455943570106637401671127489553534256598630507009270951):
+ Fails because its discriminant is not a square.
+
+ - Root 3
+ (x=2512099095366387796245759085729510986367032014959769672734622
+ 752070562589059815523018960565849753051338812932816014):
+ Has a square discriminant, but both sign options yield t^2 values
+ that are not squares.
+
+ - Root 4
+ (x=2077344747421819657086473418925078480898358265217674456436079
+ 722467637536216749299440611432676849905020722484031356):
+ Yields two valid t values:
+ - 173108157456581746932131744927527835530698278615407257619875
+ 8675751495027640363897075486577327802192163339186341827
+ and
+ - 861410691052762088300790587394810074303505896628048305535645
+ 284922135116676755956131724844456716837983264353875219
+
+ - Root 5
+ (x=1629023065307570116876483814580399609058797608540074345321518
+ 03806422383239905014872915974221245198317567396330740):
+ Yields two valid t values:
+ - 100604475543156059528179355793117172998496451568296191191139
+ 8807521437683216171091013202870577238485832047490326971
+ - 15620013383368772677174003254551890147802280979855962
+ 77514975439801739125527323838522116502949589758528550231396418
+
+ Additionally we also have the additive inverses of these t values,
+ which are also valid kernel (non-unique) points. These are generated
+ using the relationship: `(-t) mod p === (p - t) mod p`
+ """
unique_kernel_ts = [
1731081574565817469321317449275278355306982786154072576198758675751495027640363897075486577327802192163339186341827,
861410691052762088300790587394810074303505896628048305535645284922135116676755956131724844456716837983264353875219,
@@ -641,17 +727,20 @@ def generate_g1_map_isogeny_kernel_points(cls) -> List[FP]:
@classmethod
def generate_g2_map_isogeny_kernel_points(cls) -> List[FP2]:
"""
- Return precomputed kernel points for the BLS12-381 G2 map to curve function. These map to
- the G2 identity point `Spec.INF_G2`. They are generated using sage math externally with the
- following script as its significantly faster than using `py_ecc` (200-1000x faster).
-
- For reference we can imagine the map to curve function as a simple 2 step process, where an
- input t value is mapped to a point on the auxiliary curve via a SWU map, and then that
- point is mapped to the BLS curve via a 3-isogeny. For reference:
+ Return precomputed kernel points for the BLS12-381 G2 map to curve
+ function. These map to the G2 identity point `Spec.INF_G2`. They are
+ generated using sage math externally with the following script as its
+ significantly faster than using `py_ecc` (200-1000x faster).
+
+ For reference we can imagine the map to curve function as a simple 2
+ step process, where an input t value is mapped to a point on the
+ auxiliary curve via a SWU map, and then that point is mapped to the
+ BLS curve via a 3-isogeny. For reference:
- https://eips.ethereum.org/assets/eip-2537/field_to_curve
- Note we cannot use sage math directly within EEST as it is not a pure python library and
- requires an external dependency to be installed on the system machine.
+ Note we cannot use sage math directly within EEST as it is not a pure
+ python library and requires an external dependency to be installed on
+ the system machine.
```sage
q = 0x1A0111EA397FE69A4B1BA7B6434BACD764774B84F38512BF6730D2A0F6B0F6241EABFFFEB153FFFFB9FEFFFFFFFFAAAB
@@ -694,30 +783,37 @@ def generate_g2_map_isogeny_kernel_points(cls) -> List[FP2]:
print(t)
```
- Add the script contents to a file called `points.sage`, run `sage points.sage`!
+ Add the script contents to a file called `points.sage`, run `sage
+ points.sage`!
Please see the sage math installation guide to replicate:
- https://doc.sagemath.org/html/en/installation/index.html
- As G2 uses an 3-degree isogeny, its kernel contains exactly 3 points on the auxiliary
- curve that maps to the point at infinity on the BLS curve. This includes the point at
- infinity (doesn't concern us as the initial SWU map can never output infinity from any int
- t) and 2 other kernel points.
-
- These 2 other kernel points correspond to 1 x-coord on the curve (since each x-coord
- yields two points with y and -y). Note that this root yields two equal t values due
- to specific properties of the isogeny in Fp2.
-
- However, the G2 case is different from G1 and requires additional verification for y, we
- must check that the computed y^2 actually has a square root in Fp2. Unlike G1, the G2
- singular isogeny kernel polynomial root does not correspond to a valid point on the
- auxiliary curve due to the failure of the additional check.
-
- - Root 1 (x=6*u + 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559781):
- Fails because its y^2 is not a square in Fp2.
-
- Due to the failure of the first root, we have no valid kernel points in G2 that map to the
- point at infinity on the BLS curve. This is why we return an empty list here. It is kept
- for consistency with the G1 case, and documentation purposes.
- """ # noqa: E501
+ As G2 uses an 3-degree isogeny, its kernel contains exactly 3 points on
+ the auxiliary curve that maps to the point at infinity on the BLS
+ curve. This includes the point at infinity (doesn't concern us as the
+ initial SWU map can never output infinity from any int t) and 2 other
+ kernel points.
+
+ These 2 other kernel points correspond to 1 x-coord on the curve (since
+ each x-coord yields two points with y and -y). Note that this root
+ yields two equal t values due to specific properties of the isogeny in
+ Fp2.
+
+ However, the G2 case is different from G1 and requires additional
+ verification for y, we must check that the computed y^2 actually has a
+ square root in Fp2. Unlike G1, the G2 singular isogeny kernel
+ polynomial root does not correspond to a valid point on the auxiliary
+ curve due to the failure of the additional check.
+
+ - Root 1 (x=6*u +
+ 4002409555221667393417789825735904156556882819939007885332058136124031650490837
+ 864442687629129015664037894272559781): Fails because its y^2 is not a
+ square in Fp2.
+
+ Due to the failure of the first root, we have no valid kernel points in
+ G2 that map to the point at infinity on the BLS curve. This is why we
+ return an empty list here. It is kept for consistency with the G1 case,
+ and documentation purposes.
+ """ # noqa: E501, W505
return []
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/spec.py b/tests/prague/eip2537_bls_12_381_precompiles/spec.py
index 591ede19347..808bcf11c8c 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/spec.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/spec.py
@@ -161,7 +161,8 @@ class Spec:
]
# fmt: on
- # Test constants (from https://github.com/ethereum/bls12-381-tests/tree/eip-2537)
+ # Test constants from
+ # https://github.com/ethereum/bls12-381-tests/tree/eip-2537
P1 = PointG1( # random point in G1
0x112B98340EEE2777CC3C14163DEA3EC97977AC3DC5C70DA32E6E87578F44912E902CCEF9EFE28D4A78B8999DFBCA9426,
0x186B28D92356C4DFEC4B5201AD099DBDEDE3781F8998DDF929B4CD7756192185CA7B8F4EF7088F813270AC3D48868A21,
@@ -249,7 +250,10 @@ def msm_discount(group: BLS12Group, k: int) -> int:
def msm_gas_func_gen(
group: BLS12Group, len_per_pair: int, multiplication_cost: int
) -> Callable[[int], int]:
- """Generate a function that calculates the gas cost for the G1MSM and G2MSM precompiles."""
+ """
+ Generate a function that calculates the gas cost for the G1MSM and G2MSM
+ precompiles.
+ """
def msm_gas(input_length: int) -> int:
"""Calculate gas cost for the G1MSM and G2MSM precompiles."""
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1add.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1add.py
index 4dab178a35f..6d6effdde05 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1add.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1add.py
@@ -1,7 +1,10 @@
"""
-abstract: Tests BLS12_G1ADD precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_G1ADD precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests BLS12_G1ADD precompile.
+
+Tests the BLS12_G1ADD precompile implementation from [EIP-2537:
+Precompile for BLS12-381 curve operations]
+(https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -26,8 +29,8 @@
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("add_G1_bls.json")
+ [
- # Identity (infinity) element test cases.
- # Checks that any point added to the identity element (INF) equals itself.
+ # Identity (infinity) element test cases. Checks that any point added
+ # to the identity element (INF) equals itself.
pytest.param(
Spec.G1 + Spec.INF_G1,
Spec.G1,
@@ -114,15 +117,16 @@
None,
id="point_plus_reflected_point",
),
- # Not in the r-order subgroup test cases.
- # Checks that any point on the curve but not in the subgroup is used for operations.
+ # Not in the r-order subgroup test cases. Checks that any point on the
+ # curve but not in the subgroup is used for operations.
pytest.param(
Spec.P1_NOT_IN_SUBGROUP + Spec.P1_NOT_IN_SUBGROUP,
Spec.P1_NOT_IN_SUBGROUP_TIMES_2,
None,
id="non_sub_plus_non_sub",
),
- pytest.param( # `P1_NOT_IN_SUBGROUP` has an small order subgroup of 3: 3P = INF.
+ pytest.param( # `P1_NOT_IN_SUBGROUP` has an small order subgroup of 3:
+ # 3P = INF.
Spec.P1_NOT_IN_SUBGROUP + Spec.P1_NOT_IN_SUBGROUP_TIMES_2,
Spec.INF_G1,
None,
@@ -164,7 +168,8 @@
None,
id="doubled_non_sub_plus_neg",
),
- # More not in the r-order subgroup test cases, but using random generated points.
+ # More not in the r-order subgroup test cases, but using random
+ # generated points.
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[0] + Spec.P1,
add_points_g1(G1_POINTS_NOT_IN_SUBGROUP[0], Spec.P1),
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1msm.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1msm.py
index fb69d0e4060..8d5a4e89c33 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1msm.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1msm.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests BLS12_G1MSM precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_G1MSM precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Test the BLS12_G1MSM precompile.
+
+Test the BLS12_G1MSM precompile introduced in
+[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -69,7 +71,8 @@
None,
id="multiple_points_zero_scalar",
),
- # Cases with maximum discount table (test vector for gas cost calculation)
+ # Cases with maximum discount table (test vector for gas cost
+ # calculation)
pytest.param(
(Spec.P1 + Scalar(Spec.Q)) * (len(Spec.G1MSM_DISCOUNT_TABLE) - 1),
Spec.INF_G1,
@@ -173,7 +176,8 @@ def test_valid(
id="scalar_too_large",
),
pytest.param(
- Spec.G1 + Scalar(1).x.to_bytes(16, byteorder="big"), # Invalid scalar length
+ # Invalid scalar length
+ Spec.G1 + Scalar(1).x.to_bytes(16, byteorder="big"),
id="scalar_too_short",
),
pytest.param(
@@ -198,7 +202,8 @@ def test_valid(
id="y_above_p_pos_1",
),
],
- # Input length tests can be found in ./test_bls12_variable_length_input_contracts.py
+ # Input length tests can be found in
+ # ./test_bls12_variable_length_input_contracts.py
)
@pytest.mark.parametrize(
"precompile_gas_modifier", [100_000], ids=[""]
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1mul.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1mul.py
index 721d63a50b5..9824f875bd4 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1mul.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1mul.py
@@ -1,7 +1,10 @@
"""
-abstract: Tests BLS12_G1MUL precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_G1MUL precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests BLS12_G1MUL precompile.
+
+Tests the BLS12_G1MUL precompile implementation from [EIP-2537:
+Precompile for BLS12-381 curve operations]
+(https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -84,8 +87,8 @@
pytest.param(
Spec.P1 + Scalar(2**256 - 1),
PointG1(
- 0x3DA1F13DDEF2B8B5A46CD543CE56C0A90B8B3B0D6D43DEC95836A5FD2BACD6AA8F692601F870CF22E05DDA5E83F460B, # noqa: E501
- 0x18D64F3C0E9785365CBDB375795454A8A4FA26F30B9C4F6E33CA078EB5C29B7AEA478B076C619BC1ED22B14C95569B2D, # noqa: E501
+ 0x3DA1F13DDEF2B8B5A46CD543CE56C0A90B8B3B0D6D43DEC95836A5FD2BACD6AA8F692601F870CF22E05DDA5E83F460B,
+ 0x18D64F3C0E9785365CBDB375795454A8A4FA26F30B9C4F6E33CA078EB5C29B7AEA478B076C619BC1ED22B14C95569B2D,
),
None,
id="max_scalar_times_point",
@@ -182,7 +185,8 @@ def test_valid(
id="swapped_coordinates_times_0",
),
pytest.param(
- PointG1(0x01, 0x07) + Scalar(0), # Point on wrong curve y^2 = x^3 + 5
+ # Point on wrong curve y^2 = x^3 + 5
+ PointG1(0x01, 0x07) + Scalar(0),
id="point_on_wrong_curve_times_0",
),
pytest.param(
@@ -294,7 +298,8 @@ def test_valid(
Spec.P1_NOT_IN_SUBGROUP + Scalar(Spec.Q + 1),
id="not_in_subgroup_times_q_plus_1",
),
- # More not in the r-order subgroup test cases, but using random generated points.
+ # More not in the r-order subgroup test cases, but using random
+ # generated points.
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[0] + Scalar(1),
id="rand_not_in_subgroup_0_times_1",
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2add.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2add.py
index 17282fdd926..71e71ed7ebc 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2add.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2add.py
@@ -1,7 +1,10 @@
"""
-abstract: Tests BLS12_G2ADD precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_G2ADD precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests BLS12_G2ADD precompile.
+
+Tests the BLS12_G2ADD precompile implementation from [EIP-2537:
+Precompile for BLS12-381 curve operations]
+(https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -26,8 +29,8 @@
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("add_G2_bls.json")
+ [
- # Identity (infinity) element test cases.
- # Checks that any point added to the identity element (INF) equals itself.
+ # Identity (infinity) element test cases. Checks that any point added
+ # to the identity element (INF) equals itself.
pytest.param(
Spec.G2 + Spec.INF_G2,
Spec.G2,
@@ -115,8 +118,8 @@
None,
id="point_plus_reflected_point",
),
- # Not in the r-order subgroup test cases.
- # Checks that any point on the curve but not in the subgroup is used for operations.
+ # Not in the r-order subgroup test cases. Checks that any point on the
+ # curve but not in the subgroup is used for operations.
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Spec.P2_NOT_IN_SUBGROUP,
Spec.P2_NOT_IN_SUBGROUP_TIMES_2,
@@ -165,7 +168,8 @@
None,
id="doubled_non_sub_plus_neg",
),
- # More not in the r-order subgroup test cases, but using random generated points.
+ # More not in the r-order subgroup test cases, but using random
+ # generated points.
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[0] + Spec.P2,
add_points_g2(G2_POINTS_NOT_IN_SUBGROUP[0], Spec.P2),
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2msm.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2msm.py
index c27b8f5599d..87a9cf77851 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2msm.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2msm.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests BLS12_G2MSM precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_G2MSM precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Test the BLS12_G2MSM precompile.
+
+Test the BLS12_G2MSM precompile introduced in
+[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -69,7 +71,8 @@
None,
id="multiple_points_zero_scalar",
),
- # Cases with maximum discount table (test vector for gas cost calculation)
+ # Cases with maximum discount table (test vector for gas cost
+ # calculation)
pytest.param(
(Spec.P2 + Scalar(Spec.Q)) * (len(Spec.G2MSM_DISCOUNT_TABLE) - 1),
Spec.INF_G2,
@@ -171,7 +174,8 @@ def test_valid(
id="scalar_too_large",
),
pytest.param(
- Spec.G2 + Scalar(1).x.to_bytes(16, byteorder="big"), # Invalid scalar length
+ # Invalid scalar length
+ Spec.G2 + Scalar(1).x.to_bytes(16, byteorder="big"),
id="scalar_too_short",
),
pytest.param(
@@ -224,7 +228,8 @@ def test_valid(
id="y_c1_above_p_pos_1",
),
],
- # Input length tests can be found in ./test_bls12_variable_length_input_contracts.py
+ # Input length tests can be found in
+ # ./test_bls12_variable_length_input_contracts.py
)
@pytest.mark.parametrize(
"precompile_gas_modifier", [100_000], ids=[""]
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2mul.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2mul.py
index db72f7d7563..4256de3afc2 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2mul.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2mul.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests BLS12_G2MUL precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_G2MUL precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Test the BLS12_G2MUL precompile.
+
+Test the BLS12_G2MUL precompile introduced in
+[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -85,12 +87,12 @@
Spec.P2 + Scalar(2**256 - 1),
PointG2(
(
- 0x2663E1C3431E174CA80E5A84489569462E13B52DA27E7720AF5567941603475F1F9BC0102E13B92A0A21D96B94E9B22, # noqa: E501
- 0x6A80D056486365020A6B53E2680B2D72D8A93561FC2F72B960936BB16F509C1A39C4E4174A7C9219E3D7EF130317C05, # noqa: E501
+ 0x2663E1C3431E174CA80E5A84489569462E13B52DA27E7720AF5567941603475F1F9BC0102E13B92A0A21D96B94E9B22,
+ 0x6A80D056486365020A6B53E2680B2D72D8A93561FC2F72B960936BB16F509C1A39C4E4174A7C9219E3D7EF130317C05,
),
(
- 0xC49EAD39E9EB7E36E8BC25824299661D5B6D0E200BBC527ECCB946134726BF5DBD861E8E6EC946260B82ED26AFE15FB, # noqa: E501
- 0x5397DAD1357CF8333189821B737172B18099ECF7EE8BDB4B3F05EBCCDF40E1782A6C71436D5ACE0843D7F361CBC6DB2, # noqa: E501
+ 0xC49EAD39E9EB7E36E8BC25824299661D5B6D0E200BBC527ECCB946134726BF5DBD861E8E6EC946260B82ED26AFE15FB,
+ 0x5397DAD1357CF8333189821B737172B18099ECF7EE8BDB4B3F05EBCCDF40E1782A6C71436D5ACE0843D7F361CBC6DB2,
),
),
None,
@@ -312,7 +314,8 @@ def test_valid(
Spec.P2_NOT_IN_SUBGROUP + Scalar(Spec.Q + 1),
id="not_in_subgroup_times_q_plus_1",
),
- # More not in the r-order subgroup test cases, but using random generated points.
+ # More not in the r-order subgroup test cases, but using random
+ # generated points.
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[0] + Scalar(1),
id="rand_not_in_subgroup_0_times_1",
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp2_to_g2.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp2_to_g2.py
index d2da70d7e90..095df9e8884 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp2_to_g2.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp2_to_g2.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests BLS12_MAP_FP2_TO_G2 precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_MAP_FP2_TO_G2 precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Test the BLS12_MAP_FP2_TO_G2 precompile.
+
+Test the BLS12_MAP_FP2_TO_G2 precompile introduced in
+[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -22,12 +24,12 @@
G2_POINT_ZERO_FP = PointG2(
(
- 0x18320896EC9EEF9D5E619848DC29CE266F413D02DD31D9B9D44EC0C79CD61F18B075DDBA6D7BD20B7FF27A4B324BFCE, # noqa: E501
- 0xA67D12118B5A35BB02D2E86B3EBFA7E23410DB93DE39FB06D7025FA95E96FFA428A7A27C3AE4DD4B40BD251AC658892, # noqa: E501
+ 0x18320896EC9EEF9D5E619848DC29CE266F413D02DD31D9B9D44EC0C79CD61F18B075DDBA6D7BD20B7FF27A4B324BFCE,
+ 0xA67D12118B5A35BB02D2E86B3EBFA7E23410DB93DE39FB06D7025FA95E96FFA428A7A27C3AE4DD4B40BD251AC658892,
),
(
- 0x260E03644D1A2C321256B3246BAD2B895CAD13890CBE6F85DF55106A0D334604FB143C7A042D878006271865BC35941, # noqa: E501
- 0x4C69777A43F0BDA07679D5805E63F18CF4E0E7C6112AC7F70266D199B4F76AE27C6269A3CEEBDAE30806E9A76AADF5C, # noqa: E501
+ 0x260E03644D1A2C321256B3246BAD2B895CAD13890CBE6F85DF55106A0D334604FB143C7A042D878006271865BC35941,
+ 0x4C69777A43F0BDA07679D5805E63F18CF4E0E7C6112AC7F70266D199B4F76AE27C6269A3CEEBDAE30806E9A76AADF5C,
),
)
@@ -47,12 +49,12 @@
FP2((Spec.P - 1, Spec.P - 1)),
PointG2(
(
- 0x9BF1B857D8C15F317F649ACCFA7023EF21CFC03059936B83B487DB476FF9D2FE64C6147140A5F0A436B875F51FFDF07, # noqa: E501
- 0xBB10E09BDF236CB2951BD7BCC044E1B9A6BB5FD4B2019DCC20FFDE851D52D4F0D1A32382AF9D7DA2C5BA27E0F1C69E6, # noqa: E501
+ 0x9BF1B857D8C15F317F649ACCFA7023EF21CFC03059936B83B487DB476FF9D2FE64C6147140A5F0A436B875F51FFDF07,
+ 0xBB10E09BDF236CB2951BD7BCC044E1B9A6BB5FD4B2019DCC20FFDE851D52D4F0D1A32382AF9D7DA2C5BA27E0F1C69E6,
),
(
- 0xDD416A927AB1C15490AB753C973FD377387B12EFCBE6BED2BF768B9DC95A0CA04D1A8F0F30DBC078A2350A1F823CFD3, # noqa: E501
- 0x171565CE4FCD047B35EA6BCEE4EF6FDBFEC8CC73B7ACDB3A1EC97A776E13ACDFEFFC21ED6648E3F0EEC53DDB6C20FB61, # noqa: E501
+ 0xDD416A927AB1C15490AB753C973FD377387B12EFCBE6BED2BF768B9DC95A0CA04D1A8F0F30DBC078A2350A1F823CFD3,
+ 0x171565CE4FCD047B35EA6BCEE4EF6FDBFEC8CC73B7ACDB3A1EC97A776E13ACDFEFFC21ED6648E3F0EEC53DDB6C20FB61,
),
),
None,
@@ -61,8 +63,8 @@
pytest.param(
FP2(
(
- 3510328712861478240121438855244276237335901234329585006107499559909114695366216070652508985150831181717984778988906, # noqa: E501
- 2924545590598115509050131525615277284817672420174395176262156166974132393611647670391999011900253695923948997972401, # noqa: E501
+ 3510328712861478240121438855244276237335901234329585006107499559909114695366216070652508985150831181717984778988906,
+ 2924545590598115509050131525615277284817672420174395176262156166974132393611647670391999011900253695923948997972401,
)
),
Spec.INF_G2,
@@ -101,16 +103,18 @@ def test_isogeny_kernel_values(
tx: Transaction,
):
"""
- Test the BLS12_MAP_FP2_TO_G2 precompile with isogeny kernel values. Note this test only exists
- to align with the G1 test. `G2_FIELD_POINTS_MAP_TO_IDENTITY` is empty so there are no cases.
-
- The isogeny kernel is simply the set of special field values, that after the two step mapping
- (first SWU onto an auxiliary curve, then a 3-degree isogeny back to G2), collapse exactly
- to the identity point.
-
- For the G2 case the only kernel element is the point at infinity, and SWU never produces the
- identity point from a finite input t. Hence `G2_FIELD_POINTS_MAP_TO_IDENTITY` is empty. Please
- proceed to the generator in `helpers.py` for more details.
+ Test the BLS12_MAP_FP2_TO_G2 precompile with isogeny kernel values. Note
+ this test only exists to align with the G1 test.
+ `G2_FIELD_POINTS_MAP_TO_IDENTITY` is empty so there are no cases.
+
+ The isogeny kernel is simply the set of special field values, that after
+ the two step mapping (first SWU onto an auxiliary curve, then a 3-degree
+ isogeny back to G2), collapse exactly to the identity point.
+
+ For the G2 case the only kernel element is the point at infinity, and SWU
+ never produces the identity point from a finite input t. Hence
+ `G2_FIELD_POINTS_MAP_TO_IDENTITY` is empty. Please proceed to the generator
+ in `helpers.py` for more details.
"""
state_test(
env=Environment(),
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp_to_g1.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp_to_g1.py
index 6612530513d..d8c0d73a2cd 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp_to_g1.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp_to_g1.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests BLS12_MAP_FP_TO_G1 precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_MAP_FP_TO_G1 precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests BLS12_MAP_FP_TO_G1 precompile.
+
+Tests the BLS12_MAP_FP_TO_G1 precompile implementation from
+[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -21,8 +23,8 @@
]
G1_POINT_ZERO_FP = PointG1(
- 0x11A9A0372B8F332D5C30DE9AD14E50372A73FA4C45D5F2FA5097F2D6FB93BCAC592F2E1711AC43DB0519870C7D0EA415, # noqa: E501
- 0x92C0F994164A0719F51C24BA3788DE240FF926B55F58C445116E8BC6A47CD63392FD4E8E22BDF9FEAA96EE773222133, # noqa: E501
+ 0x11A9A0372B8F332D5C30DE9AD14E50372A73FA4C45D5F2FA5097F2D6FB93BCAC592F2E1711AC43DB0519870C7D0EA415,
+ 0x92C0F994164A0719F51C24BA3788DE240FF926B55F58C445116E8BC6A47CD63392FD4E8E22BDF9FEAA96EE773222133,
)
@@ -40,15 +42,15 @@
pytest.param(
FP(Spec.P - 1),
PointG1(
- 0x1073311196F8EF19477219CCEE3A48035FF432295AA9419EED45D186027D88B90832E14C4F0E2AA4D15F54D1C3ED0F93, # noqa: E501
- 0x16B3A3B2E3DDDF6A11459DDAF657FDE21C4F10282A56029D9B55AB3CE1F41E1CF39AD27E0EA35823C7D3250E81FF3D66, # noqa: E501
+ 0x1073311196F8EF19477219CCEE3A48035FF432295AA9419EED45D186027D88B90832E14C4F0E2AA4D15F54D1C3ED0F93,
+ 0x16B3A3B2E3DDDF6A11459DDAF657FDE21C4F10282A56029D9B55AB3CE1F41E1CF39AD27E0EA35823C7D3250E81FF3D66,
),
None,
id="fp_p_minus_1",
),
pytest.param(
FP(
- 799950832265136997107648781861994410980648980263584507133499364313075404851459407870655748616451882783569609925573 # noqa: E501
+ 799950832265136997107648781861994410980648980263584507133499364313075404851459407870655748616451882783569609925573
),
Spec.INF_G1,
None,
@@ -88,12 +90,12 @@ def test_isogeny_kernel_values(
"""
Test the BLS12_MAP_FP_TO_G1 precompile with isogeny kernel inputs.
- The isogeny kernel is simply the set of special field values, that after the two step mapping
- (first SWU onto an auxiliary curve, then an 11-degree isogeny back to G1), collapse exactly
- to the identity point.
+ The isogeny kernel is simply the set of special field values, that after
+ the two step mapping (first SWU onto an auxiliary curve, then an 11-degree
+ isogeny back to G1), collapse exactly to the identity point.
- Please proceed to the generator in `helpers.py` to see how the isogeny kernel values are
- generated.
+ Please proceed to the generator in `helpers.py` to see how the isogeny
+ kernel values are generated.
"""
state_test(
env=Environment(),
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_pairing.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_pairing.py
index 4d1d5555ebb..d8798201a40 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_pairing.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_pairing.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests BLS12_PAIRING precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12_PAIRING precompile of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests BLS12_PAIRING precompile.
+
+Tests the BLS12_PAIRING precompile implementation from
+[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
@@ -147,8 +149,8 @@ def test_valid_multi_inf(
post: dict,
):
"""
- Test maximum input given the current environment gas limit for the BLS12_PAIRING
- precompile.
+ Test maximum input given the current environment gas limit for the
+ BLS12_PAIRING precompile.
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
@@ -347,8 +349,8 @@ def test_invalid_multi_inf(
post: dict,
):
"""
- Test maximum input given the current environment gas limit for the BLS12_PAIRING
- precompile and an invalid tail.
+ Test maximum input given the current environment gas limit for the
+ BLS12_PAIRING precompile and an invalid tail.
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_precompiles_before_fork.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_precompiles_before_fork.py
index 2e1cc8c0080..9c56ff39ac0 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_precompiles_before_fork.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_precompiles_before_fork.py
@@ -1,8 +1,10 @@
"""
-abstract: Tests BLS12 precompiles of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests BLS12 precompiles of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- before the Prague hard fork is active.
-""" # noqa: E501
+Tests BLS12 precompiles before fork activation.
+
+Tests the BLS12 precompiles behavior before fork activation from
+[EIP-2537: Precompile for BLS12-381 curve operations]
+(https://eips.ethereum.org/EIPS/eip-2537).
+"""
import pytest
diff --git a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_variable_length_input_contracts.py b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_variable_length_input_contracts.py
index 527adb20096..8cc70be6aff 100644
--- a/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_variable_length_input_contracts.py
+++ b/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_variable_length_input_contracts.py
@@ -1,7 +1,11 @@
"""
-abstract: Tests minimum gas and input length for BLS12_G1MSM, BLS12_G2MSM, BLS12_PAIRING precompiles of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537)
- Tests minimum gas and input length for BLS12_G1MSM, BLS12_G2MSM, BLS12_PAIRING precompiles of [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
-""" # noqa: E501
+Tests minimum gas and input length for BLS12 precompiles.
+
+Tests minimum gas and input length requirements for BLS12_G1MSM,
+BLS12_G2MSM, and BLS12_PAIRING precompiles from [EIP-2537: Precompile
+for BLS12-381 curve operations]
+(https://eips.ethereum.org/EIPS/eip-2537).
+"""
from typing import Callable, List, SupportsBytes
@@ -29,7 +33,10 @@
@pytest.fixture
def input_data() -> bytes:
- """Calldata of the transaction is empty because all input in these tests is zero."""
+ """
+ Calldata of the transaction is empty because all input in these tests is
+ zero.
+ """
return b""
@@ -41,7 +48,9 @@ def gas_modifier() -> int:
@pytest.fixture
def input_length_modifier() -> int:
- """Input length modifier to apply to each element of the precompile_gas_list."""
+ """
+ Input length modifier to apply to each element of the precompile_gas_list.
+ """
return 0
@@ -71,38 +80,36 @@ def call_contract_code(
call_contract_post_storage: Storage,
) -> Bytecode:
"""
- Code of the test contract to validate minimum expected gas in precompiles, as well as
- expected input lengths on all variable-length input precompiles.
+ Code of the test contract to validate minimum expected gas in precompiles,
+ as well as expected input lengths on all variable-length input precompiles.
- Code differs from the one used in all other tests in this file, because it accepts a list of
- precompile gas values and a list of precompile data lengths, and for each pair of values, it
- calls the precompile with the given gas and data length, data being passed to the precompile
- is all zeros.
+ Code differs from the one used in all other tests in this file, because it
+ accepts a list of precompile gas values and a list of precompile data
+ lengths, and for each pair of values, it calls the precompile with the
+ given gas and data length, data being passed to the precompile is all
+ zeros.
Args:
- precompile_address:
- Address of the precompile to call.
- precompile_gas_list:
- List of gas values to be used to call the precompile, one for each call.
- precompile_data_length_list:
- List of data lengths to be used to call the precompile, one for each call.
- gas_modifier:
- Integer to add to the gas passed to the precompile.
- input_length_modifier:
- Integer to add to the length of the input passed to the precompile.
- expected_output:
- Expected output of the contract, it is only used to determine if the call is expected
- to succeed or fail.
- call_opcode:
- Type of call used to call the precompile (Op.CALL, Op.CALLCODE, Op.DELEGATECALL,
- Op.STATICCALL).
- call_contract_post_storage:
- Storage of the test contract after the transaction is executed.
+ precompile_address: Address of the precompile to call.
+ precompile_gas_list: List of gas values to be used to call the
+ precompile, one for each call.
+ precompile_data_length_list: List of data lengths to be used to call
+ the precompile, one for each call.
+ gas_modifier: Integer to add to the gas passed to the precompile.
+ input_length_modifier: Integer to add to the length of the input
+ passed to the precompile.
+ expected_output: Expected output of the contract, it is only used to
+ determine if the call is expected to succeed or fail.
+ call_opcode: Type of call used to call the precompile (Op.CALL,
+ Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL).
+ call_contract_post_storage: Storage of the test contract after the
+ transaction is executed.
"""
expected_output = bytes(expected_output)
- # Depending on the expected output, we can deduce if the call is expected to succeed or fail.
+ # Depending on the expected output, we can deduce if the call is expected
+ # to succeed or fail.
call_succeeds = len(expected_output) > 0
assert len(precompile_gas_list) == len(precompile_data_length_list)
@@ -114,8 +121,9 @@ def call_contract_code(
for precompile_gas, precompile_args_length in zip(
precompile_gas_list, precompile_data_length_list, strict=False
):
- # For each given precompile gas value, and given arguments length, call the precompile
- # with the given gas and call data (all zeros) and compare the result.
+ # For each given precompile gas value, and given arguments length, call
+ # the precompile with the given gas and call data (all zeros) and
+ # compare the result.
code += Op.SSTORE(
call_contract_post_storage.store_next(1 if call_succeeds else 0),
Op.CALL(
@@ -135,7 +143,10 @@ def call_contract_code(
def tx_gas_limit_calculator(
fork: Fork, precompile_gas_list: List[int], max_precompile_input_length: int
) -> int:
- """Calculate the gas used to execute the transaction with the given precompile gas list."""
+ """
+ Calculate the gas used to execute the transaction with the given precompile
+ gas list.
+ """
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 22_500 * len(precompile_gas_list)
@@ -154,7 +165,9 @@ def tx_gas_limit(
precompile_gas_list: List[int],
precompile_data_length_list: List[int],
) -> int:
- """Transaction gas limit used for the test (Can be overridden in the test)."""
+ """
+ Transaction gas limit used for the test (Can be overridden in the test).
+ """
assert len(input_data) == 0, "Expected empty data in the transaction."
return tx_gas_limit_calculator(fork, precompile_gas_list, max(precompile_data_length_list))
@@ -163,12 +176,13 @@ def get_split_discount_table_by_fork(
gas_fn: Callable, discount_table_length: int, element_length: int
) -> Callable[[Fork], List[ParameterSet]]:
"""
- Get the number of test cases needed to cover the given discount table adjusted for the
- fork transaction gas limit cap.
+ Get the number of test cases needed to cover the given discount table
+ adjusted for the fork transaction gas limit cap.
- The function will return the full discount table as a single test case if the
- fork has no transaction gas limit cap, otherwise it will iterate to determine the
- splits required to fit the full discount table across multiple test cases.
+ The function will return the full discount table as a single test case if
+ the fork has no transaction gas limit cap, otherwise it will iterate to
+ determine the splits required to fit the full discount table across
+ multiple test cases.
"""
def parametrize_by_fork(fork: Fork) -> List[ParameterSet]:
@@ -249,8 +263,9 @@ def test_valid_gas_g1msm(
tx: Transaction,
):
"""
- Test the BLS12_G1MSM discount gas table in full, by expecting the call to succeed for
- all possible input lengths because the appropriate amount of gas is provided.
+ Test the BLS12_G1MSM discount gas table in full, by expecting the call to
+ succeed for all possible input lengths because the appropriate amount of
+ gas is provided.
If any of the calls fail, the test will fail.
"""
@@ -307,8 +322,9 @@ def test_invalid_gas_g1msm(
tx: Transaction,
):
"""
- Test the BLS12_G1MSM discount gas table in full, by expecting the call to fail for
- all possible input lengths because the appropriate amount of gas is not provided.
+ Test the BLS12_G1MSM discount gas table in full, by expecting the call to
+ fail for all possible input lengths because the appropriate amount of gas
+ is not provided.
If any of the calls succeeds, the test will fail.
"""
@@ -371,8 +387,9 @@ def test_invalid_length_g1msm(
tx: Transaction,
):
"""
- Test the BLS12_G1MSM discount gas table in full, by expecting the call to fail for
- all possible input lengths provided because they are too long or short, or zero length.
+ Test the BLS12_G1MSM discount gas table in full, by expecting the call to
+ fail for all possible input lengths provided because they are too long or
+ short, or zero length.
If any of the calls succeeds, the test will fail.
"""
@@ -402,8 +419,9 @@ def test_valid_gas_g2msm(
tx: Transaction,
):
"""
- Test the BLS12_G2MSM discount gas table in full, by expecting the call to succeed for
- all possible input lengths because the appropriate amount of gas is provided.
+ Test the BLS12_G2MSM discount gas table in full, by expecting the call to
+ succeed for all possible input lengths because the appropriate amount of
+ gas is provided.
If any of the calls fail, the test will fail.
"""
@@ -460,8 +478,9 @@ def test_invalid_gas_g2msm(
tx: Transaction,
):
"""
- Test the BLS12_G2MSM discount gas table in full, by expecting the call to fail for
- all possible input lengths because the appropriate amount of gas is not provided.
+ Test the BLS12_G2MSM discount gas table in full, by expecting the call to
+ fail for all possible input lengths because the appropriate amount of gas
+ is not provided.
If any of the calls succeeds, the test will fail.
"""
@@ -524,8 +543,9 @@ def test_invalid_length_g2msm(
tx: Transaction,
):
"""
- Test the BLS12_G2MSM discount gas table in full, by expecting the call to fail for
- all possible input lengths provided because they are too long or short, or zero length.
+ Test the BLS12_G2MSM discount gas table in full, by expecting the call to
+ fail for all possible input lengths provided because they are too long or
+ short, or zero length.
If any of the calls succeeds, the test will fail.
"""
@@ -552,8 +572,8 @@ def test_valid_gas_pairing(
tx: Transaction,
):
"""
- Test the BLS12_PAIRING precompile, by expecting the call to succeed for all possible input
- lengths (up to k == PAIRINGS_TO_TEST).
+ Test the BLS12_PAIRING precompile, by expecting the call to succeed for all
+ possible input lengths (up to k == PAIRINGS_TO_TEST).
If any of the calls fails, the test will fail.
"""
@@ -608,8 +628,9 @@ def test_invalid_gas_pairing(
tx: Transaction,
):
"""
- Test the BLS12_PAIRING precompile, by expecting the call to fail for all possible input
- lengths (up to k == PAIRINGS_TO_TEST) because the appropriate amount of gas is not provided.
+ Test the BLS12_PAIRING precompile, by expecting the call to fail for all
+ possible input lengths (up to k == PAIRINGS_TO_TEST) because the
+ appropriate amount of gas is not provided.
If any of the calls succeeds, the test will fail.
"""
@@ -640,7 +661,9 @@ def test_invalid_zero_length_pairing(
post: dict,
tx: Transaction,
):
- """Test the BLS12_PAIRING precompile by passing an input with zero length."""
+ """
+ Test the BLS12_PAIRING precompile by passing an input with zero length.
+ """
state_test(
env=env,
pre=pre,
@@ -670,8 +693,9 @@ def test_invalid_length_pairing(
tx: Transaction,
):
"""
- Test the BLS12_PAIRING precompile, by expecting the call to fail for all possible input
- lengths (up to k == PAIRINGS_TO_TEST) because the incorrect input length was used.
+ Test the BLS12_PAIRING precompile, by expecting the call to fail for all
+ possible input lengths (up to k == PAIRINGS_TO_TEST) because the incorrect
+ input length was used.
If any of the calls succeeds, the test will fail.
"""
diff --git a/tests/prague/eip2935_historical_block_hashes_from_state/test_block_hashes.py b/tests/prague/eip2935_historical_block_hashes_from_state/test_block_hashes.py
index 5d9f77b801a..28c8c0e3419 100644
--- a/tests/prague/eip2935_historical_block_hashes_from_state/test_block_hashes.py
+++ b/tests/prague/eip2935_historical_block_hashes_from_state/test_block_hashes.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935)
- Test [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
-""" # noqa: E501
+Tests [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
+"""
from typing import Dict, List
@@ -33,14 +32,17 @@ def generate_block_check_code(
check_contract_first: bool = False,
) -> Bytecode:
"""
- Generate EVM code to check that the block hashes are correctly stored in the state.
+ Generate EVM code to check that the block hashes are correctly stored in
+ the state.
Args:
- check_block_number (int): The block number to check.
- current_block_number (int): The current block number where the check is taking place.
- fork_block_number (int): The block number of the fork transition.
- storage (Storage): The storage object to use.
- check_contract_first (bool): Whether to check the contract first, for slot warming checks.
+ check_block_number (int): The block number to check.
+ current_block_number (int): The current block number where the check is
+ taking place.
+ fork_block_number (int): The block number of the fork transition.
+ storage (Storage): The storage object to use.
+ check_contract_first (bool): Whether to check the contract first,
+ for slot warming checks.
"""
contract_ret_offset = 32
@@ -108,13 +110,13 @@ def test_block_hashes_history_at_transition(
blocks_after_fork: int,
):
"""
- Tests that block hashes are stored correctly at the system contract address after the fork
- transition. Block hashes are stored incrementally at the transition until the
- `HISTORY_SERVE_WINDOW` ring buffer is full. Afterwards the oldest block hash is replaced by the
- new one.
+ Tests that block hashes are stored correctly at the system contract address
+ after the fork transition. Block hashes are stored incrementally at the
+ transition until the `HISTORY_SERVE_WINDOW` ring buffer is full. Afterwards
+ the oldest block hash is replaced by the new one.
- Note: The block hashes before the fork are no longer stored in the contract at the moment of
- the transition.
+ Note: The block hashes before the fork are no longer stored in the contract
+ at the moment of the transition.
"""
blocks: List[Block] = []
assert blocks_before_fork >= 1 and blocks_before_fork < Spec.FORK_TIMESTAMP
@@ -127,9 +129,9 @@ def test_block_hashes_history_at_transition(
for i in range(blocks_before_fork):
txs: List[Transaction] = []
if i == blocks_before_fork - 1:
- # On the last block before the fork, `BLOCKHASH` must return values for the last 256
- # blocks but not for the blocks before that.
- # And `HISTORY_STORAGE_ADDRESS` should be empty.
+ # On the last block before the fork, `BLOCKHASH` must return values
+ # for the last 256 blocks but not for the blocks before that. And
+ # `HISTORY_STORAGE_ADDRESS` should be empty.
code = Bytecode()
storage = Storage()
@@ -165,17 +167,18 @@ def test_block_hashes_history_at_transition(
blocks.append(Block(timestamp=current_block_number, txs=txs))
current_block_number += 1
- # Add blocks after the fork transition to gradually fill up the `HISTORY_SERVE_WINDOW`
+ # Add blocks after the fork transition to gradually fill up the
+ # `HISTORY_SERVE_WINDOW`
for i in range(blocks_after_fork):
txs = []
- # On these blocks, `BLOCKHASH` will still return values for the last 256 blocks, and
- # `HISTORY_STORAGE_ADDRESS` should now serve values for the previous blocks in the new
- # fork.
+ # On these blocks, `BLOCKHASH` will still return values for the last
+ # 256 blocks, and `HISTORY_STORAGE_ADDRESS` should now serve values for
+ # the previous blocks in the new fork.
code = Bytecode()
storage = Storage()
- # Check that each block can return previous blockhashes if `BLOCKHASH_OLD_WINDOW` and or
- # `HISTORY_SERVE_WINDOW`.
+        # Check that each block can return previous blockhashes if
+        # `BLOCKHASH_OLD_WINDOW` and/or `HISTORY_SERVE_WINDOW`.
for j in range(current_block_number):
code += generate_block_check_code(
check_block_number=j,
@@ -227,10 +230,10 @@ def test_block_hashes_history(
check_contract_first: bool,
):
"""
- Tests that block hashes are stored correctly at the system contract address after the fork
- transition. Block hashes are stored incrementally at the transition until the
- `HISTORY_SERVE_WINDOW` ring buffer is full. Afterwards the oldest block hash is replaced by the
- new one.
+ Tests that block hashes are stored correctly at the system contract address
+ after the fork transition. Block hashes are stored incrementally at the
+ transition until the `HISTORY_SERVE_WINDOW` ring buffer is full. Afterwards
+ the oldest block hash is replaced by the new one.
"""
blocks: List[Block] = []
@@ -245,9 +248,9 @@ def test_block_hashes_history(
current_block_number += 1
txs = []
- # On these blocks, `BLOCKHASH` will still return values for the last 256 blocks, and
- # `HISTORY_STORAGE_ADDRESS` should now serve values for the previous blocks in the new
- # fork.
+ # On these blocks, `BLOCKHASH` will still return values for the last 256
+ # blocks, and `HISTORY_STORAGE_ADDRESS` should now serve values for the
+ # previous blocks in the new fork.
code = Bytecode()
storage = Storage()
@@ -321,7 +324,10 @@ def test_block_hashes_history(
def test_block_hashes_call_opcodes(
blockchain_test: BlockchainTestFiller, pre: Alloc, call_opcode: Op
):
- """Test that the call opcodes can be used to call the history contract and get the block hashes.""" # noqa: E501
+ """
+ Test that the call opcodes can be used to call the history contract and get
+ the block hashes.
+ """
blocks = []
blocks.append(Block())
@@ -382,11 +388,11 @@ def test_invalid_history_contract_calls(
reverts: bool,
):
"""
- Test calling the history contract with invalid block numbers, such as blocks from the future
- or overflowing block numbers.
+ Test calling the history contract with invalid block numbers, such as
+ blocks from the future or overflowing block numbers.
- Also test the BLOCKHASH opcode with the same block numbers, which should not affect the
- behavior of the opcode, even after verkle.
+ Also test the BLOCKHASH opcode with the same block numbers, which should
+ not affect the behavior of the opcode, even after verkle.
"""
storage = Storage()
diff --git a/tests/prague/eip2935_historical_block_hashes_from_state/test_contract_deployment.py b/tests/prague/eip2935_historical_block_hashes_from_state/test_contract_deployment.py
index 2175b38338a..6593bd45275 100644
--- a/tests/prague/eip2935_historical_block_hashes_from_state/test_contract_deployment.py
+++ b/tests/prague/eip2935_historical_block_hashes_from_state/test_contract_deployment.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
- Test system contract deployment for [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
-""" # noqa: E501
+Tests [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
+"""
from os.path import realpath
from pathlib import Path
@@ -44,7 +43,8 @@ def test_system_contract_deployment(
**kwargs,
):
"""Verify deployment of the block hashes system contract."""
- # Deploy a contract that calls the history contract and verifies the block hashes.
+ # Deploy a contract that calls the history contract and verifies the block
+ # hashes.
yield Block() # Empty block just to have more history in the contract.
# We are going to query blocks even before contract deployment.
@@ -77,27 +77,31 @@ def test_system_contract_deployment(
storage: Dict
if test_type == DeploymentTestType.DEPLOY_BEFORE_FORK:
- # Fork happens at block 2, and the contract is already there, so from block number 1 and
- # after, the block hashes should be there.
+ # Fork happens at block 2, and the contract is already there, so from
+ # block number 1 and after, the block hashes should be there.
storage = {
1: 1, # Block prior to the fork, it's the first hash saved.
2: 1, # Fork block, hash should be there.
- 3: 1, # Empty block added at the start of this function, hash should be there.
+ 3: 1, # Empty block added at the start of this function, hash
+ # should be there.
}
elif test_type == DeploymentTestType.DEPLOY_ON_FORK_BLOCK:
# The contract should have the block hashes after contract deployment.
storage = {
1: 1, # Fork and deployment block, the first hash that gets added.
2: 1, # Deployment block, hash should be there.
- 3: 1, # Empty block added at the start of this function, hash should be there.
+ 3: 1, # Empty block added at the start of this function, hash
+ # should be there.
}
elif test_type == DeploymentTestType.DEPLOY_AFTER_FORK:
# The contract should have the block hashes after contract deployment.
storage = {
1: 0, # Fork block, but contract is not there yet.
- 2: 1, # Deployment block, this is the first hash that gets added because it's added on
+ 2: 1, # Deployment block, this is the first hash that gets added
+ # because it's added on
# the next block.
- 3: 1, # Empty block added at the start of this function, hash should be there.
+ 3: 1, # Empty block added at the start of this function, hash
+ # should be there.
}
post[deployed_contract] = Account(
diff --git a/tests/prague/eip6110_deposits/conftest.py b/tests/prague/eip6110_deposits/conftest.py
index 4c3e88e2623..11d9a4bcab4 100644
--- a/tests/prague/eip6110_deposits/conftest.py
+++ b/tests/prague/eip6110_deposits/conftest.py
@@ -13,8 +13,8 @@
@pytest.fixture
def update_pre(pre: Alloc, requests: List[DepositInteractionBase]):
"""
- Init state of the accounts. Every deposit transaction defines their own pre-state
- requirements, and this fixture aggregates them all.
+ Init state of the accounts. Every deposit transaction defines their own
+ pre-state requirements, and this fixture aggregates them all.
"""
for d in requests:
d.update_pre(pre)
@@ -34,7 +34,10 @@ def txs(
@pytest.fixture
def block_body_override_requests() -> List[DepositRequest] | None:
- """List of requests that overwrite the requests in the header. None by default."""
+ """
+ List of requests that overwrite the requests in the header. None by
+ default.
+ """
return None
@@ -48,7 +51,9 @@ def exception() -> BlockException | None:
def included_requests(
requests: List[DepositInteractionBase],
) -> List[DepositRequest]:
- """Return the list of deposit requests that should be included in each block."""
+ """
+ Return the list of deposit requests that should be included in each block.
+ """
valid_requests: List[DepositRequest] = []
for d in requests:
diff --git a/tests/prague/eip6110_deposits/helpers.py b/tests/prague/eip6110_deposits/helpers.py
index b664f8413ee..8c06c1f307c 100644
--- a/tests/prague/eip6110_deposits/helpers.py
+++ b/tests/prague/eip6110_deposits/helpers.py
@@ -77,22 +77,16 @@ class DepositRequest(DepositRequestBase):
"""Deposit request descriptor."""
valid: bool = True
- """
- Whether the deposit request is valid or not.
- """
+ """Whether the deposit request is valid or not."""
gas_limit: int = 1_000_000
- """
- Gas limit for the call.
- """
+ """Gas limit for the call."""
calldata_modifier: Callable[[bytes], bytes] = lambda x: x
- """
- Calldata modifier function.
- """
+ """Calldata modifier function."""
extra_wei: int = 0
"""
- Extra amount in wei to be sent with the deposit.
- If this value modulo 10**9 is not zero, the deposit will be invalid.
- The value can be negative but if the total value is negative, an exception will be raised.
+ Extra amount in wei to be sent with the deposit. If this value modulo 10**9
+ is not zero, the deposit will be invalid. The value can be negative but if
+ the total value is negative, an exception will be raised.
"""
interaction_contract_address: ClassVar[Address] = Address(Spec.DEPOSIT_CONTRACT_ADDRESS)
@@ -100,8 +94,8 @@ class DepositRequest(DepositRequestBase):
@cached_property
def value(self) -> int:
"""
- Return the value of the deposit transaction, equal to the amount in gwei plus the
- extra amount in wei.
+ Return the value of the deposit transaction, equal to the amount in
+ gwei plus the extra amount in wei.
"""
value = (self.amount * 10**9) + self.extra_wei
if value < 0:
@@ -123,13 +117,14 @@ def deposit_data_root(self) -> Hash:
@cached_property
def calldata(self) -> bytes:
"""
- Return the calldata needed to call the beacon chain deposit contract and make the deposit.
+ Return the calldata needed to call the beacon chain deposit contract
+ and make the deposit.
deposit(
- bytes calldata pubkey,
- bytes calldata withdrawal_credentials,
- bytes calldata signature,
- bytes32 deposit_data_root
+ bytes calldata pubkey,
+ bytes calldata withdrawal_credentials,
+ bytes calldata signature,
+ bytes32 deposit_data_root
)
"""
offset_length = 32
@@ -155,11 +150,11 @@ def log(self, *, include_abi_encoding: bool = True) -> bytes:
Return the log data for the deposit event.
event DepositEvent(
- bytes pubkey,
- bytes withdrawal_credentials,
- bytes amount,
- bytes signature,
- bytes index
+ bytes pubkey,
+ bytes withdrawal_credentials,
+ bytes amount,
+ bytes signature,
+ bytes index
);
"""
data = bytearray(576)
@@ -199,17 +194,11 @@ class DepositInteractionBase:
"""Base class for all types of deposit transactions we want to test."""
sender_balance: int = 32_000_000_000_000_000_000 * 100
- """
- Balance of the account that sends the transaction.
- """
+ """Balance of the account that sends the transaction."""
sender_account: EOA | None = None
- """
- Account that sends the transaction.
- """
+ """Account that sends the transaction."""
requests: List[DepositRequest]
- """
- Deposit request to be included in the block.
- """
+ """Deposit request to be included in the block."""
def transactions(self) -> List[Transaction]:
"""Return a transaction for the deposit request."""
@@ -220,13 +209,19 @@ def update_pre(self, pre: Alloc):
raise NotImplementedError
def valid_requests(self, current_minimum_fee: int) -> List[DepositRequest]:
- """Return the list of deposit requests that should be included in the block."""
+ """
+ Return the list of deposit requests that should be included in the
+ block.
+ """
raise NotImplementedError
@dataclass(kw_only=True)
class DepositTransaction(DepositInteractionBase):
- """Class used to describe a deposit originated from an externally owned account."""
+ """
+ Class used to describe a deposit originated from an externally owned
+ account.
+ """
def transactions(self) -> List[Transaction]:
"""Return a transaction for the deposit request."""
@@ -248,7 +243,10 @@ def update_pre(self, pre: Alloc):
self.sender_account = pre.fund_eoa(self.sender_balance)
def valid_requests(self, current_minimum_fee: int) -> List[DepositRequest]:
- """Return the list of deposit requests that should be included in the block."""
+ """
+ Return the list of deposit requests that should be included in the
+ block.
+ """
return [
request
for request in self.requests
@@ -261,34 +259,23 @@ class DepositContract(DepositInteractionBase):
"""Class used to describe a deposit originated from a contract."""
tx_gas_limit: int = 1_000_000
- """
- Gas limit for the transaction.
- """
+ """Gas limit for the transaction."""
tx_value: int = 0
- """
- Value to send with the transaction.
- """
+ """Value to send with the transaction."""
contract_balance: int = 32_000_000_000_000_000_000 * 100
- """
- Balance of the contract that sends the deposit requests.
- """
+ """Balance of the contract that sends the deposit requests."""
contract_address: Address | None = None
- """
- Address of the contract that sends the deposit requests.
- """
+ """Address of the contract that sends the deposit requests."""
entry_address: Address | None = None
- """
- Address to send the transaction to.
- """
+ """Address to send the transaction to."""
call_type: Op = field(default_factory=lambda: Op.CALL)
- """
- Type of call to be made to the deposit contract.
- """
+ """Type of call to be made to the deposit contract."""
call_depth: int = 2
"""
- Frame depth of the beacon chain deposit contract when it executes the deposit requests.
+ Frame depth of the beacon chain deposit contract when it executes the
+ deposit requests.
"""
extra_code: Bytecode = field(default_factory=Bytecode)
"""
@@ -357,5 +344,8 @@ def update_pre(self, pre: Alloc):
)
def valid_requests(self, current_minimum_fee: int) -> List[DepositRequest]:
- """Return the list of deposit requests that should be included in the block."""
+ """
+ Return the list of deposit requests that should be included in the
+ block.
+ """
return [d for d in self.requests if d.valid and d.value >= current_minimum_fee]
diff --git a/tests/prague/eip6110_deposits/test_deposits.py b/tests/prague/eip6110_deposits/test_deposits.py
index 6e3d62fcc12..edf36f75107 100644
--- a/tests/prague/eip6110_deposits/test_deposits.py
+++ b/tests/prague/eip6110_deposits/test_deposits.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests [EIP-6110: Supply validator deposits on chain](https://eips.ethereum.org/EIPS/eip-6110)
- Test [EIP-6110: Supply validator deposits on chain](https://eips.ethereum.org/EIPS/eip-6110).
-""" # noqa: E501
+Tests validator deposit functionality.
+
+Tests the validator deposit functionality implementation from
+[EIP-6110: Supply validator deposits on chain](https://eips.ethereum.org/EIPS/eip-6110).
+"""
from typing import List
@@ -185,7 +187,8 @@
amount=32_000_000_000,
signature=0x03,
index=0x0,
- # From traces, gas used by the first tx is 82,718 so reduce by one here
+ # From traces, gas used by the first tx is 82,718
+ # so reduce by one here
gas_limit=0x1431D,
valid=False,
),
@@ -218,7 +221,8 @@
amount=32_000_000_000,
signature=0x03,
index=0x0,
- # From traces, gas used by the second tx is 68,594, reduce by one here
+ # From traces, gas used by the second tx is 68,594,
+ # reduce by one here
gas_limit=0x10BF1,
valid=False,
),
@@ -838,8 +842,8 @@
valid=False,
)
],
- # Send 32 ETH minus 1 wei to the contract, note `DepositRequest.amount` is in
- # gwei
+ # Send 32 ETH minus 1 wei to the contract, note
+ # `DepositRequest.amount` is in gwei
tx_value=32_000_000_000 * 10**9 - 1,
contract_balance=0,
),
@@ -1182,8 +1186,8 @@ def test_deposit_negative(
blocks: List[Block],
):
"""
- Test producing a block with the incorrect deposits in the body of the block,
- and/or Engine API payload.
+ Test producing a block with the incorrect deposits in the body of the
+ block, and/or Engine API payload.
"""
blockchain_test(
pre=pre,
diff --git a/tests/prague/eip6110_deposits/test_modified_contract.py b/tests/prague/eip6110_deposits/test_modified_contract.py
index bbb1f4fba22..80f58a868d0 100644
--- a/tests/prague/eip6110_deposits/test_modified_contract.py
+++ b/tests/prague/eip6110_deposits/test_modified_contract.py
@@ -1,4 +1,7 @@
-"""Test variants of the deposit contract which adheres the log-style as described in EIP-6110."""
+"""
+Test variants of the deposit contract which adheres the log-style as described
+in EIP-6110.
+"""
import pytest
@@ -44,8 +47,9 @@
DEFAULT_DEPOSIT_REQUEST_LOG_DATA_DICT = {
"pubkey_data": bytes(DEFAULT_DEPOSIT_REQUEST.pubkey),
"withdrawal_credentials_data": bytes(DEFAULT_DEPOSIT_REQUEST.withdrawal_credentials),
- # Note: after converting to bytes, it is converted to little-endian by `[::-1]`
- # (This happens on-chain also, but this is done by the solidity contract)
+ # Note: after converting to bytes, it is converted to little-endian by
+ # `[::-1]` (This happens on-chain also, but this is done by the solidity
+ # contract)
"amount_data": bytes.fromhex("0" + DEFAULT_DEPOSIT_REQUEST.amount.hex()[2:])[::-1],
"signature_data": bytes(DEFAULT_DEPOSIT_REQUEST.signature),
"index_data": bytes(DEFAULT_DEPOSIT_REQUEST.index),
@@ -77,36 +81,43 @@ def test_extra_logs(
pre: Alloc,
include_deposit_event: bool,
):
- """Test deposit contract emitting more log event types than the ones in mainnet."""
- # Supplant mainnet contract with a variant that emits a `Transfer`` log
- # If `include_deposit_event` is `True``, it will also emit a `DepositEvent` log`
+ """
+ Test deposit contract emitting more log event types than the ones in
+ mainnet.
+ """
+    # Supplant mainnet contract with a variant that emits a `Transfer` log.
+    # If `include_deposit_event` is `True`, it will also emit a
+    # `DepositEvent` log.
# ERC20 token transfer log (Sepolia)
- # https://sepolia.etherscan.io/tx/0x2d71f3085a796a0539c9cc28acd9073a67cf862260a41475f000dd101279f94f
- # JSON RPC:
- # curl https://sepolia.infura.io/v3/APIKEY \
- # -X POST \
- # -H "Content-Type: application/json" \
- # -d '{"jsonrpc": "2.0", "method": "eth_getLogs",
- # "params": [{"address": "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D",
- # "blockHash": "0x8062a17fa791f5dbd59ea68891422e3299ca4e80885a89acf3fc706c8bceef53"}],
+ # https://sepolia.etherscan.io/tx/
+ # 0x2d71f3085a796a0539c9cc28acd9073a67cf862260a41475f000dd101279f94f
+ # JSON RPC: curl https://sepolia.infura.io/v3/APIKEY \ -X POST \ -H
+ # "Content-Type: application/json" \ -d '{"jsonrpc": "2.0", "method":
+ # "eth_getLogs", "params": [{"address":
+ # "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D", "blockHash":
+ # "0x8062a17fa791f5dbd59ea68891422e3299ca4e80885a89acf3fc706c8bceef53"}],
# "id": 1}'
# {"jsonrpc":"2.0","id":1,"result":
# [{"removed":false,"logIndex":"0x80","transactionIndex":"0x56",
- # "transactionHash":"0x2d71f3085a796a0539c9cc28acd9073a67cf862260a41475f000dd101279f94f",
- # "blockHash":"0x8062a17fa791f5dbd59ea68891422e3299ca4e80885a89acf3fc706c8bceef53",
+ # "transactionHash":
+ # "0x2d71f3085a796a0539c9cc28acd9073a67cf862260a41475f000dd101279f94f",
+ # "blockHash":
+ # "0x8062a17fa791f5dbd59ea68891422e3299ca4e80885a89acf3fc706c8bceef53",
# "blockNumber":"0x794fb5",
# "address":"0x7f02c3e3c98b133055b8b348b2ac625669ed295d",
- # "data":"0x0000000000000000000000000000000000000000000000000000000000000001",
- # "topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
+ # "data":
+ # "0x0000000000000000000000000000000000000000000000000000000000000001",
+ # "topics":
+ # ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
# "0x0000000000000000000000006885e36bfcb68cb383dfe90023a462c03bcb2ae5",
# "0x00000000000000000000000080b5dc88c98e528bf9cb4b7f0f076ac41da24651"]
bytecode = Op.LOG3(
- # ERC-20 token transfer log
- # ERC-20 token transfers are LOG3, since the topic, the sender, and receiver
- # are all topics (the sender and receiver are `indexed` in the solidity event)
+        # ERC-20 token transfer log. ERC-20 token transfers are LOG3,
+        # since the topic, the sender, and receiver are all topics (the
+        # sender and receiver are `indexed` in the solidity event).
0,
32,
0xDDF252AD1BE2C89B69C2B068FC378DAA952BA7F163C4A11628F55A4DF523B3EF,
@@ -171,7 +182,9 @@ def test_extra_logs(
def test_invalid_layout(
blockchain_test: BlockchainTestFiller, pre: Alloc, log_argument: str, value: str
):
- """Test deposit contract emitting logs with invalid layouts (sizes/offsets)."""
+ """
+ Test deposit contract emitting logs with invalid layouts (sizes/offsets).
+ """
log_params = {**DEFAULT_DEPOSIT_REQUEST_LOG_DATA_DICT}
log_params[log_argument] = 0 if value == "zero" else 2**256 - 1 # type: ignore
@@ -225,7 +238,10 @@ def test_invalid_layout(
)
@pytest.mark.exception_test
def test_invalid_log_length(blockchain_test: BlockchainTestFiller, pre: Alloc, slice_bytes: bool):
- """Test deposit contract emitting logs with invalid log length (one byte more or less)."""
+ """
+ Test deposit contract emitting logs with invalid log length (one byte more
+ or less).
+ """
changed_log = DEFAULT_REQUEST_LOG[:-1] if slice_bytes else DEFAULT_REQUEST_LOG + b"\x00"
bytecode = Om.MSTORE(changed_log) + Op.LOG1(
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/conftest.py b/tests/prague/eip7002_el_triggerable_withdrawals/conftest.py
index c134463011a..ac66187db06 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/conftest.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/conftest.py
@@ -18,8 +18,8 @@ def update_pre(
blocks_withdrawal_requests: List[List[WithdrawalRequestInteractionBase]],
):
"""
- Init state of the accounts. Every deposit transaction defines their own pre-state
- requirements, and this fixture aggregates them all.
+ Init state of the accounts. Every deposit transaction defines their own
+ pre-state requirements, and this fixture aggregates them all.
"""
for requests in blocks_withdrawal_requests:
for r in requests:
@@ -31,7 +31,10 @@ def included_requests(
update_pre: None, # Fixture is used for its side effects
blocks_withdrawal_requests: List[List[WithdrawalRequestInteractionBase]],
) -> List[List[WithdrawalRequest]]:
- """Return the list of withdrawal requests that should be included in each block."""
+ """
+ Return the list of withdrawal requests that should be included in each
+ block.
+ """
excess_withdrawal_requests = 0
carry_over_requests: List[WithdrawalRequest] = []
per_block_included_requests: List[List[WithdrawalRequest]] = []
@@ -110,8 +113,10 @@ def blocks(
timestamp += 1
return blocks + [
+ # Add an empty block at the end to verify that no more withdrawal
+ # requests are included
Block(
header_verify=Header(requests_hash=Requests()),
timestamp=timestamp,
)
- ] # Add an empty block at the end to verify that no more withdrawal requests are included
+ ]
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/helpers.py b/tests/prague/eip7002_el_triggerable_withdrawals/helpers.py
index 2a0f7216ad9..d8fa578a6c0 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/helpers.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/helpers.py
@@ -17,22 +17,16 @@ class WithdrawalRequest(WithdrawalRequestBase):
fee: int = 0
"""
-    Fee to be paid to the system contract for the withdrawal request.
-    This is different from `amount` which is the amount of gwei to be withdrawn on the beacon
-    chain.
+    Fee to be paid to the system contract for the withdrawal request. This is
+    different from `amount` which is the amount of gwei to be withdrawn on the
+    beacon chain.
"""
valid: bool = True
- """
- Whether the withdrawal request is valid or not.
- """
+ """Whether the withdrawal request is valid or not."""
gas_limit: int = 1_000_000
- """
- Gas limit for the call.
- """
+ """Gas limit for the call."""
calldata_modifier: Callable[[bytes], bytes] = lambda x: x
- """
- Calldata modifier function.
- """
+ """Calldata modifier function."""
interaction_contract_address: ClassVar[Address] = Address(
Spec.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS
@@ -41,23 +36,26 @@ class WithdrawalRequest(WithdrawalRequestBase):
@property
def value(self) -> int:
"""
- Return the value of the call to the withdrawal request contract, equal to the fee
- to be paid.
+ Return the value of the call to the withdrawal request contract, equal
+ to the fee to be paid.
"""
return self.fee
@cached_property
def calldata(self) -> bytes:
"""
- Return the calldata needed to call the withdrawal request contract and make the
- withdrawal.
+ Return the calldata needed to call the withdrawal request contract and
+ make the withdrawal.
"""
return self.calldata_modifier(
self.validator_pubkey + self.amount.to_bytes(8, byteorder="big")
)
def with_source_address(self, source_address: Address) -> "WithdrawalRequest":
- """Return a new instance of the withdrawal request with the source address set."""
+ """
+ Return a new instance of the withdrawal request with the source address
+ set.
+ """
return self.copy(source_address=source_address)
@@ -66,17 +64,11 @@ class WithdrawalRequestInteractionBase:
"""Base class for all types of withdrawal transactions we want to test."""
sender_balance: int = 1_000_000_000_000_000_000
- """
- Balance of the account that sends the transaction.
- """
+ """Balance of the account that sends the transaction."""
sender_account: EOA | None = None
- """
- Account that will send the transaction.
- """
+ """Account that will send the transaction."""
requests: List[WithdrawalRequest]
- """
- Withdrawal request to be included in the block.
- """
+ """Withdrawal request to be included in the block."""
def transactions(self) -> List[Transaction]:
"""Return a transaction for the withdrawal request."""
@@ -87,13 +79,19 @@ def update_pre(self, pre: Alloc):
raise NotImplementedError
def valid_requests(self, current_minimum_fee: int) -> List[WithdrawalRequest]:
- """Return the list of withdrawal requests that should be valid in the block."""
+ """
+ Return the list of withdrawal requests that should be valid in the
+ block.
+ """
raise NotImplementedError
@dataclass(kw_only=True)
class WithdrawalRequestTransaction(WithdrawalRequestInteractionBase):
- """Class used to describe a withdrawal request originated from an externally owned account."""
+ """
+ Class used to describe a withdrawal request originated from an externally
+ owned account.
+ """
def transactions(self) -> List[Transaction]:
"""Return a transaction for the withdrawal request."""
@@ -129,9 +127,7 @@ class WithdrawalRequestContract(WithdrawalRequestInteractionBase):
"""Class used to describe a withdrawal originated from a contract."""
tx_gas_limit: int = 1_000_000
- """
- Gas limit for the transaction.
- """
+ """Gas limit for the transaction."""
contract_balance: int = 1_000_000_000_000_000_000
"""
@@ -142,22 +138,14 @@ class WithdrawalRequestContract(WithdrawalRequestInteractionBase):
Address of the contract that will make the call to the pre-deploy contract.
"""
entry_address: Address | None = None
- """
- Address to send the transaction to.
- """
+ """Address to send the transaction to."""
call_type: Op = field(default_factory=lambda: Op.CALL)
- """
- Type of call to be used to make the withdrawal request.
- """
+ """Type of call to be used to make the withdrawal request."""
call_depth: int = 2
- """
- Frame depth of the pre-deploy contract when it executes the call.
- """
+ """Frame depth of the pre-deploy contract when it executes the call."""
extra_code: Bytecode = field(default_factory=Bytecode)
- """
- Extra code to be added to the contract code.
- """
+ """Extra code to be added to the contract code."""
@property
def contract_code(self) -> Bytecode:
@@ -243,12 +231,12 @@ def get_n_fee_increments(n: int) -> List[int]:
def get_n_fee_increment_blocks(n: int) -> List[List[WithdrawalRequestContract]]:
"""
- Return N blocks that should be included in the test such that each subsequent block has an
- increasing fee for the withdrawal requests.
+ Return N blocks that should be included in the test such that each
+ subsequent block has an increasing fee for the withdrawal requests.
- This is done by calculating the number of withdrawals required to reach the next fee increment
- and creating a block with that number of withdrawal requests plus the number of withdrawals
- required to reach the target.
+ This is done by calculating the number of withdrawals required to reach the
+ next fee increment and creating a block with that number of withdrawal
+ requests plus the number of withdrawals required to reach the target.
"""
blocks = []
previous_excess = 0
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/spec.py b/tests/prague/eip7002_el_triggerable_withdrawals/spec.py
index e30a15011be..30f98338726 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/spec.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/spec.py
@@ -1,7 +1,8 @@
"""
Common procedures to test
-[EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
-""" # noqa: E501
+[EIP-7002: Execution layer triggerable
+withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
+"""
from dataclasses import dataclass
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py b/tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py
index 90f690180ed..0d1aa8b5178 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
- Test system contract deployment for [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
-""" # noqa: E501
+Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
+"""
from os.path import realpath
from pathlib import Path
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py b/tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py
index 7346a630ae1..e47a564ede4 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002)
- Test execution layer triggered exits [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
-
-""" # noqa: E501
+Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
+"""
from typing import List
@@ -91,7 +89,10 @@ def test_extra_withdrawals(
pre: Alloc,
requests_list: List[WithdrawalRequest],
):
- """Test how clients were to behave when more than 16 withdrawals would be allowed per block."""
+ """
+    Test how clients would behave if more than 16 withdrawals were allowed
+    per block.
+ """
modified_code: Bytecode = Bytecode()
memory_offset: int = 0
amount_of_requests: int = 0
@@ -116,7 +117,8 @@ def test_extra_withdrawals(
balance=0,
)
- # given a list of withdrawal requests construct a withdrawal request transaction
+ # given a list of withdrawal requests construct a withdrawal request
+ # transaction
withdrawal_request_transaction = WithdrawalRequestTransaction(requests=requests_list)
# prepare withdrawal senders
withdrawal_request_transaction.update_pre(pre=pre)
@@ -144,8 +146,8 @@ def test_extra_withdrawals(
)
def test_system_contract_errors():
"""
- Test system contract raising different errors when called by the system account at the
- end of the block execution.
+ Test system contract raising different errors when called by the system
+ account at the end of the block execution.
To see the list of generated tests, please refer to the
`generate_system_contract_error_test` decorator definition.
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests.py b/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests.py
index d764ceccbc3..0613eae2367 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002)
- Test execution layer triggered exits [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
-
-""" # noqa: E501
+Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
+"""
from typing import List
@@ -816,8 +814,8 @@ def test_withdrawal_requests_negative(
exception: BlockException,
):
"""
- Test blocks where the requests list and the actual withdrawal requests that happened in the
- block's transactions do not match.
+ Test blocks where the requests list and the actual withdrawal requests that
+ happened in the block's transactions do not match.
"""
for d in requests:
d.update_pre(pre)
diff --git a/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests_during_fork.py b/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests_during_fork.py
index e61cf87b7c6..6f9948c0385 100644
--- a/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests_during_fork.py
+++ b/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests_during_fork.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002)
- Test execution layer triggered exits [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
-
-""" # noqa: E501
+Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
+"""
from os.path import realpath
from pathlib import Path
@@ -57,8 +55,9 @@
validator_pubkey=0x02,
amount=0,
fee=Spec.get_fee(10),
- # First post-fork withdrawal request, will not be included
- # because the inhibitor is cleared at the end of the block
+ # First post-fork withdrawal request, will not
+ # be included because the inhibitor is cleared
+ # at the end of the block
valid=False,
)
],
@@ -91,8 +90,12 @@ def test_withdrawal_requests_during_fork(
blocks: List[Block],
pre: Alloc,
):
- """Test making a withdrawal request to the beacon chain at the time of the fork."""
- # We need to delete the deployed contract that comes by default in the pre state.
+ """
+ Test making a withdrawal request to the beacon chain at the time of the
+ fork.
+ """
+ # We need to delete the deployed contract that comes by default in the pre
+ # state.
pre[Spec.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS] = Account(
balance=0,
code=bytes(),
diff --git a/tests/prague/eip7251_consolidations/conftest.py b/tests/prague/eip7251_consolidations/conftest.py
index 3fd5b8be640..f116162ca47 100644
--- a/tests/prague/eip7251_consolidations/conftest.py
+++ b/tests/prague/eip7251_consolidations/conftest.py
@@ -18,8 +18,8 @@ def update_pre(
blocks_consolidation_requests: List[List[ConsolidationRequestInteractionBase]],
):
"""
- Init state of the accounts. Every deposit transaction defines their own pre-state
- requirements, and this fixture aggregates them all.
+    Init state of the accounts. Every consolidation request transaction
+    defines its own pre-state requirements, and this fixture aggregates them
"""
for requests in blocks_consolidation_requests:
for r in requests:
@@ -31,7 +31,10 @@ def included_requests(
update_pre: None, # Fixture is used for its side effects
blocks_consolidation_requests: List[List[ConsolidationRequestInteractionBase]],
) -> List[List[ConsolidationRequest]]:
- """Return the list of consolidation requests that should be included in each block."""
+ """
+ Return the list of consolidation requests that should be included in each
+ block.
+ """
excess_consolidation_requests = 0
carry_over_requests: List[ConsolidationRequest] = []
per_block_included_requests: List[List[ConsolidationRequest]] = []
@@ -39,7 +42,8 @@ def included_requests(
# Get fee for the current block
current_minimum_fee = Spec.get_fee(excess_consolidation_requests)
- # With the fee, get the valid consolidation requests for the current block
+ # With the fee, get the valid consolidation requests for the current
+ # block
current_block_requests = []
for w in block_consolidation_requests:
current_block_requests += w.valid_requests(current_minimum_fee)
@@ -111,4 +115,5 @@ def blocks(
header_verify=Header(requests_hash=Requests()),
timestamp=timestamp,
)
- ] # Add an empty block at the end to verify that no more consolidation requests are included
+    ] # Add an empty block at the end to verify that no more consolidation
+      # requests are included
diff --git a/tests/prague/eip7251_consolidations/helpers.py b/tests/prague/eip7251_consolidations/helpers.py
index 466afb8455a..20795482dbb 100644
--- a/tests/prague/eip7251_consolidations/helpers.py
+++ b/tests/prague/eip7251_consolidations/helpers.py
@@ -16,21 +16,13 @@ class ConsolidationRequest(ConsolidationRequestBase):
"""Class used to describe a consolidation request in a test."""
fee: int = 0
- """
- Fee to be paid to the system contract for the consolidation request.
- """
+ """Fee to be paid to the system contract for the consolidation request."""
valid: bool = True
- """
- Whether the consolidation request is valid or not.
- """
+ """Whether the consolidation request is valid or not."""
gas_limit: int = 1_000_000
- """
- Gas limit for the call.
- """
+ """Gas limit for the call."""
calldata_modifier: Callable[[bytes], bytes] = lambda x: x
- """
- Calldata modifier function.
- """
+ """Calldata modifier function."""
interaction_contract_address: ClassVar[Address] = Address(
Spec.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS
@@ -39,40 +31,39 @@ class ConsolidationRequest(ConsolidationRequestBase):
@property
def value(self) -> int:
"""
- Return the value of the call to the consolidation request contract, equal to the fee
- to be paid.
+ Return the value of the call to the consolidation request contract,
+ equal to the fee to be paid.
"""
return self.fee
@cached_property
def calldata(self) -> bytes:
"""
- Return the calldata needed to call the consolidation request contract and make the
- consolidation.
+ Return the calldata needed to call the consolidation request contract
+ and make the consolidation.
"""
return self.calldata_modifier(self.source_pubkey + self.target_pubkey)
def with_source_address(self, source_address: Address) -> "ConsolidationRequest":
- """Return a new instance of the consolidation request with the source address set."""
+ """
+ Return a new instance of the consolidation request with the source
+ address set.
+ """
return self.copy(source_address=source_address)
@dataclass(kw_only=True)
class ConsolidationRequestInteractionBase:
- """Base class for all types of consolidation transactions we want to test."""
-
- sender_balance: int = 1_000_000_000_000_000_000
"""
- Balance of the account that sends the transaction.
+ Base class for all types of consolidation transactions we want to test.
"""
+
+ sender_balance: int = 1_000_000_000_000_000_000
+ """Balance of the account that sends the transaction."""
sender_account: EOA | None = None
- """
- Account that will send the transaction.
- """
+ """Account that will send the transaction."""
requests: List[ConsolidationRequest]
- """
- Consolidation requests to be included in the block.
- """
+ """Consolidation requests to be included in the block."""
def transactions(self) -> List[Transaction]:
"""Return a transaction for the consolidation request."""
@@ -83,13 +74,19 @@ def update_pre(self, pre: Alloc):
raise NotImplementedError
def valid_requests(self, current_minimum_fee: int) -> List[ConsolidationRequest]:
- """Return the list of consolidation requests that should be valid in the block."""
+ """
+ Return the list of consolidation requests that should be valid in the
+ block.
+ """
raise NotImplementedError
@dataclass(kw_only=True)
class ConsolidationRequestTransaction(ConsolidationRequestInteractionBase):
- """Class to describe a consolidation request originated from an externally owned account."""
+ """
+ Class to describe a consolidation request originated from an externally
+ owned account.
+ """
def transactions(self) -> List[Transaction]:
"""Return a transaction for the consolidation request."""
@@ -125,9 +122,7 @@ class ConsolidationRequestContract(ConsolidationRequestInteractionBase):
"""Class used to describe a consolidation originated from a contract."""
tx_gas_limit: int = 10_000_000
- """
- Gas limit for the transaction.
- """
+ """Gas limit for the transaction."""
contract_balance: int = 1_000_000_000_000_000_000
"""
@@ -138,22 +133,14 @@ class ConsolidationRequestContract(ConsolidationRequestInteractionBase):
Address of the contract that will make the call to the pre-deploy contract.
"""
entry_address: Address | None = None
- """
- Address to send the transaction to.
- """
+ """Address to send the transaction to."""
call_type: Op = field(default_factory=lambda: Op.CALL)
- """
- Type of call to be used to make the consolidation request.
- """
+ """Type of call to be used to make the consolidation request."""
call_depth: int = 2
- """
- Frame depth of the pre-deploy contract when it executes the call.
- """
+ """Frame depth of the pre-deploy contract when it executes the call."""
extra_code: Bytecode = field(default_factory=Bytecode)
- """
- Extra code to be added to the contract code.
- """
+ """Extra code to be added to the contract code."""
@property
def contract_code(self) -> Bytecode:
@@ -239,12 +226,13 @@ def get_n_fee_increments(n: int) -> List[int]:
def get_n_fee_increment_blocks(n: int) -> List[List[ConsolidationRequestContract]]:
"""
- Return N blocks that should be included in the test such that each subsequent block has an
- increasing fee for the consolidation requests.
+ Return N blocks that should be included in the test such that each
+ subsequent block has an increasing fee for the consolidation requests.
- This is done by calculating the number of consolidations required to reach the next fee
- increment and creating a block with that number of consolidation requests plus the number of
- consolidations required to reach the target.
+ This is done by calculating the number of consolidations required to reach
+ the next fee increment and creating a block with that number of
+ consolidation requests plus the number of consolidations required to reach
+ the target.
"""
blocks = []
previous_excess = 0
diff --git a/tests/prague/eip7251_consolidations/test_consolidations.py b/tests/prague/eip7251_consolidations/test_consolidations.py
index bd5f8d81e63..929ba8ba09b 100644
--- a/tests/prague/eip7251_consolidations/test_consolidations.py
+++ b/tests/prague/eip7251_consolidations/test_consolidations.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251)
- Test execution layer triggered consolidations [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
-
-""" # noqa: E501
+Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
+"""
from typing import List
@@ -866,8 +864,8 @@ def test_consolidation_requests_negative(
exception: BlockException,
):
"""
- Test blocks where the requests list and the actual consolidation requests that happened in the
- block's transactions do not match.
+ Test blocks where the requests list and the actual consolidation requests
+ that happened in the block's transactions do not match.
"""
for d in requests:
d.update_pre(pre)
diff --git a/tests/prague/eip7251_consolidations/test_consolidations_during_fork.py b/tests/prague/eip7251_consolidations/test_consolidations_during_fork.py
index 4731b3842f1..36f948c1029 100644
--- a/tests/prague/eip7251_consolidations/test_consolidations_during_fork.py
+++ b/tests/prague/eip7251_consolidations/test_consolidations_during_fork.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251)
- Test execution layer triggered consolidations [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
-
-""" # noqa: E501
+Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
+"""
from os.path import realpath
from pathlib import Path
@@ -57,8 +55,9 @@
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(10),
- # First post-fork consolidation request, will not be included
- # because the inhibitor is cleared at the end of the block
+ # First post-fork consolidation request, will
+ # not be included because the inhibitor is
+ # cleared at the end of the block
valid=False,
)
],
@@ -91,8 +90,12 @@ def test_consolidation_requests_during_fork(
blocks: List[Block],
pre: Alloc,
):
- """Test making a consolidation request to the beacon chain at the time of the fork."""
- # We need to delete the deployed contract that comes by default in the pre state.
+ """
+ Test making a consolidation request to the beacon chain at the time of the
+ fork.
+ """
+ # We need to delete the deployed contract that comes by default in the pre
+ # state.
pre[Spec.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS] = Account(
balance=0,
code=bytes(),
diff --git a/tests/prague/eip7251_consolidations/test_contract_deployment.py b/tests/prague/eip7251_consolidations/test_contract_deployment.py
index f0a66d2b963..0d195306ac1 100644
--- a/tests/prague/eip7251_consolidations/test_contract_deployment.py
+++ b/tests/prague/eip7251_consolidations/test_contract_deployment.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
- Test system contract deployment for [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
-""" # noqa: E501
+Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
+"""
from os.path import realpath
from pathlib import Path
diff --git a/tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py b/tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py
index d29c1177e20..a3f33ac2ec2 100644
--- a/tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py
+++ b/tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py
@@ -1,8 +1,6 @@
"""
-abstract: Tests [EIP-7251: Execution layer triggerable consolidation](https://eips.ethereum.org/EIPS/eip-7251)
- Test execution layer triggered exits [EIP-7251: Execution layer triggerable consolidation](https://eips.ethereum.org/EIPS/eip-7251).
-
-""" # noqa: E501
+Tests [EIP-7251: Execution layer triggerable consolidation](https://eips.ethereum.org/EIPS/eip-7251).
+"""
from typing import List
@@ -91,7 +89,9 @@ def test_extra_consolidations(
pre: Alloc,
requests_list: List[ConsolidationRequest],
):
- """Test how clients were to behave with more than 2 consolidations per block."""
+ """
+    Test how clients would behave with more than 2 consolidations per block.
+ """
modified_code: Bytecode = Bytecode()
memory_offset: int = 0
amount_of_requests: int = 0
@@ -116,7 +116,8 @@ def test_extra_consolidations(
balance=0,
)
- # given a list of consolidation requests construct a consolidation request transaction
+ # given a list of consolidation requests construct a consolidation request
+ # transaction
consolidation_request_transaction = ConsolidationRequestTransaction(requests=requests_list)
# prepare consolidation senders
consolidation_request_transaction.update_pre(pre=pre)
@@ -144,8 +145,8 @@ def test_extra_consolidations(
)
def test_system_contract_errors():
"""
- Test system contract raising different errors when called by the system account at the
- end of the block execution.
+ Test system contract raising different errors when called by the system
+ account at the end of the block execution.
To see the list of generated tests, please refer to the
`generate_system_contract_error_test` decorator definition.
diff --git a/tests/prague/eip7623_increase_calldata_cost/__init__.py b/tests/prague/eip7623_increase_calldata_cost/__init__.py
index 951922c6f92..ef3f7c8099e 100644
--- a/tests/prague/eip7623_increase_calldata_cost/__init__.py
+++ b/tests/prague/eip7623_increase_calldata_cost/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623)
- Tests for [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
+Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
"""
diff --git a/tests/prague/eip7623_increase_calldata_cost/conftest.py b/tests/prague/eip7623_increase_calldata_cost/conftest.py
index 5f0a4b50d0d..a4f30d29113 100644
--- a/tests/prague/eip7623_increase_calldata_cost/conftest.py
+++ b/tests/prague/eip7623_increase_calldata_cost/conftest.py
@@ -48,8 +48,8 @@ def to(
@pytest.fixture
def protected() -> bool:
"""
- Return whether the transaction is protected or not.
- Only valid for type-0 transactions.
+ Return whether the transaction is protected or not. Only valid for type-0
+ transactions.
"""
return True
@@ -62,7 +62,10 @@ def access_list() -> List[AccessList] | None:
@pytest.fixture
def authorization_refund() -> bool:
- """Return whether the transaction has an existing authority in the authorization list."""
+ """
+ Return whether the transaction has an existing authority in the
+ authorization list.
+ """
return False
@@ -75,9 +78,9 @@ def authorization_list(
"""
Authorization-list for the transaction.
- This fixture needs to be parametrized indirectly in order to generate the authorizations with
- valid signers using `pre` in this function, and the parametrized value should be a list of
- addresses.
+ This fixture needs to be parametrized indirectly in order to generate the
+ authorizations with valid signers using `pre` in this function, and the
+ parametrized value should be a list of addresses.
"""
if not hasattr(request, "param"):
return None
@@ -127,20 +130,24 @@ def tx_data(
intrinsic_gas_data_floor_minimum_delta: int,
) -> Bytes:
"""
- All tests in this file use data that is generated dynamically depending on the case and the
- attributes of the transaction in order to reach the edge cases where the floor gas cost is
- equal or barely greater than the intrinsic gas cost.
+ All tests in this file use data that is generated dynamically depending on
+ the case and the attributes of the transaction in order to reach the edge
+ cases where the floor gas cost is equal or barely greater than the
+ intrinsic gas cost.
We have two different types of tests:
- - FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS: The floor gas cost is less than or equal
- to the intrinsic gas cost, which means that the size of the tokens in the data are not
- enough to trigger the floor gas cost.
- - FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS: The floor gas cost is greater than the intrinsic
- gas cost, which means that the size of the tokens in the data are enough to trigger the
- floor gas cost.
-
- E.g. Given a transaction with a single access list and a single storage key, its intrinsic gas
- cost (as of Prague fork) can be calculated as:
+
+ - FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS: The floor gas cost is
+ less than or equal to the intrinsic gas cost, which means that the size
+    of the tokens in the data is not enough to trigger the floor gas cost.
+
+ - FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS: The floor gas cost is greater
+ than the intrinsic gas cost, which means that the size of the tokens in
+    the data is enough to trigger the floor gas cost.
+
+ E.g. Given a transaction with a single access list and a single storage
+ key, its intrinsic gas cost (as of Prague fork) can be calculated as:
+
- 21,000 gas for the transaction
- 2,400 gas for the access list
- 1,900 gas for the storage key
@@ -152,18 +159,19 @@ def tx_data(
- 40 gas for each non-zero byte in the data
- 10 gas for each zero byte in the data
- Notice that the data included in the transaction affects both the intrinsic gas cost and the
- floor data cost, but at different rates.
+ Notice that the data included in the transaction affects both the intrinsic
+ gas cost and the floor data cost, but at different rates.
- The purpose of this function is to find the exact amount of data where the floor data gas
- cost starts exceeding the intrinsic gas cost.
+ The purpose of this function is to find the exact amount of data where the
+ floor data gas cost starts exceeding the intrinsic gas cost.
- After a binary search we find that adding 717 tokens of data (179 non-zero bytes +
- 1 zero byte) triggers the floor gas cost.
+ After a binary search we find that adding 717 tokens of data (179 non-zero
+ bytes + 1 zero byte) triggers the floor gas cost.
- Therefore, this function will return a Bytes object with 179 non-zero bytes and 1 zero byte
- for `FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS` and a Bytes object with 179 non-zero bytes
- and no zero bytes for `FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS`
+ Therefore, this function will return a Bytes object with 179 non-zero bytes
+ and 1 zero byte for `FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS` and a Bytes
+ object with 179 non-zero bytes and no zero bytes for
+ `FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS`
"""
def tokens_to_data(tokens: int) -> Bytes:
@@ -188,11 +196,11 @@ def transaction_intrinsic_cost_calculator(tokens: int) -> int:
def transaction_data_floor_cost_calculator(tokens: int) -> int:
return fork_data_floor_cost_calculator(data=tokens_to_data(tokens))
- # Start with zero data and check the difference in the gas calculator between the
- # intrinsic gas cost and the floor gas cost.
+ # Start with zero data and check the difference in the gas calculator
+ # between the intrinsic gas cost and the floor gas cost.
if transaction_data_floor_cost_calculator(0) >= transaction_intrinsic_cost_calculator(0):
- # Special case which is a transaction with no extra intrinsic gas costs other than the
- # data cost, any data will trigger the floor gas cost.
+ # Special case which is a transaction with no extra intrinsic gas costs
+ # other than the data cost, any data will trigger the floor gas cost.
if data_test_type == DataTestType.FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS:
return Bytes(b"")
else:
@@ -213,13 +221,15 @@ def tx_gas_delta() -> int:
"""
Gas delta to modify the gas amount included with the transaction.
- If negative, the transaction will be invalid because the intrinsic gas cost is greater than the
- gas limit.
+ If negative, the transaction will be invalid because the intrinsic gas cost
+ is greater than the gas limit.
- This value operates regardless of whether the floor data gas cost is reached or not.
+ This value operates regardless of whether the floor data gas cost is
+ reached or not.
- If the value is greater than zero, the transaction will also be valid and the test will check
- that transaction processing does not consume more gas than it should.
+ If the value is greater than zero, the transaction will also be valid and
+ the test will check that transaction processing does not consume more gas
+ than it should.
"""
return 0
@@ -258,10 +268,11 @@ def tx_intrinsic_gas_cost_including_floor_data_cost(
"""
Transaction intrinsic gas cost.
- The calculated value takes into account the normal intrinsic gas cost and the floor data gas
- cost if it is greater than the intrinsic gas cost.
+ The calculated value takes into account the normal intrinsic gas cost and
+ the floor data gas cost if it is greater than the intrinsic gas cost.
- In other words, this is the value that is required for the transaction to be valid.
+ In other words, this is the value that is required for the transaction to
+ be valid.
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
return intrinsic_gas_cost_calculator(
@@ -290,7 +301,8 @@ def tx_gas_limit(
"""
Gas limit for the transaction.
- The gas delta is added to the intrinsic gas cost to generate different test scenarios.
+ The gas delta is added to the intrinsic gas cost to generate different test
+ scenarios.
"""
return tx_intrinsic_gas_cost_including_floor_data_cost + tx_gas_delta
diff --git a/tests/prague/eip7623_increase_calldata_cost/helpers.py b/tests/prague/eip7623_increase_calldata_cost/helpers.py
index d4235976c3a..fc4f94bd63c 100644
--- a/tests/prague/eip7623_increase_calldata_cost/helpers.py
+++ b/tests/prague/eip7623_increase_calldata_cost/helpers.py
@@ -16,16 +16,18 @@ def find_floor_cost_threshold(
intrinsic_gas_cost_calculator: Callable[[int], int],
) -> int:
"""
- Find the minimum amount of tokens that will trigger the floor gas cost, by using a binary
- search and the intrinsic gas cost and floor data calculators.
+ Find the minimum amount of tokens that will trigger the floor gas cost, by
+ using a binary search and the intrinsic gas cost and floor data
+ calculators.
"""
- # Start with 1000 tokens and if the intrinsic gas cost is greater than the floor gas cost,
- # multiply the number of tokens by 2 until it's not.
+ # Start with 1000 tokens and if the intrinsic gas cost is greater than the
+ # floor gas cost, multiply the number of tokens by 2 until it's not.
tokens = 1000
while floor_data_gas_cost_calculator(tokens) < intrinsic_gas_cost_calculator(tokens):
tokens *= 2
- # Binary search to find the minimum number of tokens that will trigger the floor gas cost.
+ # Binary search to find the minimum number of tokens that will trigger the
+ # floor gas cost.
left = 0
right = tokens
while left < right:
@@ -39,7 +41,8 @@ def find_floor_cost_threshold(
if floor_data_gas_cost_calculator(tokens) > intrinsic_gas_cost_calculator(tokens):
tokens -= 1
- # Verify that increasing the tokens by one would always trigger the floor gas cost.
+ # Verify that increasing the tokens by one would always trigger the floor
+ # gas cost.
assert (
floor_data_gas_cost_calculator(tokens) <= intrinsic_gas_cost_calculator(tokens)
) and floor_data_gas_cost_calculator(tokens + 1) > intrinsic_gas_cost_calculator(tokens + 1), (
diff --git a/tests/prague/eip7623_increase_calldata_cost/test_execution_gas.py b/tests/prague/eip7623_increase_calldata_cost/test_execution_gas.py
index c0ef506023e..53eb2957b5c 100644
--- a/tests/prague/eip7623_increase_calldata_cost/test_execution_gas.py
+++ b/tests/prague/eip7623_increase_calldata_cost/test_execution_gas.py
@@ -1,7 +1,6 @@
"""
-abstract: Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623)
- Test execution gas consumption after [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
-""" # noqa: E501
+Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
+"""
from typing import List
@@ -41,7 +40,10 @@ class TestGasConsumption:
@pytest.fixture
def intrinsic_gas_data_floor_minimum_delta(self) -> int:
- """Force a minimum delta in order to have some gas to execute the invalid opcode."""
+ """
+ Force a minimum delta in order to have some gas to execute the invalid
+ opcode.
+ """
return 50_000
@pytest.fixture
@@ -49,7 +51,10 @@ def to(
self,
pre: Alloc,
) -> Address | None:
- """Return a contract that consumes all gas when executed by calling an invalid opcode."""
+ """
+ Return a contract that consumes all gas when executed by calling an
+ invalid opcode.
+ """
return pre.deploy_contract(Op.INVALID)
@pytest.mark.parametrize(
@@ -78,7 +83,10 @@ def test_full_gas_consumption(
pre: Alloc,
tx: Transaction,
) -> None:
- """Test executing a transaction that fully consumes its execution gas allocation."""
+ """
+ Test executing a transaction that fully consumes its execution gas
+ allocation.
+ """
tx.expected_receipt = TransactionReceipt(gas_used=tx.gas_limit)
state_test(
pre=pre,
@@ -136,8 +144,8 @@ def to(
@pytest.mark.parametrize(
"tx_gas_delta",
[
- # Test with exact gas and extra gas, to verify that the refund is correctly applied
- # to the full consumed execution gas.
+ # Test with exact gas and extra gas, to verify that the refund is
+ # correctly applied to the full consumed execution gas.
pytest.param(0, id="exact_gas"),
],
)
@@ -148,7 +156,9 @@ def test_gas_consumption_below_data_floor(
tx: Transaction,
tx_floor_data_cost: int,
) -> None:
- """Test executing a transaction that almost consumes the floor data cost."""
+ """
+ Test executing a transaction that almost consumes the floor data cost.
+ """
tx.expected_receipt = TransactionReceipt(gas_used=tx_floor_data_cost)
state_test(
pre=pre,
diff --git a/tests/prague/eip7623_increase_calldata_cost/test_refunds.py b/tests/prague/eip7623_increase_calldata_cost/test_refunds.py
index 10781bb9e0f..94908fdf0df 100644
--- a/tests/prague/eip7623_increase_calldata_cost/test_refunds.py
+++ b/tests/prague/eip7623_increase_calldata_cost/test_refunds.py
@@ -1,7 +1,6 @@
"""
-abstract: Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623)
- Test applied refunds for [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
-""" # noqa: E501
+Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
+"""
from enum import Enum, Flag, auto
from typing import Dict, List
@@ -35,18 +34,16 @@ class RefundTestType(Enum):
EXECUTION_GAS_MINUS_REFUND_GREATER_THAN_DATA_FLOOR = 0
"""
- The execution gas minus the refund is greater than the data floor, hence the execution gas cost
- is charged.
+ The execution gas minus the refund is greater than the data floor, hence
+ the execution gas cost is charged.
"""
EXECUTION_GAS_MINUS_REFUND_LESS_THAN_DATA_FLOOR = 1
"""
- The execution gas minus the refund is less than the data floor, hence the data floor cost is
- charged.
+ The execution gas minus the refund is less than the data floor, hence the
+ data floor cost is charged.
"""
EXECUTION_GAS_MINUS_REFUND_EQUAL_TO_DATA_FLOOR = 2
- """
- The execution gas minus the refund is equal to the data floor.
- """
+ """The execution gas minus the refund is equal to the data floor."""
class RefundType(Flag):
@@ -56,7 +53,10 @@ class RefundType(Flag):
"""The storage is cleared from a non-zero value."""
AUTHORIZATION_EXISTING_AUTHORITY = auto()
- """The authorization list contains an authorization where the authority exists in the state."""
+ """
+ The authorization list contains an authorization where the authority exists
+ in the state.
+ """
@pytest.fixture
@@ -67,7 +67,10 @@ def data_test_type() -> DataTestType:
@pytest.fixture
def authorization_list(pre: Alloc, refund_type: RefundType) -> List[AuthorizationTuple] | None:
- """Modify fixture from conftest to automatically read the refund_type information."""
+ """
+ Modify fixture from conftest to automatically read the refund_type
+ information.
+ """
if RefundType.AUTHORIZATION_EXISTING_AUTHORITY not in refund_type:
return None
return [AuthorizationTuple(signer=pre.fund_eoa(1), address=Address(1))]
@@ -75,7 +78,10 @@ def authorization_list(pre: Alloc, refund_type: RefundType) -> List[Authorizatio
@pytest.fixture
def ty(refund_type: RefundType) -> int:
- """Modify fixture from conftest to automatically read the refund_type information."""
+ """
+ Modify fixture from conftest to automatically read the refund_type
+ information.
+ """
if RefundType.AUTHORIZATION_EXISTING_AUTHORITY in refund_type:
return 4
return 2
@@ -125,8 +131,8 @@ def code_storage(refund_type: RefundType) -> Dict:
@pytest.fixture
def contract_creating_tx() -> bool:
"""
- Override fixture in order to avoid a circular fixture dependency since
- none of these tests are contract creating transactions.
+ Override fixture in order to avoid a circular fixture dependency since none
+ of these tests are contract creating transactions.
"""
return False
@@ -137,12 +143,14 @@ def intrinsic_gas_data_floor_minimum_delta() -> int:
Induce a minimum delta between the transaction intrinsic gas cost and the
floor data gas cost.
- Since at least one of the cases requires some execution gas expenditure (SSTORE clearing),
- we need to introduce an increment of the floor data cost above the transaction intrinsic gas
- cost, otherwise the floor data cost would always be the below the execution gas cost even
- after the refund is applied.
+ Since at least one of the cases requires some execution gas expenditure
+ (SSTORE clearing), we need to introduce an increment of the floor data cost
+ above the transaction intrinsic gas cost, otherwise the floor data cost
+ would always be below the execution gas cost even after the refund is
+ applied.
- This value has been set as of Prague and should be adjusted if the gas costs change.
+ This value has been set as of Prague and should be adjusted if the gas
+ costs change.
"""
return 250
@@ -160,10 +168,11 @@ def execution_gas_used(
This gas amount is on top of the transaction intrinsic gas cost.
- If this value were zero it would result in the refund being applied to the execution gas cost
- and the resulting amount being always below the floor data cost, hence we need to find a
- higher value in this function to ensure we get both scenarios where the refund drives
- the execution cost below the floor data cost and above the floor data cost.
+ If this value were zero it would result in the refund being applied to the
+ execution gas cost and the resulting amount being always below the floor
+ data cost, hence we need to find a higher value in this function to ensure
+ we get both scenarios where the refund drives the execution cost below the
+ floor data cost and above the floor data cost.
"""
def execution_gas_cost(execution_gas: int) -> int:
@@ -177,7 +186,8 @@ def execution_gas_cost(execution_gas: int) -> int:
"test to fail. Try increasing the intrinsic_gas_data_floor_minimum_delta fixture."
)
- # Dumb for-loop to find the execution gas cost that will result in the expected refund.
+ # Dumb for-loop to find the execution gas cost that will result in the
+ # expected refund.
while execution_gas_cost(execution_gas) < tx_floor_data_cost:
execution_gas += 1
if refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_EQUAL_TO_DATA_FLOOR:
@@ -212,7 +222,8 @@ def to(
"""
Return a contract that consumes the expected execution gas.
- At the moment we naively use JUMPDEST to consume the gas, which can yield very big contracts.
+ At the moment we naively use JUMPDEST to consume the gas, which can yield
+ very big contracts.
Ideally, we can use memory expansion to consume gas.
"""
@@ -232,7 +243,8 @@ def tx_gas_limit(
"""
Gas limit for the transaction.
- The gas delta is added to the intrinsic gas cost to generate different test scenarios.
+ The gas delta is added to the intrinsic gas cost to generate different test
+ scenarios.
"""
tx_gas_limit = tx_intrinsic_gas_cost_before_execution + execution_gas_used
assert tx_gas_limit >= tx_intrinsic_gas_cost_including_floor_data_cost
@@ -265,7 +277,10 @@ def test_gas_refunds_from_data_floor(
refund: int,
refund_test_type: RefundTestType,
) -> None:
- """Test gas refunds deducted from the execution gas cost and not the data floor."""
+ """
+ Test gas refunds deducted from the execution gas cost and not the data
+ floor.
+ """
gas_used = tx_intrinsic_gas_cost_before_execution + execution_gas_used - refund
if refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_LESS_THAN_DATA_FLOOR:
assert gas_used < tx_floor_data_cost
@@ -278,17 +293,17 @@ def test_gas_refunds_from_data_floor(
if gas_used < tx_floor_data_cost:
gas_used = tx_floor_data_cost
# This is the actual test verification:
- # - During test filling, the receipt returned by the transition tool (t8n) is verified against
- # the expected receipt.
- # - During test consumption, this is reflected in the balance difference and the state
- # root.
+ # - During test filling, the receipt returned by the transition tool
+ # (t8n) is verified against the expected receipt.
+ # - During test consumption, this is reflected in the balance difference
+ # and the state root.
tx.expected_receipt = TransactionReceipt(gas_used=gas_used)
state_test(
pre=pre,
post={
tx.to: {
- # Verify that the storage was cleared (for storage clear refund).
- # See `code_storage` fixture for more details.
+ # Verify that the storage was cleared (for storage clear
+ # refund). See `code_storage` fixture for more details.
"storage": {0: 0},
}
},
diff --git a/tests/prague/eip7623_increase_calldata_cost/test_transaction_validity.py b/tests/prague/eip7623_increase_calldata_cost/test_transaction_validity.py
index 27457fc3287..dba9565f94b 100644
--- a/tests/prague/eip7623_increase_calldata_cost/test_transaction_validity.py
+++ b/tests/prague/eip7623_increase_calldata_cost/test_transaction_validity.py
@@ -1,7 +1,6 @@
"""
-abstract: Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623)
- Test transaction validity on [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
-""" # noqa: E501
+Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
+"""
import pytest
@@ -33,8 +32,9 @@
pytest.mark.parametrize(
"tx_gas_delta",
[
- # Test the case where the included gas is greater than the intrinsic gas to verify that
- # the data floor does not consume more gas than it should.
+ # Test the case where the included gas is greater than the
+ # intrinsic gas to verify that the data floor does not consume more
+ # gas than it should.
pytest.param(1, id="extra_gas"),
pytest.param(0, id="exact_gas"),
pytest.param(-1, id="insufficient_gas", marks=pytest.mark.exception_test),
@@ -81,7 +81,10 @@ def test_transaction_validity_type_0(
pre: Alloc,
tx: Transaction,
) -> None:
- """Test transaction validity for transactions without access lists and contract creation."""
+ """
+ Test transaction validity for transactions without access lists and
+ contract creation.
+ """
state_test(
pre=pre,
post={},
@@ -143,7 +146,10 @@ def test_transaction_validity_type_1_type_2(
pre: Alloc,
tx: Transaction,
) -> None:
- """Test transaction validity for transactions with access lists and contract creation."""
+ """
+ Test transaction validity for transactions with access lists and contract
+ creation.
+ """
state_test(
pre=pre,
post={},
@@ -188,9 +194,9 @@ def test_transaction_validity_type_1_type_2(
],
)
@pytest.mark.parametrize(
- # Blobs don't really have an effect because the blob gas does is not considered in the
- # intrinsic gas calculation, but we still test it to make sure that the transaction is
- # correctly processed.
+ # Blobs don't really have an effect because the blob gas is not
+ # considered in the intrinsic gas calculation, but we still test it to make
+ # sure that the transaction is correctly processed.
"blob_versioned_hashes",
[
pytest.param(
@@ -219,8 +225,8 @@ def test_transaction_validity_type_3(
tx: Transaction,
) -> None:
"""
- Test transaction validity for transactions with access lists, blobs,
- but no contract creation.
+ Test transaction validity for transactions with access lists, blobs, but no
+ contract creation.
"""
state_test(
pre=pre,
@@ -289,8 +295,8 @@ def test_transaction_validity_type_4(
tx: Transaction,
) -> None:
"""
- Test transaction validity for transactions with access lists, authorization lists, but no
- contract creation.
+ Test transaction validity for transactions with access lists, authorization
+ lists, but no contract creation.
"""
state_test(
pre=pre,
diff --git a/tests/prague/eip7685_general_purpose_el_requests/conftest.py b/tests/prague/eip7685_general_purpose_el_requests/conftest.py
index 1d347199a70..03fa406f0dc 100644
--- a/tests/prague/eip7685_general_purpose_el_requests/conftest.py
+++ b/tests/prague/eip7685_general_purpose_el_requests/conftest.py
@@ -29,7 +29,10 @@
def block_body_override_requests(
request: pytest.FixtureRequest,
) -> List[DepositRequest | WithdrawalRequest | ConsolidationRequest] | None:
- """List of requests that overwrite the requests in the header. None by default."""
+ """
+ List of requests that overwrite the requests in the header. None by
+ default.
+ """
if hasattr(request, "param"):
return request.param
return None
@@ -38,9 +41,9 @@ def block_body_override_requests(
@pytest.fixture
def correct_requests_hash_in_header() -> bool:
"""
- Whether to include the correct requests hash in the header so the calculated
- block hash is correct, even though the requests in the new payload parameters might
- be wrong.
+ Whether to include the correct requests hash in the header so the
+ calculated block hash is correct, even though the requests in the new
+ payload parameters might be wrong.
"""
return False
diff --git a/tests/prague/eip7685_general_purpose_el_requests/spec.py b/tests/prague/eip7685_general_purpose_el_requests/spec.py
index 414e47d290c..758fe930762 100644
--- a/tests/prague/eip7685_general_purpose_el_requests/spec.py
+++ b/tests/prague/eip7685_general_purpose_el_requests/spec.py
@@ -1,7 +1,8 @@
"""
Common procedures to test
-[EIP-7685: General purpose execution layer requests](https://eips.ethereum.org/EIPS/eip-7685).
-""" # noqa: E501
+[EIP-7685: General purpose execution
+layer requests](https://eips.ethereum.org/EIPS/eip-7685).
+"""
from dataclasses import dataclass
diff --git a/tests/prague/eip7685_general_purpose_el_requests/test_multi_type_requests.py b/tests/prague/eip7685_general_purpose_el_requests/test_multi_type_requests.py
index d11e62cffdb..382b1cb404c 100644
--- a/tests/prague/eip7685_general_purpose_el_requests/test_multi_type_requests.py
+++ b/tests/prague/eip7685_general_purpose_el_requests/test_multi_type_requests.py
@@ -1,8 +1,9 @@
"""
-abstract: Tests [EIP-7685: General purpose execution layer requests](https://eips.ethereum.org/EIPS/eip-7685)
- Cross testing for withdrawal and deposit request for [EIP-7685: General purpose execution layer requests](https://eips.ethereum.org/EIPS/eip-7685).
-""" # noqa: E501
+Tests EIP-7685 General purpose execution layer requests.
+Cross testing for withdrawal and deposit request for
+[EIP-7685: General purpose execution layer requests](https://eips.ethereum.org/EIPS/eip-7685).
+"""
from itertools import permutations
from typing import Callable, Dict, Generator, List, Tuple
@@ -299,7 +300,8 @@ def get_contract_permutations(n: int = 3) -> Generator[ParameterSet, None, None]
[
single_consolidation_from_contract(0),
single_consolidation_from_contract(1),
- # the following performs single_withdrawal_from_contract(0) to (16)
+ # the following performs single_withdrawal_from_contract(0) to
+ # (16)
*[
single_withdrawal_from_contract(i)
for i in range(
@@ -307,10 +309,10 @@ def get_contract_permutations(n: int = 3) -> Generator[ParameterSet, None, None]
16,
)
],
- # single_withdrawal_from_contract(16) not allowed cuz only
- # 16 MAX WITHDRAWALS PER BLOCK (EIP-7002)
- #
- # the following performs single_deposit_from_contract(0) to (18)
+ # single_withdrawal_from_contract(16) not allowed cuz only 16
+ # MAX WITHDRAWALS PER BLOCK (EIP-7002)
+ # the following performs single_deposit_from_contract(0) to
+ # (18)
*[
single_deposit_from_contract(i)
for i in range(
@@ -354,8 +356,8 @@ def test_valid_multi_type_request_from_same_tx(
fork: Fork,
):
"""
- Test making a deposit to the beacon chain deposit contract and a withdrawal in
- the same tx.
+ Test making a deposit to the beacon chain deposit contract and a withdrawal
+ in the same tx.
"""
withdrawal_request_fee: int = 1
consolidation_request_fee: int = 1
@@ -440,8 +442,9 @@ def invalid_requests_block_combinations(
"""
Return a list of invalid request combinations for the given fork.
- In the event of a new request type, the `all_request_types` dictionary should be updated
- with the new request type and its corresponding request-generating transaction.
+ In the event of a new request type, the `all_request_types` dictionary
+ should be updated with the new request type and its corresponding
+ request-generating transaction.
Returned parameters are: requests, block_body_override_requests, exception
"""
@@ -474,9 +477,10 @@ def func(fork: Fork) -> List[ParameterSet]:
expected_exceptions: List[BlockException] = [BlockException.INVALID_REQUESTS]
if correct_requests_hash_in_header:
- # The client also might reject the block with an invalid-block-hash error because it
- # might convert the requests in the new payload parameters to the requests hash in the
- # header and compare it with the block hash.
+ # The client also might reject the block with an invalid-block-hash
+ # error because it might convert the requests in the new payload
+ # parameters to the requests hash in the header and compare it with
+ # the block hash.
expected_exceptions.append(BlockException.INVALID_BLOCK_HASH)
# - Empty requests list with invalid hash
@@ -485,8 +489,10 @@ def func(fork: Fork) -> List[ParameterSet]:
[],
[
bytes([i]) for i in range(fork.max_request_type() + 1)
- ], # Using empty requests, calculate the hash using an invalid calculation method:
- # sha256(sha256(b"\0") ++ sha256(b"\1") ++ sha256(b"\2") ++ ...)
+ ], # Using empty requests, calculate the hash using an invalid
+ # calculation method:
+ # sha256(sha256(b"\0") ++ sha256(b"\1") ++ sha256(b"\2") ++
+ # ...)
expected_exceptions,
id="no_requests_and_invalid_hash_calculation_method",
),
@@ -494,7 +500,8 @@ def func(fork: Fork) -> List[ParameterSet]:
[],
[
bytes([]) for _ in range(fork.max_request_type() + 1)
- ], # Using empty requests, calculate the hash using an invalid calculation method:
+ ], # Using empty requests, calculate the hash using an invalid
+ # calculation method:
# sha256(sha256(b"") ++ sha256(b"") ++ sha256(b"") ++ ...)
expected_exceptions,
id="no_requests_and_invalid_hash_calculation_method_2",
@@ -507,9 +514,8 @@ def func(fork: Fork) -> List[ParameterSet]:
[
pytest.param(
[eoa_request],
- [
- block_request
- ], # The request type byte missing because we need to use `Requests`
+ [block_request], # The request type byte missing because we need to
+ # use `Requests`
expected_exceptions,
id=f"single_{request_type}_missing_type_byte",
),
@@ -636,10 +642,11 @@ def test_invalid_multi_type_requests(
"""
Negative testing for all request types in the same block.
- In these tests, the requests hash in the header reflects what's received in the parameters
- portion of the `engine_newPayloadVX` call, so the block hash calculation might pass if
- a client copies the info received verbatim, but block validation must fail after
- the block is executed (via RLP or Engine API).
+ In these tests, the requests hash in the header reflects what's received in
+ the parameters portion of the `engine_newPayloadVX` call, so the block hash
+ calculation might pass if a client copies the info received verbatim, but
+ block validation must fail after the block is executed (via RLP or Engine
+ API).
"""
blockchain_test(
genesis_environment=Environment(),
@@ -665,26 +672,26 @@ def test_invalid_multi_type_requests_engine(
blocks: List[Block],
):
"""
- Negative testing for all request types in the same block with incorrect parameters
- in the Engine API new payload parameters, but with the correct requests hash in the header
- so the block hash is correct.
-
- In these tests, the requests hash in the header reflects what's actually in the executed block,
- so the block might execute properly if the client ignores the requests in the new payload
- parameters.
-
- Note that the only difference between the engine version produced by this test and
- the ones produced by `test_invalid_multi_type_requests` is the
- `blockHash` value in the new payloads, which is calculated using different request hashes
- for each test, but since the request hash is not a value that is included in the payload,
- it might not be immediately apparent.
-
- Also these tests would not fail if the block is imported via RLP (syncing from a peer),
- so we only generate the BlockchainTestEngine for them.
-
- The client also might reject the block with an invalid-block-hash error because it might
- convert the requests in the new payload parameters to the requests hash in the header
- and compare it with the block hash.
+ Negative testing for all request types in the same block with incorrect
+ parameters in the Engine API new payload parameters, but with the correct
+ requests hash in the header so the block hash is correct.
+
+ In these tests, the requests hash in the header reflects what's actually in
+ the executed block, so the block might execute properly if the client
+ ignores the requests in the new payload parameters.
+
+ Note that the only difference between the engine version produced by this
+ test and the ones produced by `test_invalid_multi_type_requests` is the
+ `blockHash` value in the new payloads, which is calculated using different
+ request hashes for each test, but since the request hash is not a value
+ that is included in the payload, it might not be immediately apparent.
+
+ Also these tests would not fail if the block is imported via RLP (syncing
+ from a peer), so we only generate the BlockchainTestEngine for them.
+
+ The client also might reject the block with an invalid-block-hash error
+ because it might convert the requests in the new payload parameters to the
+ requests hash in the header and compare it with the block hash.
"""
blockchain_test(
genesis_environment=Environment(),
diff --git a/tests/prague/eip7702_set_code_tx/helpers.py b/tests/prague/eip7702_set_code_tx/helpers.py
index 83bd4eb91b6..8cc21940297 100644
--- a/tests/prague/eip7702_set_code_tx/helpers.py
+++ b/tests/prague/eip7702_set_code_tx/helpers.py
@@ -1,12 +1,15 @@
-"""Helper types, functions and classes for testing EIP-7702 Set Code Transaction."""
+"""
+Helper types, functions and classes for testing EIP-7702 Set Code Transaction.
+"""
from enum import Enum, auto
class AddressType(Enum):
"""
- Different types of addresses used to specify the type of authority that signs an authorization,
- and the type of address to which the authority authorizes to set the code to.
+ Different types of addresses used to specify the type of authority that
+ signs an authorization, and the type of address to which the authority
+ authorizes to set the code to.
"""
EMPTY_ACCOUNT = auto()
diff --git a/tests/prague/eip7702_set_code_tx/test_gas.py b/tests/prague/eip7702_set_code_tx/test_gas.py
index af634fc29d0..1b416b28fbe 100644
--- a/tests/prague/eip7702_set_code_tx/test_gas.py
+++ b/tests/prague/eip7702_set_code_tx/test_gas.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests related to gas of set-code transactions from [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702)
- Tests related to gas of set-code transactions from [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
-""" # noqa: E501
+Tests related to gas of set-code transactions from EIP-7702.
+
+Tests related to gas of set-code transactions from
+[EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
+"""
from dataclasses import dataclass
from enum import Enum, auto
@@ -43,7 +45,10 @@
class SignerType(Enum):
- """Different cases of authorization lists for testing gas cost of set-code transactions."""
+ """
+ Different cases of authorization lists for testing gas cost of set-code
+ transactions.
+ """
SINGLE_SIGNER = auto()
MULTIPLE_SIGNERS = auto()
@@ -59,7 +64,10 @@ class AuthorizationInvalidityType(Enum):
class AccessListType(Enum):
- """Different cases of access lists for testing gas cost of set-code transactions."""
+ """
+ Different cases of access lists for testing gas cost of set-code
+ transactions.
+ """
EMPTY = auto()
CONTAINS_AUTHORITY = auto()
@@ -75,8 +83,8 @@ def contains_authority(self) -> bool:
def contains_set_code_address(self) -> bool:
"""
- Return True if the access list contains the address to which the authority authorizes to
- set the code to.
+ Return True if the access list contains the address to which the
+ authority authorizes to set the code to.
"""
return self in {
AccessListType.CONTAINS_SET_CODE_ADDRESS,
@@ -92,21 +100,21 @@ class AuthorityWithProperties:
"""Dataclass to hold the properties of the authority address."""
authority: EOA
- """
- The address of the authority to be used in the transaction.
- """
+ """The address of the authority to be used in the transaction."""
address_type: AddressType
- """
- The type of the address the authority was before the authorization.
- """
+ """The type of the address the authority was before the authorization."""
invalidity_type: AuthorizationInvalidityType | None
"""
- Whether the authorization will be invalid and if so, which type of invalidity it is.
+ Whether the authorization will be invalid and if so, which type of
+ invalidity it is.
"""
@property
def empty(self) -> bool:
- """Return True if the authority address is an empty account before the authorization."""
+ """
+ Return True if the authority address is an empty account before the
+ authorization.
+ """
return self.address_type == AddressType.EMPTY_ACCOUNT
@@ -189,28 +197,29 @@ class AuthorizationWithProperties:
"""Dataclass to hold the properties of the authorization list."""
tuple: AuthorizationTuple
- """
- The authorization tuple to be used in the transaction.
- """
+ """The authorization tuple to be used in the transaction."""
invalidity_type: AuthorizationInvalidityType | None
"""
- Whether the authorization is invalid and if so, which type of invalidity it is.
+ Whether the authorization is invalid and if so, which type of invalidity it
+ is.
"""
authority_type: AddressType
- """
- The type of the address the authority was before the authorization.
- """
+ """The type of the address the authority was before the authorization."""
skip: bool
"""
- Whether the authorization should be skipped and therefore not included in the transaction.
+ Whether the authorization should be skipped and therefore not included in
+ the transaction.
- Used for tests where the authorization was already in the state before the transaction was
- created.
+ Used for tests where the authorization was already in the state before the
+ transaction was created.
"""
@property
def empty(self) -> bool:
- """Return True if the authority address is an empty account before the authorization."""
+ """
+ Return True if the authority address is an empty account before the
+ authorization.
+ """
return self.authority_type == AddressType.EMPTY_ACCOUNT
@@ -227,14 +236,17 @@ def authorization_list_with_properties(
self_sponsored: bool,
re_authorize: bool,
) -> List[AuthorizationWithProperties]:
- """Fixture to return the authorization-list-with-properties for the given case."""
+ """
+ Fixture to return the authorization-list-with-properties for the given
+ case.
+ """
authorization_list: List[AuthorizationWithProperties] = []
environment_chain_id = chain_config.chain_id
match signer_type:
case SignerType.SINGLE_SIGNER:
authority_with_properties = next(authority_iterator)
- # We have to take into account the cases where the nonce has already been increased
- # before the authorization is processed.
+ # We have to take into account the cases where the nonce has
+ # already been increased before the authorization is processed.
increased_nonce = (
self_sponsored
or authority_with_properties.address_type == AddressType.EOA_WITH_SET_CODE
@@ -360,7 +372,10 @@ def authorization_list(
@pytest.fixture()
def authorize_to_address(request: pytest.FixtureRequest, pre: Alloc) -> Address:
- """Fixture to return the address to which the authority authorizes to set the code to."""
+ """
+ Fixture to return the address to which the authority authorizes to set the
+ code to.
+ """
match request.param:
case AddressType.EMPTY_ACCOUNT:
return pre.fund_eoa(0)
@@ -421,14 +436,18 @@ def gas_test_parameter_args(
include_pre_authorized: bool = True,
execution_gas_allowance: bool = False,
):
- """Return the parametrize decorator that can be used in all gas test functions."""
+ """
+ Return the parametrize decorator that can be used in all gas test
+ functions.
+ """
multiple_authorizations_count = 2
defaults = {
"signer_type": SignerType.SINGLE_SIGNER,
"authorization_invalidity_type": None,
"authorizations_count": 1,
- "invalid_authorization_index": -1, # All authorizations are equally invalid
+ # All authorizations are equally invalid
+ "invalid_authorization_index": -1,
"chain_id_type": ChainIDType.GENERIC,
"authorize_to_address": AddressType.EMPTY_ACCOUNT,
"access_list_case": AccessListType.EMPTY,
@@ -694,7 +713,8 @@ def gas_test_parameter_args(
]
if include_many:
- # Fit as many authorizations as possible within the transaction gas limit.
+ # Fit as many authorizations as possible within the transaction gas
+ # limit.
max_gas = 16_777_216 - 21_000
if execution_gas_allowance:
# Leave some gas for the execution of the test code.
@@ -743,7 +763,10 @@ def test_gas_cost(
access_list: List[AccessList],
sender: EOA,
):
- """Test gas at the execution start of a set-code transaction in multiple scenarios."""
+ """
+ Test gas at the execution start of a set-code transaction in multiple
+ scenarios.
+ """
# Calculate the intrinsic gas cost of the authorizations, by default the
# full empty account cost is charged for each authorization.
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(
@@ -768,9 +791,10 @@ def test_gas_cost(
Spec.PER_EMPTY_ACCOUNT_COST - Spec.PER_AUTH_BASE_COST
) * discounted_authorizations
- # We calculate the exact gas required to execute the test code.
- # We add SSTORE opcodes in order to make sure that the refund is less than one fifth (EIP-3529)
- # of the total gas used, so we can see the full discount being reflected in most of the tests.
+ # We calculate the exact gas required to execute the test code. We add
+ # SSTORE opcodes in order to make sure that the refund is less than one
+ # fifth (EIP-3529) of the total gas used, so we can see the full discount
+ # being reflected in most of the tests.
gas_costs = fork.gas_costs()
gas_opcode_cost = gas_costs.G_BASE
sstore_opcode_count = 10
@@ -781,8 +805,8 @@ def test_gas_cost(
execution_gas = gas_opcode_cost + push_opcode_cost + sstore_opcode_cost + cold_storage_cost
- # The first opcode that executes in the code is the GAS opcode, which costs 2 gas, so we
- # subtract that from the expected gas measure.
+ # The first opcode that executes in the code is the GAS opcode, which costs
+ # 2 gas, so we subtract that from the expected gas measure.
expected_gas_measure = execution_gas - gas_opcode_cost
test_code_storage = Storage()
@@ -801,7 +825,8 @@ def test_gas_cost(
max_discount = tx_gas_limit // 5
if discount_gas > max_discount:
- # Only one test hits this condition, but it's ok to also test this case.
+ # Only one test hits this condition, but it's ok to also test this
+ # case.
discount_gas = max_discount
gas_used = tx_gas_limit - discount_gas
@@ -841,8 +866,12 @@ def test_account_warming(
sender: EOA,
check_delegated_account_first: bool,
):
- """Test warming of the authority and authorized accounts for set-code transactions."""
- # Overhead cost is the single push operation required for the address to check.
+ """
+ Test warming of the authority and authorized accounts for set-code
+ transactions.
+ """
+ # Overhead cost is the single push operation required for the address to
+ # check.
overhead_cost = 3 * len(Op.CALL.kwargs) # type: ignore
cold_account_cost = 2600
@@ -850,8 +879,8 @@ def test_account_warming(
access_list_addresses = {access_list.address for access_list in access_list}
- # Dictionary to keep track of the addresses to check for warming, and the expected cost of
- # accessing such account.
+ # Dictionary to keep track of the addresses to check for warming, and the
+ # expected cost of accessing such account.
addresses_to_check: Dict[Address, int] = {}
for authorization_with_properties in authorization_list_with_properties:
@@ -861,9 +890,9 @@ def test_account_warming(
authority_contains_delegation_after_authorization = (
authorization_with_properties.invalidity_type is None
- # If the authority already contained a delegation prior to the transaction,
- # even if the authorization is invalid, there will be a delegation when we
- # check the address.
+ # If the authority already contained a delegation prior to the
+ # transaction, even if the authorization is invalid, there will be
+ # a delegation when we check the address.
or authorization_with_properties.authority_type == AddressType.EOA_WITH_SET_CODE
)
@@ -896,8 +925,9 @@ def test_account_warming(
)
if authority_contains_delegation_after_authorization:
- # The double charge for accessing the delegated account, only if the
- # account ends up with a delegation in its code.
+ # The double charge for accessing the delegated account,
+ # only if the account ends up with a delegation in its
+ # code.
access_cost += warm_account_cost
addresses_to_check[authority] = access_cost
@@ -920,8 +950,8 @@ def test_account_warming(
access_cost = warm_account_cost
if (
- # We can only charge the delegated account access cost if the authorization
- # went through
+ # We can only charge the delegated account access cost if
+ # the authorization went through
authority_contains_delegation_after_authorization
):
if (
@@ -999,8 +1029,8 @@ def test_intrinsic_gas_cost(
valid: bool,
):
"""
- Test sending a transaction with the exact intrinsic gas required and also insufficient
- gas.
+ Test sending a transaction with the exact intrinsic gas required and also
+ insufficient gas.
"""
# Calculate the intrinsic gas cost of the authorizations, by default the
# full empty account cost is charged for each authorization.
@@ -1097,7 +1127,9 @@ def test_call_to_pre_authorized_oog(
fork: Fork,
call_opcode: Op,
):
- """Test additional cost of delegation contract access in call instructions."""
+ """
+ Test additional cost of delegation contract access in call instructions.
+ """
# Delegation contract. It should never be reached by a call.
delegation_code = Op.SSTORE(0, 1)
delegation = pre.deploy_contract(delegation_code)
diff --git a/tests/prague/eip7702_set_code_tx/test_invalid_tx.py b/tests/prague/eip7702_set_code_tx/test_invalid_tx.py
index 75067b08a9d..4855a738238 100644
--- a/tests/prague/eip7702_set_code_tx/test_invalid_tx.py
+++ b/tests/prague/eip7702_set_code_tx/test_invalid_tx.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests invalid set-code transactions from [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702)
- Tests invalid set-code transactions from [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
-""" # noqa: E501
+Tests invalid set-code transactions from EIP-7702.
+
+Tests invalid set-code transactions from
+[EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
+"""
from enum import Enum, auto
from typing import List, Type
@@ -102,7 +104,10 @@ def test_invalid_auth_signature(
s: int,
delegate_address: Address,
):
- """Test sending a transaction where one of the signature elements is out of range."""
+ """
+ Test sending a transaction where one of the signature elements is out of
+ range.
+ """
tx = Transaction(
gas_limit=100_000,
to=0,
@@ -150,8 +155,8 @@ def test_invalid_tx_invalid_auth_chain_id(
delegate_address: Address,
):
"""
- Test sending a transaction where the chain id field of an authorization overflows the
- maximum value.
+ Test sending a transaction where the chain id field of an authorization
+ overflows the maximum value.
"""
authorization = AuthorizationTuple(
address=delegate_address,
@@ -193,8 +198,8 @@ def test_invalid_tx_invalid_auth_chain_id_encoding(
auth_chain_id: int,
):
"""
- Test sending a transaction where the chain id field of an authorization has an incorrect
- encoding.
+ Test sending a transaction where the chain id field of an authorization has
+ an incorrect encoding.
"""
class ModifiedAuthorizationTuple(AuthorizationTuple):
@@ -243,8 +248,8 @@ def test_invalid_tx_invalid_nonce(
delegate_address: Address,
):
"""
- Test sending a transaction where the nonce field of an authorization overflows the maximum
- value.
+ Test sending a transaction where the nonce field of an authorization
+ overflows the maximum value.
"""
auth_signer = pre.fund_eoa()
@@ -291,8 +296,8 @@ def test_invalid_tx_invalid_nonce_as_list(
delegate_address: Address,
):
"""
- Test sending a transaction where the nonce field of an authorization overflows the maximum
- value.
+ Test sending a transaction where the nonce field of an authorization is
+ incorrectly encoded as a list.
"""
auth_signer = pre.fund_eoa()
@@ -333,8 +338,8 @@ def test_invalid_tx_invalid_nonce_encoding(
delegate_address: Address,
):
"""
- Test sending a transaction where the chain id field of an authorization has an incorrect
- encoding.
+ Test sending a transaction where the nonce field of an authorization has
+ an incorrect encoding.
"""
class ModifiedAuthorizationTuple(AuthorizationTuple):
@@ -391,8 +396,8 @@ def test_invalid_tx_invalid_address(
address_type: Type[FixedSizeBytes],
):
"""
- Test sending a transaction where the address field of an authorization is incorrectly
- serialized.
+ Test sending a transaction where the address field of an authorization is
+ incorrectly serialized.
"""
auth_signer = pre.fund_eoa()
@@ -435,8 +440,8 @@ def test_invalid_tx_invalid_authorization_tuple_extra_element(
extra_element_value: int,
):
"""
- Test sending a transaction where the authorization tuple field of the type-4 transaction
- is serialized to contain an extra element.
+ Test sending a transaction where the authorization tuple field of the
+ type-4 transaction is serialized to contain an extra element.
"""
auth_signer = pre.fund_eoa()
@@ -444,7 +449,9 @@ class ExtraElementAuthorizationTuple(AuthorizationTuple):
extra_element: HexNumber # type: ignore
def get_rlp_fields(self) -> List[str]:
- """Append the extra field to the list of fields to be encoded in RLP."""
+ """
+ Append the extra field to the list of fields to be encoded in RLP.
+ """
rlp_fields = super().get_rlp_fields()[:]
rlp_fields.append("extra_element")
return rlp_fields
@@ -496,8 +503,8 @@ def test_invalid_tx_invalid_authorization_tuple_missing_element(
missing_index: int,
):
"""
- Test sending a transaction where the authorization tuple field of the type-4 transaction
- is serialized to miss one element.
+ Test sending a transaction where the authorization tuple field of the
+ type-4 transaction is serialized to miss one element.
"""
auth_signer = pre.fund_eoa()
@@ -505,7 +512,9 @@ class MissingElementAuthorizationTuple(AuthorizationTuple):
missing_element_index: int
def get_rlp_fields(self) -> List[str]:
- """Remove the field that is specified by the missing element index."""
+ """
+ Remove the field that is specified by the missing element index.
+ """
rlp_fields = super().get_rlp_fields()[:]
rlp_fields.pop(self.missing_element_index)
return rlp_fields
@@ -545,8 +554,9 @@ def test_invalid_tx_invalid_authorization_tuple_encoded_as_bytes(
delegate_address: Address,
):
"""
- Test sending a transaction where the authorization tuple field of the type-4 transaction
- is encoded in the outer element as bytes instead of a list of elements.
+ Test sending a transaction where the authorization tuple field of the
+ type-4 transaction is encoded in the outer element as bytes instead of a
+ list of elements.
"""
class ModifiedTransaction(Transaction):
@@ -595,8 +605,8 @@ def test_invalid_tx_invalid_rlp_encoding(
invalid_rlp_mode: InvalidRLPMode,
):
"""
- Test sending a transaction type-4 where the RLP encoding of the transaction is
- invalid.
+ Test sending a transaction type-4 where the RLP encoding of the transaction
+ is invalid.
"""
auth_signer = pre.fund_eoa()
diff --git a/tests/prague/eip7702_set_code_tx/test_set_code_txs.py b/tests/prague/eip7702_set_code_tx/test_set_code_txs.py
index 090fea22fb4..c401488bd4e 100644
--- a/tests/prague/eip7702_set_code_tx/test_set_code_txs.py
+++ b/tests/prague/eip7702_set_code_tx/test_set_code_txs.py
@@ -1,7 +1,9 @@
"""
-abstract: Tests use of set-code transactions from [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702)
- Tests use of set-code transactions from [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
-""" # noqa: E501
+Tests use of set-code transactions from EIP-7702.
+
+Tests use of set-code transactions from
+[EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
+"""
from enum import StrEnum
from hashlib import sha256
@@ -88,11 +90,11 @@ def test_self_sponsored_set_code(
"""
Test the executing a self-sponsored set-code transaction.
- The transaction is sent to the sender, and the sender is the signer of the only authorization
- tuple in the authorization list.
+ The transaction is sent to the sender, and the sender is the signer of the
+ only authorization tuple in the authorization list.
- The authorization tuple has a nonce of 1 because the self-sponsored transaction increases the
- nonce of the sender from zero to one first.
+ The authorization tuple has a nonce of 1 because the self-sponsored
+ transaction increases the nonce of the sender from zero to one first.
The expected nonce at the end of the transaction is 2.
"""
@@ -268,7 +270,10 @@ def test_set_code_to_sstore_then_sload(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test the executing a simple SSTORE then SLOAD in two separate set-code transactions."""
+ """
+ Test executing a simple SSTORE then SLOAD in two separate set-code
+ transactions.
+ """
auth_signer = pre.fund_eoa(auth_account_start_balance)
sender = pre.fund_eoa()
@@ -346,8 +351,8 @@ def test_set_code_to_tstore_reentry(
evm_code_type: EVMCodeType,
):
"""
- Test the executing a simple TSTORE in a set-code transaction, which also performs a
- re-entry to TLOAD the value.
+ Test executing a simple TSTORE in a set-code transaction, which also
+ performs a re-entry to TLOAD the value.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -403,9 +408,10 @@ def test_set_code_to_tstore_available_at_correct_address(
call_eoa_first: bool,
):
"""
- Test TLOADing from slot 2 and then SSTORE this in slot 1, then TSTORE 3 in slot 2.
- This is done both from the EOA which is delegated to account A, and then A is called.
- The storage should stay empty on both the EOA and the delegated account.
+ Test TLOADing from slot 2 and then SSTORE this in slot 1, then TSTORE 3 in
+ slot 2. This is done both from the EOA which is delegated to account A, and
+ then A is called. The storage should stay empty on both the EOA and the
+ delegated account.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -522,7 +528,9 @@ def test_set_code_to_contract_creator(
create_opcode: Op,
evm_code_type: EVMCodeType,
):
- """Test the executing a contract-creating opcode in a set-code transaction."""
+ """
+ Test executing a contract-creating opcode in a set-code transaction.
+ """
storage = Storage()
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -677,8 +685,8 @@ def test_set_code_max_depth_call_stack(
fork: Fork,
):
"""
- Test re-entry to delegated account until the max call stack depth possible in a
- transaction is reached.
+ Test re-entry to delegated account until the max call stack depth possible
+ in a transaction is reached.
"""
storage = Storage()
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -867,7 +875,10 @@ def test_tx_into_self_delegating_set_code(
state_test: StateTestFiller,
pre: Alloc,
):
- """Test a transaction that has entry-point into a set-code account that delegates to itself."""
+ """
+ Test a transaction that has entry-point into a set-code account that
+ delegates to itself.
+ """
auth_signer = pre.fund_eoa(auth_account_start_balance)
tx = Transaction(
@@ -902,8 +913,8 @@ def test_tx_into_chain_delegating_set_code(
pre: Alloc,
):
"""
- Test a transaction that has entry-point into a set-code account that delegates to another
- set-code account.
+ Test a transaction that has entry-point into a set-code account that
+ delegates to another set-code account.
"""
auth_signer_1 = pre.fund_eoa(auth_account_start_balance)
auth_signer_2 = pre.fund_eoa(auth_account_start_balance)
@@ -994,7 +1005,10 @@ def test_call_into_chain_delegating_set_code(
pre: Alloc,
call_opcode: Op,
):
- """Test call into a set-code account that delegates to another set-code account."""
+ """
+ Test call into a set-code account that delegates to another set-code
+ account.
+ """
auth_signer_1 = pre.fund_eoa(auth_account_start_balance)
auth_signer_2 = pre.fund_eoa(auth_account_start_balance)
@@ -1310,8 +1324,8 @@ def test_set_code_address_and_authority_warm_state_call_types(
):
"""
Test set to code address and authority warm status after a call to
- authority address, or vice-versa, using all available call opcodes
- without using `GAS` opcode (unavailable in EOF).
+ authority address, or vice-versa, using all available call opcodes without
+ using `GAS` opcode (unavailable in EOF).
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -1376,7 +1390,10 @@ def test_ext_code_on_self_delegating_set_code(
pre: Alloc,
balance: int,
):
- """Test different ext*code operations on a set-code address that delegates to itself."""
+ """
+ Test different ext*code operations on a set-code address that delegates to
+ itself.
+ """
auth_signer = pre.fund_eoa(balance)
slot = count(1)
@@ -1438,8 +1455,8 @@ def test_ext_code_on_chain_delegating_set_code(
pre: Alloc,
):
"""
- Test different ext*code operations on a set-code address that references another delegated
- address.
+ Test different ext*code operations on a set-code address that references
+ another delegated address.
"""
auth_signer_1_balance = 1
auth_signer_2_balance = 0
@@ -1601,8 +1618,9 @@ def test_set_code_to_account_deployed_in_same_tx(
evm_code_type: EVMCodeType,
):
"""
- Test setting the code of an account to an address that is deployed in the same transaction,
- and test calling the set-code address and the deployed contract.
+ Test setting the code of an account to an address that is deployed in the
+ same transaction, and test calling the set-code address and the deployed
+ contract.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -1720,9 +1738,9 @@ def test_set_code_to_self_destructing_account_deployed_in_same_tx(
balance: int,
):
"""
- Test setting the code of an account to an account that contains the SELFDESTRUCT opcode and
- was deployed in the same transaction, and test calling the set-code address and the deployed
- in both sequence orders.
+ Test setting the code of an account to an account that contains the
+ SELFDESTRUCT opcode and was deployed in the same transaction, and test
+ calling the set-code address and the deployed contract in both orders.
"""
auth_signer = pre.fund_eoa(balance)
if external_sendall_recipient:
@@ -1818,8 +1836,8 @@ def test_set_code_multiple_first_valid_authorization_tuples_same_signer(
pre: Alloc,
):
"""
- Test setting the code of an account with multiple authorization tuples
- from the same signer.
+ Test setting the code of an account with multiple authorization tuples from
+ the same signer.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -1866,8 +1884,9 @@ def test_set_code_multiple_valid_authorization_tuples_same_signer_increasing_non
pre: Alloc,
):
"""
- Test setting the code of an account with multiple authorization tuples from the same signer
- and each authorization tuple has an increasing nonce, therefore the last tuple is executed.
+ Test setting the code of an account with multiple authorization tuples from
+ the same signer and each authorization tuple has an increasing nonce,
+ therefore the last tuple is executed.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -1914,9 +1933,10 @@ def test_set_code_multiple_valid_authorization_tuples_same_signer_increasing_non
pre: Alloc,
):
"""
- Test setting the code of an account with multiple authorization tuples from the same signer
- and each authorization tuple has an increasing nonce, therefore the last tuple is executed,
- and the transaction is self-sponsored.
+ Test setting the code of an account with multiple authorization tuples from
+ the same signer and each authorization tuple has an increasing nonce,
+ therefore the last tuple is executed, and the transaction is
+ self-sponsored.
"""
auth_signer = pre.fund_eoa()
@@ -1962,8 +1982,8 @@ def test_set_code_multiple_valid_authorization_tuples_first_invalid_same_signer(
pre: Alloc,
):
"""
- Test setting the code of an account with multiple authorization tuples from the same signer
- but the first tuple is invalid.
+ Test setting the code of an account with multiple authorization tuples from
+ the same signer but the first tuple is invalid.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -2010,8 +2030,8 @@ def test_set_code_all_invalid_authorization_tuples(
pre: Alloc,
):
"""
- Test setting the code of an account with multiple authorization tuples from the same signer
- and all of them are invalid.
+ Test setting the code of an account with multiple authorization tuples from
+ the same signer and all of them are invalid.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -2050,7 +2070,10 @@ def test_set_code_using_chain_specific_id(
pre: Alloc,
chain_config: ChainConfig,
):
- """Test sending a transaction to set the code of an account using a chain-specific ID."""
+ """
+ Test sending a transaction to set the code of an account using a
+ chain-specific ID.
+ """
auth_signer = pre.fund_eoa(auth_account_start_balance)
success_slot = 1
@@ -2113,7 +2136,10 @@ def test_set_code_using_valid_synthetic_signatures(
r: int,
s: int,
):
- """Test sending a transaction to set the code of an account using synthetic signatures."""
+ """
+ Test sending a transaction to set the code of an account using synthetic
+ signatures.
+ """
success_slot = 1
set_code = Op.SSTORE(success_slot, 1) + Op.STOP
@@ -2162,8 +2188,10 @@ def test_set_code_using_valid_synthetic_signatures(
pytest.param(2, 1, 1, id="v=2"),
pytest.param(27, 1, 1, id="v=27"), # Type-0 transaction valid value
pytest.param(28, 1, 1, id="v=28"), # Type-0 transaction valid value
- pytest.param(35, 1, 1, id="v=35"), # Type-0 replay-protected transaction valid value
- pytest.param(36, 1, 1, id="v=36"), # Type-0 replay-protected transaction valid value
+ # Type-0 replay-protected transaction valid value:
+ pytest.param(35, 1, 1, id="v=35"),
+ # Type-0 replay-protected transaction valid value:
+ pytest.param(36, 1, 1, id="v=36"),
pytest.param(2**8 - 1, 1, 1, id="v=2**8-1"),
# R
pytest.param(1, 0, 1, id="r=0"),
@@ -2194,8 +2222,9 @@ def test_valid_tx_invalid_auth_signature(
s: int,
):
"""
- Test sending a transaction to set the code of an account using synthetic signatures,
- the transaction is valid but the authorization should not go through.
+ Test sending a transaction to set the code of an account using synthetic
+ signatures, the transaction is valid but the authorization should not go
+ through.
"""
success_slot = 1
@@ -2237,8 +2266,9 @@ def test_signature_s_out_of_range(
chain_config: ChainConfig,
):
"""
- Test sending a transaction with an authorization tuple where the signature s value is out of
- range by modifying its value to be `SECP256K1N - S` and flipping the v value.
+ Test sending a transaction with an authorization tuple where the signature
+ s value is out of range by modifying its value to be `SECP256K1N - S` and
+ flipping the v value.
"""
auth_signer = pre.fund_eoa(0)
@@ -2309,8 +2339,8 @@ def test_valid_tx_invalid_chain_id(
invalid_chain_id_case: InvalidChainID,
):
"""
- Test sending a transaction where the chain id field does not match
- the current chain's id.
+ Test sending a transaction where the chain id field does not match the
+ current chain's id.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -2405,11 +2435,11 @@ def test_nonce_validity(
authorization_nonce: int,
):
"""
- Test sending a transaction where the nonce field of an authorization almost overflows the
- maximum value.
+ Test sending a transaction where the nonce field of an authorization almost
+ overflows the maximum value.
- Also test calling the account of the authorization signer in order to verify that the account
- is not warm.
+ Also test calling the account of the authorization signer in order to
+ verify that the account is not warm.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance, nonce=account_nonce)
@@ -2470,8 +2500,8 @@ def test_nonce_overflow_after_first_authorization(
pre: Alloc,
):
"""
- Test sending a transaction with two authorization where the first one bumps the nonce
- to 2**64-1 and the second would result in overflow.
+ Test sending a transaction with two authorization where the first one bumps
+ the nonce to 2**64-1 and the second would result in overflow.
"""
nonce = 2**64 - 2
auth_signer = pre.fund_eoa(auth_account_start_balance, nonce=nonce)
@@ -2547,7 +2577,10 @@ def test_set_code_to_log(
pre: Alloc,
log_opcode: Op,
):
- """Test setting the code of an account to a contract that performs the log operation."""
+ """
+ Test setting the code of an account to a contract that performs the log
+ operation.
+ """
sender = pre.fund_eoa()
log_kwargs = {}
@@ -2604,8 +2637,8 @@ def test_set_code_to_precompile(
call_opcode: Op,
):
"""
- Test setting the code of an account to a pre-compile address and executing all call
- opcodes.
+ Test setting the code of an account to a pre-compile address and executing
+ all call opcodes.
"""
auth_signer = pre.fund_eoa(auth_account_start_balance)
@@ -2663,8 +2696,8 @@ def test_set_code_to_precompile_not_enough_gas_for_precompile_execution(
precompile: int,
):
"""
- Test set code to precompile and making direct call in same transaction with intrinsic gas
- only, no extra gas for precompile execution.
+ Test set code to precompile and making direct call in same transaction with
+ intrinsic gas only, no extra gas for precompile execution.
"""
auth_signer = pre.fund_eoa(amount=1)
auth = AuthorizationTuple(address=Address(precompile), nonce=0, signer=auth_signer)
@@ -2738,10 +2771,12 @@ def test_set_code_to_system_contract(
call_value = 0
- # Setup the initial storage of the account to mimic the system contract if required
+ # Setup the initial storage of the account to mimic the system contract if
+ # required
match system_contract:
case Address(0x00000000219AB540356CBB839CBE05303D7705FA): # EIP-6110
- # Deposit contract needs specific storage values, so we set them on the account
+ # Deposit contract needs specific storage values, so we set them on
+ # the account
auth_signer = pre.fund_eoa(
auth_account_start_balance, storage=deposit_contract_initial_storage()
)
@@ -2788,8 +2823,8 @@ def test_set_code_to_system_contract(
caller_payload = consolidation_request.calldata
call_value = consolidation_request.value
case Address(0x0000F90827F1C53A10CB7A02335B175320002935): # EIP-2935
- # This payload is used to identify the number of blocks to be subtracted from the
- # latest block number
+ # This payload is used to identify the number of blocks to be
+ # subtracted from the latest block number
caller_payload = Hash(1)
caller_code_storage[call_return_data_size_slot] = 32
case _:
@@ -2798,8 +2833,8 @@ def test_set_code_to_system_contract(
# Setup the code to call the system contract
match system_contract:
case Address(0x0000F90827F1C53A10CB7A02335B175320002935): # EIP-2935
- # Do a trick here to get the block number of the penultimate block to ensure it is
- # saved in the history contract
+ # Do a trick here to get the block number of the penultimate block
+ # to ensure it is saved in the history contract
check_block_number = Op.SUB(Op.NUMBER, Op.CALLDATALOAD(0))
call_system_contract_code = Op.MSTORE(0, check_block_number) + Op.SSTORE(
call_return_code_slot,
@@ -2844,7 +2879,8 @@ def test_set_code_to_system_contract(
blocks=[
Block(
txs=txs,
- requests_hash=Requests(), # Verify nothing slipped into the requests trie
+ # Verify nothing slipped into the requests trie
+ requests_hash=Requests(),
)
],
post={
@@ -2885,7 +2921,10 @@ def test_eoa_tx_after_set_code(
evm_code_type: EVMCodeType,
same_block: bool,
):
- """Test sending a transaction from an EOA after code has been set to the account."""
+ """
+ Test sending a transaction from an EOA after code has been set to the
+ account.
+ """
auth_signer = pre.fund_eoa()
set_code = Op.SSTORE(1, Op.ADD(Op.SLOAD(1), 1)) + Op.STOP
@@ -3145,12 +3184,14 @@ def test_delegation_clearing(
self_sponsored: bool,
):
"""
- Test clearing the delegation of an account under a variety of circumstances.
-
- - pre_set_delegation_code: The code to set on the account before clearing delegation, or None
- if the account should not have any code set.
- - self_sponsored: Whether the delegation clearing transaction is self-sponsored.
-
+ Test clearing the delegation of an account under a variety of
+ circumstances.
+
+ - pre_set_delegation_code: The code to set on the account before clearing
+ delegation, or None if the account should not
+ have any code set.
+ - self_sponsored: Whether the delegation clearing transaction is
+ self-sponsored.
""" # noqa: D417
pre_set_delegation_address: Address | None = None
if pre_set_delegation_code is not None:
@@ -3238,10 +3279,11 @@ def test_delegation_clearing_tx_to(
"""
Tests directly calling the account which delegation is being cleared.
- - pre_set_delegation_code: The code to set on the account before clearing delegation, or None
- if the account should not have any code set.
- - self_sponsored: Whether the delegation clearing transaction is self-sponsored.
-
+ - pre_set_delegation_code: The code to set on the account before clearing
+ delegation, or None if the account should not
+ have any code set.
+ - self_sponsored: Whether the delegation clearing transaction is
+ self-sponsored.
""" # noqa: D417
pre_set_delegation_address: Address | None = None
if pre_set_delegation_code is not None:
@@ -3295,11 +3337,11 @@ def test_delegation_clearing_and_set(
pre_set_delegation_code: Bytecode | None,
):
"""
- Tests clearing and setting the delegation again in the same authorization list.
-
- - pre_set_delegation_code: The code to set on the account before clearing delegation, or None
- if the account should not have any code set.
+ Tests clearing and setting the delegation again in the same authorization
+ list.
+ - pre_set_delegation_code: The code to set on the account before clearing
+ delegation, or None if the account should not have any code set.
""" # noqa: D417
pre_set_delegation_address: Address | None = None
if pre_set_delegation_code is not None:
@@ -3361,7 +3403,10 @@ def test_delegation_clearing_failing_tx(
pre: Alloc,
entry_code: Bytecode,
):
- """Test clearing the delegation of an account in a transaction that fails, OOGs or reverts.""" # noqa: D417
+ """
+ Test clearing the delegation of an account in a transaction that fails,
+ OOGs or reverts.
+ """ # noqa: D417
pre_set_delegation_code = Op.RETURN(0, 1)
pre_set_delegation_address = pre.deploy_contract(pre_set_delegation_code)
@@ -3459,8 +3504,10 @@ def test_creating_delegation_designation_contract(
initcode_is_delegation_designation: bool,
):
"""
- Tx -> create -> pointer bytecode
- Attempt to deploy contract with magic bytes result in no contract being created.
+ Tx -> create -> pointer bytecode.
+
+ Attempt to deploy contract with magic bytes result in no
+ contract being created.
"""
env = Environment()
@@ -3522,14 +3569,16 @@ def test_many_delegations(
signer_balance: int,
):
"""
- Perform as many delegations as possible in a transaction using the entire block gas limit.
+ Perform as many delegations as possible in a transaction using the entire
+ block gas limit.
Every delegation comes from a different signer.
- The account of can be empty or not depending on the `signer_balance` parameter.
+ The account can be empty or not depending on the `signer_balance`
+ parameter.
- The transaction is expected to succeed and the state after the transaction is expected to have
- the code of the entry contract set to 1.
+ The transaction is expected to succeed and the state after the transaction
+ is expected to have the code of the entry contract set to 1.
"""
env = Environment()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
@@ -3587,8 +3636,8 @@ def test_invalid_transaction_after_authorization(
pre: Alloc,
):
"""
- Test an invalid block due to a transaction reusing the same nonce as an authorization
- included in a prior transaction.
+ Test an invalid block due to a transaction reusing the same nonce as an
+ authorization included in a prior transaction.
"""
auth_signer = pre.fund_eoa()
recipient = pre.fund_eoa(amount=0)
@@ -3636,8 +3685,8 @@ def test_authorization_reusing_nonce(
pre: Alloc,
):
"""
- Test an authorization reusing the same nonce as a prior transaction included in the same
- block.
+ Test an authorization reusing the same nonce as a prior transaction
+ included in the same block.
"""
auth_signer = pre.fund_eoa()
sender = pre.fund_eoa()
@@ -3694,12 +3743,12 @@ def test_set_code_from_account_with_non_delegating_code(
self_sponsored: bool,
):
"""
- Test that a transaction is correctly rejected,
- if the sender account has a non-delegating code set.
+ Test that a transaction is correctly rejected, if the sender account has a
+ non-delegating code set.
- The auth transaction is sent from sender which has contract code (not delegating)
- But at the same time it has auth tuple that will point this sender account
- To be eoa, delegation, contract .. etc
+ The auth transaction is sent from sender which has contract code (not
+ delegating). But at the same time it has an auth tuple that will point
+ this sender account to be eoa, delegation, contract, etc.
"""
sender = pre.fund_eoa(nonce=1)
random_address = pre.fund_eoa(0)
@@ -3719,7 +3768,8 @@ def test_set_code_from_account_with_non_delegating_code(
raise ValueError(f"Unsupported set code type: {set_code_type}")
callee_address = pre.deploy_contract(Op.SSTORE(0, 1) + Op.STOP)
- # Set the sender account to have some code, that is specifically not a delegation.
+ # Set the sender account to have some code, that is specifically not a
+ # delegation.
sender_account = pre[sender]
assert sender_account is not None
sender_account.code = Bytes(Op.STOP)
@@ -3781,7 +3831,9 @@ def test_set_code_transaction_fee_validations(
max_priority_fee_per_gas: int,
expected_error: TransactionException,
):
- """Test that a transaction with an insufficient max fee per gas is rejected."""
+ """
+ Test that a transaction with an insufficient max fee per gas is rejected.
+ """
set_to_code = pre.deploy_contract(Op.STOP)
auth_signer = pre.fund_eoa(amount=0)
tx = Transaction(
diff --git a/tests/prague/eip7702_set_code_tx/test_set_code_txs_2.py b/tests/prague/eip7702_set_code_tx/test_set_code_txs_2.py
index e6a622246e2..ef4208b76d2 100644
--- a/tests/prague/eip7702_set_code_tx/test_set_code_txs_2.py
+++ b/tests/prague/eip7702_set_code_tx/test_set_code_txs_2.py
@@ -1,4 +1,6 @@
-"""A state test for [EIP-7702 SetCodeTX](https://eips.ethereum.org/EIPS/eip-7702)."""
+"""
+A state test for [EIP-7702 SetCodeTX](https://eips.ethereum.org/EIPS/eip-7702).
+"""
from enum import Enum, IntEnum
@@ -40,8 +42,9 @@ def test_pointer_contract_pointer_loop(state_test: StateTestFiller, pre: Alloc):
"""
Tx -> call -> pointer A -> contract A -> pointer B -> contract loop C.
- Call pointer that goes more level of depth to call a contract loop
- Loop is created only if pointers are set with auth tuples
+ Call pointer that goes more level of depth to call a contract loop.
+
+ Loop is created only if pointers are set with auth tuples.
"""
env = Environment()
@@ -137,8 +140,10 @@ def test_pointer_to_pointer(state_test: StateTestFiller, pre: Alloc):
@pytest.mark.valid_from("Prague")
def test_pointer_normal(blockchain_test: BlockchainTestFiller, pre: Alloc):
"""
- Tx -> call -> pointer A -> contract
- Other normal tx can interact with previously assigned pointers.
+ Tx -> call -> pointer A -> contract.
+
+ Other normal tx can interact with
+ previously assigned pointers.
"""
env = Environment()
@@ -197,8 +202,10 @@ def test_pointer_normal(blockchain_test: BlockchainTestFiller, pre: Alloc):
@pytest.mark.valid_from("Prague")
def test_pointer_measurements(blockchain_test: BlockchainTestFiller, pre: Alloc):
"""
- Check extcode* operations on pointer before and after pointer is set
- Check context opcode results when called under pointer call
+ Check extcode* operations on pointer before and after pointer is set.
+
+ Check context opcode results when called under pointer call.
+
Opcodes have context of an original pointer account (balance, storage).
"""
env = Environment()
@@ -208,7 +215,8 @@ def test_pointer_measurements(blockchain_test: BlockchainTestFiller, pre: Alloc)
storage_normal: Storage = Storage()
storage_pointer: Storage = Storage()
- storage_pointer_code: Storage = Storage() # this storage will be applied to pointer address
+ storage_pointer_code: Storage = Storage() # this storage will be applied
+ # to pointer address
pointer_code = pre.deploy_contract(
balance=200,
code=Op.SSTORE(storage_pointer_code.store_next(pointer, "address"), Op.ADDRESS())
@@ -327,9 +335,10 @@ def test_call_to_precompile_in_pointer_context(
state_test: StateTestFiller, pre: Alloc, precompile: int
):
"""
- Tx -> call -> pointer A -> precompile contract
- Make sure that gas consumed when calling precompiles in normal call are the same
- As from inside the pointer context call.
+ Tx -> call -> pointer A -> precompile contract.
+
+ Make sure that the gas consumed when calling precompiles in a normal
+ call is the same as from inside the pointer context call.
"""
env = Environment()
@@ -400,11 +409,13 @@ def test_pointer_to_precompile(state_test: StateTestFiller, pre: Alloc, precompi
"""
Tx -> call -> pointer A -> precompile contract.
- In case a delegation designator points to a precompile address, retrieved code is considered
- empty and CALL, CALLCODE, STATICCALL, DELEGATECALL instructions targeting this account will
- execute empty code, i.e. succeed with no execution given enough gas.
+ In case a delegation designator points to a precompile address, retrieved
+ code is considered empty and CALL, CALLCODE, STATICCALL, DELEGATECALL
+ instructions targeting this account will execute empty code, i.e. succeed
+ with no execution given enough gas.
- So call to a pointer that points to a precompile is like call to an empty account
+ So call to a pointer that points to a precompile is like call to an empty
+ account.
"""
env = Environment()
@@ -440,7 +451,8 @@ def test_pointer_to_precompile(state_test: StateTestFiller, pre: Alloc, precompi
ret_offset=1000,
ret_size=32,
)
- # pointer call to a precompile with 0 gas always return 1 as if calling empty address
+ # pointer call to a precompile with 0 gas always return 1 as if calling
+ # empty address
+ Op.SSTORE(storage.store_next(1, "pointer_call_result"), Op.MLOAD(1000))
)
@@ -727,9 +739,9 @@ def test_pointer_call_followed_by_direct_call(
):
"""
If we first call by pointer then direct call, will the call/sload be hot
- The direct call will warm because pointer access marks it warm
- But the sload is still cold because
- storage marked hot from pointer's account in a pointer call.
+ The direct call will be warm because pointer access marks it warm. But
+ the sload is still cold because storage is marked hot from pointer's
+ account in a pointer call.
"""
env = Environment()
@@ -747,8 +759,10 @@ def test_pointer_call_followed_by_direct_call(
)
direct_call_gas = (
gas_costs.G_STORAGE_SET
- + gas_costs.G_WARM_ACCOUNT_ACCESS # since previous pointer call, contract is now warm
- + gas_costs.G_COLD_SLOAD # but storage is cold, because it's contract's direct
+ + gas_costs.G_WARM_ACCOUNT_ACCESS # since previous pointer call,
+ # contract is now warm
+ + gas_costs.G_COLD_SLOAD # but storage is cold, because it's
+ # contract's direct
+ opcodes_price
)
@@ -803,7 +817,8 @@ def test_pointer_call_followed_by_direct_call(
@pytest.mark.valid_from("Prague")
def test_pointer_to_static(state_test: StateTestFiller, pre: Alloc):
"""
- Tx -> call -> pointer A -> static -> static violation
+ Tx -> call -> pointer A -> static -> static violation.
+
Verify that static context is active when called under pointer.
"""
env = Environment()
@@ -849,7 +864,8 @@ def test_pointer_to_static(state_test: StateTestFiller, pre: Alloc):
@pytest.mark.valid_from("Prague")
def test_static_to_pointer(state_test: StateTestFiller, pre: Alloc):
"""
- Tx -> staticcall -> pointer A -> static violation
+ Tx -> staticcall -> pointer A -> static violation.
+
Verify that static context is active when make sub call to pointer.
"""
env = Environment()
@@ -895,7 +911,8 @@ def test_static_to_pointer(state_test: StateTestFiller, pre: Alloc):
@pytest.mark.valid_from("EOFv1")
def test_pointer_to_eof(state_test: StateTestFiller, pre: Alloc):
"""
- Tx -> call -> pointer A -> EOF
+ Tx -> call -> pointer A -> EOF.
+
Pointer to eof contract works.
"""
env = Environment()
@@ -1013,7 +1030,9 @@ def test_contract_storage_to_pointer_with_storage(
state_test: StateTestFiller, pre: Alloc, call_type: Op
):
"""
- Tx call -> contract with storage -> pointer A with storage -> storage/tstorage modify
+ Tx call -> contract with storage -> pointer A with storage ->
+ storage/tstorage modify.
+
Check storage/tstorage modifications when interacting with pointers.
"""
env = Environment()
@@ -1056,18 +1075,21 @@ def test_contract_storage_to_pointer_with_storage(
code=Op.TSTORE(third_slot, 1)
+ call_type(address=pointer_b, gas=500_000)
+ Op.SSTORE(third_slot, Op.TLOAD(third_slot))
- # Verify tstorage in contract after interacting with pointer, it must be 0
+ # Verify tstorage in contract after interacting with pointer, it must
+ # be 0
+ Op.MSTORE(0, 1)
+ Op.CALL(address=contract_b, gas=500_000, args_offset=0, args_size=32),
storage={
storage_a.store_next(
- # caller storage is modified when calling pointer with delegate or callcode
+ # caller storage is modified when calling pointer with delegate
+ # or callcode
6 if call_type in [Op.DELEGATECALL, Op.CALLCODE] else 5,
"first_slot",
): 5,
storage_a.store_next(2, "second_slot"): 2,
storage_a.store_next(
- # TSTORE is modified when calling pointer with delegate or callcode
+ # TSTORE is modified when calling pointer with delegate or
+ # callcode
2 if call_type in [Op.DELEGATECALL, Op.CALLCODE] else 1,
"third_slot",
): 3,
@@ -1113,8 +1135,10 @@ class ReentryAction(IntEnum):
@pytest.mark.valid_from("Prague")
def test_pointer_reentry(state_test: StateTestFiller, pre: Alloc):
"""
- Check operations when reenter the pointer again
- TODO: feel free to extend the code checks under given scenarios in switch case.
+ Check operations when reenter the pointer again.
+
+ TODO: feel free to extend the code checks under given scenarios in
+ switch case.
"""
env = Environment()
arg_contract = 0
@@ -1152,7 +1176,8 @@ def test_pointer_reentry(state_test: StateTestFiller, pre: Alloc):
+ Op.STOP(),
),
Case(
- # This code is executed under pointer -> proxy -> pointer context
+ # This code is executed under pointer -> proxy -> pointer
+ # context
condition=Op.EQ(Op.MLOAD(arg_action), ReentryAction.MEASURE_VALUES),
action=Op.SSTORE(storage_pointer_b.store_next(sender, "origin"), Op.ORIGIN())
+ Op.SSTORE(storage_pointer_b.store_next(pointer_b, "address"), Op.ADDRESS())
@@ -1172,7 +1197,8 @@ def test_pointer_reentry(state_test: StateTestFiller, pre: Alloc):
Case(
# This code is executed under
# pointer -> proxy -> pointer -> contract
- # so pointer calling the code of it's dest after reentry to itself
+ # so pointer calling the code of it's dest
+ # after reentry to itself
condition=Op.EQ(Op.MLOAD(arg_action), ReentryAction.MEASURE_VALUES_CONTRACT),
action=Op.SSTORE(storage_b.store_next(sender, "origin"), Op.ORIGIN())
+ Op.SSTORE(slot_reentry_address, Op.ADDRESS())
@@ -1216,9 +1242,11 @@ def test_pointer_reentry(state_test: StateTestFiller, pre: Alloc):
@pytest.mark.valid_from("Prague")
def test_eoa_init_as_pointer(state_test: StateTestFiller, pre: Alloc):
"""
- It was agreed before that senders don't have code
- And there were issues with tests sending transactions from account's with code
- With EIP7702 it is again possible, let's check the test runners are ok.
+ It was agreed before that senders don't have code.
+
+ And there were issues with tests sending transactions from accounts
+ with code. With EIP7702 it is again possible,
+ let's check the test runners are ok.
"""
env = Environment()
storage = Storage()
@@ -1249,13 +1277,17 @@ def test_call_pointer_to_created_from_create_after_oog_call_again(
"""
Set pointer to account that we are about to create.
- Pointer is set to create address that is yet not in the state
- During the call, address is created. pointer is called from init code to do nothing
+ Pointer is set to create address that is yet not in the state.
+
+ During the call, address is created. pointer is called from init code
+ to do nothing.
+
Then after account is created it is called again to run created code
- Then revert / no revert
+ Then revert / no revert.
- Call pointer again from the upper level to ensure it does not call reverted code
+ Call pointer again from the upper level to ensure it does not call reverted
+ code.
"""
env = Environment()
@@ -1544,10 +1576,12 @@ def test_pointer_resets_an_empty_code_account_with_storage(
pre: Alloc,
):
"""
- So in Block1 we create a sender with empty code, but non empty storage using pointers
- In Block2 we create account that perform suicide, then we check that when calling
- a pointer, that points to newly created account and runs suicide,
- is not deleted as well as its storage.
+ So in Block1 we create a sender with empty code, but non empty storage
+ using pointers.
+
+ In Block2 we create an account that performs suicide, then we
+ check that when calling a pointer, that points to newly created account and
+ runs suicide, is not deleted as well as its storage.
This one is a little messy.
"""
@@ -1624,7 +1658,9 @@ def test_pointer_resets_an_empty_code_account_with_storage(
# Block 2
# Sender with storage and pointer code calling selfdestruct on itself
- # But it points to a newly created account, check that pointer storage is not deleted
+ #
+ # But it points to a newly created account, check that pointer
+ # storage is not deleted
suicide_dest = pre.fund_eoa(amount=0)
deploy_code = Op.SSTORE(5, 5) + Op.SELFDESTRUCT(suicide_dest)
sender_storage[5] = 5
@@ -1637,7 +1673,8 @@ def test_pointer_resets_an_empty_code_account_with_storage(
+ Op.SSTORE(1, Op.CREATE(0, 0, Op.CALLDATASIZE()))
+ Op.CALL(address=sender) # run suicide from pointer
+ Op.CALL(address=Op.SLOAD(1)) # run suicide directly
- + Op.CALL(address=another_pointer) # run suicide from pointer that is not sender
+ + Op.CALL(address=another_pointer) # run suicide from pointer that is
+ # not sender
)
newly_created_address = compute_create_address(address=contract_create, nonce=1)
@@ -1679,9 +1716,13 @@ def test_pointer_resets_an_empty_code_account_with_storage(
post=post,
blocks=[
# post = {
- # pointer: Account(nonce=2, balance=0, storage=pointer_storage, code=bytes()),
- # sender: Account(storage=pointer_storage, code=bytes()),
- # }
+ # pointer: Account(nonce=2,
+ # balance=0,
+ # storage=pointer_storage,
+ # code=bytes()
+ # ),
+ # sender: Account(storage=pointer_storage, code=bytes())
+ # }
Block(
txs=[
tx_set_pointer_storage,
@@ -1710,7 +1751,8 @@ def test_set_code_type_tx_pre_fork(
Reject blocks with set code type transactions before the Prague fork.
This test was based on:
- tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_self_sponsored_set_code
+ tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_self_sponsored_set_
+ code
"""
storage = Storage()
sender = pre.fund_eoa()
@@ -1763,9 +1805,9 @@ def test_delegation_replacement_call_previous_contract(
fork: Fork,
):
"""
- Test setting the code of an EOA that already has
- delegation, calling the previous delegated contract.
- Previous contract shouldn't be warm when doing the CALL.
+ Test setting the code of an EOA that already has delegation, calling the
+ previous delegated contract. Previous contract shouldn't be warm when doing
+ the CALL.
"""
pre_set_delegation_code = Op.STOP
pre_set_delegation_address = pre.deploy_contract(pre_set_delegation_code)
diff --git a/tests/shanghai/eip3651_warm_coinbase/__init__.py b/tests/shanghai/eip3651_warm_coinbase/__init__.py
index f24c6b66a59..66363cc4b39 100644
--- a/tests/shanghai/eip3651_warm_coinbase/__init__.py
+++ b/tests/shanghai/eip3651_warm_coinbase/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651)
- Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651).
+Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651).
"""
diff --git a/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py b/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py
index b49b8569fdf..7f535043bae 100644
--- a/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py
+++ b/tests/shanghai/eip3651_warm_coinbase/test_warm_coinbase.py
@@ -1,9 +1,8 @@
"""
-abstract: Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651)
- Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651).
+Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651).
-note: Tests ported from:
- - [ethereum/tests/pull/1082](https://github.com/ethereum/tests/pull/1082).
+Tests ported from:
+[ethereum/tests/pull/1082](https://github.com/ethereum/tests/pull/1082).
"""
import pytest
diff --git a/tests/shanghai/eip3855_push0/__init__.py b/tests/shanghai/eip3855_push0/__init__.py
index a8372e6ccfc..986e36b5494 100644
--- a/tests/shanghai/eip3855_push0/__init__.py
+++ b/tests/shanghai/eip3855_push0/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Tests [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855)
- Tests for [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855).
+Tests [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855).
"""
diff --git a/tests/shanghai/eip3855_push0/test_push0.py b/tests/shanghai/eip3855_push0/test_push0.py
index c490d0d8fa1..aa3e471f469 100644
--- a/tests/shanghai/eip3855_push0/test_push0.py
+++ b/tests/shanghai/eip3855_push0/test_push0.py
@@ -1,9 +1,8 @@
"""
-abstract: Tests [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855)
- Tests for [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855).
+Tests [EIP-3855: PUSH0 Instruction](https://eips.ethereum.org/EIPS/eip-3855).
-note: Tests ported from:
- - [ethereum/tests/pull/1033](https://github.com/ethereum/tests/pull/1033).
+Tests ported from:
+[ethereum/tests/pull/1033](https://github.com/ethereum/tests/pull/1033).
"""
import pytest
@@ -92,16 +91,21 @@ def test_push0_contracts(
class TestPush0CallContext:
"""
- Tests the PUSH0 operation during various call contexts including:
- - CALL
- - CALLCODE
- - DELEGATECALL
+ Test the PUSH0 operation in various contract call contexts.
+
+ Test PUSH0 in the following contract call contexts:
+
+ - CALL,
+ - CALLCODE,
+ - DELEGATECALL,
- STATICCALL.
"""
@pytest.fixture
def push0_contract_callee(self, pre: Alloc) -> Address:
- """Deploys a PUSH0 contract callee to the pre alloc returning its address."""
+ """
+ Deploys a PUSH0 contract callee to the pre alloc returning its address.
+ """
push0_contract = pre.deploy_contract(Op.MSTORE8(Op.PUSH0, 0xFF) + Op.RETURN(Op.PUSH0, 1))
return push0_contract
@@ -110,8 +114,9 @@ def push0_contract_caller(
self, pre: Alloc, call_opcode: Op, push0_contract_callee: Address
) -> Address:
"""
- Deploy contract responsible for calling the callee PUSH0 contract
- returning its address.
+ Deploy the contract that calls the callee PUSH0 contract into `pre`.
+
+ This fixture returns its address.
"""
call_code = (
Op.SSTORE(0, call_opcode(gas=100_000, address=push0_contract_callee))
diff --git a/tests/shanghai/eip3860_initcode/__init__.py b/tests/shanghai/eip3860_initcode/__init__.py
index 62ee051a08a..0d50d930d9e 100644
--- a/tests/shanghai/eip3860_initcode/__init__.py
+++ b/tests/shanghai/eip3860_initcode/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Test [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860)
- Tests for [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860).
+Test [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860).
"""
diff --git a/tests/shanghai/eip3860_initcode/spec.py b/tests/shanghai/eip3860_initcode/spec.py
index 65dad3fcea3..67ccec1e539 100644
--- a/tests/shanghai/eip3860_initcode/spec.py
+++ b/tests/shanghai/eip3860_initcode/spec.py
@@ -17,7 +17,9 @@ class ReferenceSpec:
@dataclass(frozen=True)
class Spec:
"""
- Parameters from the EIP-3860 specifications as defined at
+ Define parameters from the EIP-3860 specifications.
+
+ These are the parameters defined at
https://eips.ethereum.org/EIPS/eip-3860#parameters.
"""
diff --git a/tests/shanghai/eip3860_initcode/test_initcode.py b/tests/shanghai/eip3860_initcode/test_initcode.py
index 1b52b29b039..8f31bb2f206 100644
--- a/tests/shanghai/eip3860_initcode/test_initcode.py
+++ b/tests/shanghai/eip3860_initcode/test_initcode.py
@@ -1,10 +1,9 @@
"""
-abstract: Test [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860)
- Tests for [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860).
+Test [EIP-3860: Limit and meter initcode](https://eips.ethereum.org/EIPS/eip-3860).
-note: Tests ported from:
- - [ethereum/tests/pull/990](https://github.com/ethereum/tests/pull/990)
- - [ethereum/tests/pull/1012](https://github.com/ethereum/tests/pull/990)
+Tests ported from:
+- [ethereum/tests/pull/990](https://github.com/ethereum/tests/pull/990)
+- [ethereum/tests/pull/1012](https://github.com/ethereum/tests/pull/990)
"""
from typing import List
@@ -39,9 +38,7 @@
pytestmark = pytest.mark.valid_from("Shanghai")
-"""
-Initcode templates used throughout the tests
-"""
+"""Initcode templates used throughout the tests"""
INITCODE_ONES_MAX_LIMIT = Initcode(
deploy_code=INITCODE_RESULTING_DEPLOYED_CODE,
initcode_length=Spec.MAX_INITCODE_SIZE,
@@ -112,9 +109,7 @@
SINGLE_BYTE_INITCODE.deployment_gas = 0
SINGLE_BYTE_INITCODE.execution_gas = 0
-"""
-Test cases using a contract creating transaction
-"""
+"""Test cases using a contract creating transaction"""
@pytest.mark.xdist_group(name="bigmem")
@@ -137,8 +132,7 @@ def test_contract_creating_tx(
initcode: Initcode,
):
"""
- Tests creating a contract using a transaction with an initcode that is
- on/over the max allowed limit.
+ Test creating a contract with initcode that is on/over the allowed limit.
"""
create_contract_address = compute_create_address(
address=sender,
@@ -209,15 +203,18 @@ def valid_gas_test_case(initcode: Initcode, gas_test_case: str) -> bool:
)
class TestContractCreationGasUsage:
"""
- Tests the following cases that verify the gas cost behavior of a
- contract creating transaction.
-
- 1. Test with exact intrinsic gas minus one, contract create fails
- and tx is invalid.
- 2. Test with exact intrinsic gas, contract create fails,
- but tx is valid.
- 3. Test with exact execution gas minus one, contract create fails,
- but tx is valid.
+ Test the gas cost behavior of a contract creating transaction.
+
+ The following scenarios are tested:
+
+ 1. Test with exact intrinsic gas minus one, contract create fails and tx is
+ invalid.
+
+ 2. Test with exact intrinsic gas, contract create fails, but tx is valid.
+
+ 3. Test with exact execution gas minus one, contract create fails, but tx
+ is valid.
+
4. Test with exact execution gas, contract create succeeds.
Initcode must be within a valid EIP-3860 length.
@@ -226,8 +223,10 @@ class TestContractCreationGasUsage:
@pytest.fixture
def tx_access_list(self) -> List[AccessList]:
"""
- On EIP-7623, we need to use an access list to raise the intrinsic gas cost to
- be above the floor data cost.
+ Return an access list to raise the intrinsic gas cost.
+
+ Upon EIP-7623 activation, we need to use an access list to raise the
+ intrinsic gas cost to be above the floor data cost.
"""
return [AccessList(address=Address(i), storage_keys=[]) for i in range(1, 478)]
@@ -235,7 +234,9 @@ def tx_access_list(self) -> List[AccessList]:
def exact_intrinsic_gas(
self, fork: Fork, initcode: Initcode, tx_access_list: List[AccessList]
) -> int:
- """Calculate the intrinsic tx gas cost."""
+ """
+ Calculate the intrinsic tx gas cost.
+ """
tx_intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
assert tx_intrinsic_gas_cost_calculator(
calldata=initcode,
@@ -255,12 +256,16 @@ def exact_intrinsic_gas(
@pytest.fixture
def exact_execution_gas(self, exact_intrinsic_gas: int, initcode: Initcode) -> int:
- """Calculate total execution gas cost."""
+ """
+ Calculate total execution gas cost.
+ """
return exact_intrinsic_gas + initcode.deployment_gas + initcode.execution_gas
@pytest.fixture
def tx_error(self, gas_test_case: str) -> TransactionException | None:
"""
+ Return the transaction exception, or None, as expected.
+
Check that the transaction is invalid if too little intrinsic gas is
specified, otherwise the tx is valid and succeeds.
"""
@@ -280,7 +285,9 @@ def tx(
exact_execution_gas: int,
) -> Transaction:
"""
- Implement the gas_test_case by setting the gas_limit of the tx
+ Return a tx with `gas_limit` corresponding to the `gas_test_case`.
+
+ Implement the gas_test_case by setting the `gas_limit` of the tx
appropriately and test whether the tx succeeds or fails with
appropriate error.
"""
@@ -318,8 +325,7 @@ def post(
exact_execution_gas: int,
) -> Alloc:
"""
- Test that contract creation fails unless enough execution gas is
- provided.
+ Test contract creation fails unless enough execution gas is provided.
"""
create_contract_address = compute_create_address(
address=sender,
@@ -341,7 +347,9 @@ def test_gas_usage(
post: Alloc,
tx: Transaction,
):
- """Test transaction and contract creation behavior for different gas limits."""
+ """
+ Test transaction and contract creation using different gas limits.
+ """
state_test(
env=env,
pre=pre,
@@ -369,18 +377,24 @@ def test_gas_usage(
@pytest.mark.parametrize("opcode", [Op.CREATE, Op.CREATE2], ids=get_create_id)
class TestCreateInitcode:
"""
- Test contract creation via the CREATE/CREATE2 opcodes that have an initcode
- that is on/over the max allowed limit.
+ Test contract creation with valid and invalid initcode lengths.
+
+ Test contract creation via CREATE/CREATE2, parametrized by initcode that is
+ on/over the max allowed limit.
"""
@pytest.fixture
def create2_salt(self) -> int:
- """Salt value used for CREATE2 contract creation."""
+ """
+ Salt value used for CREATE2 contract creation.
+ """
return 0xDEADBEEF
@pytest.fixture
def creator_code(self, opcode: Op, create2_salt: int) -> Bytecode:
- """Generate code for the creator contract which performs the CREATE/CREATE2 operation."""
+ """
+ Generate code for the creator contract which calls CREATE/CREATE2.
+ """
return (
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
+ Op.GAS
@@ -415,7 +429,9 @@ def created_contract_address( # noqa: D103
initcode: Initcode,
creator_contract_address: Address,
) -> Address:
- """Calculate address of the contract created by the creator contract."""
+ """
+ Calculate address of the contract created by the creator contract.
+ """
return compute_create_address(
address=creator_contract_address,
nonce=1,
@@ -426,7 +442,9 @@ def created_contract_address( # noqa: D103
@pytest.fixture
def caller_code(self, creator_contract_address: Address) -> Bytecode:
- """Generate code for the caller contract that calls the creator contract."""
+ """
+ Generate code for the caller contract that calls the creator contract.
+ """
return Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE) + Op.SSTORE(
Op.CALL(5000000, creator_contract_address, 0, 0, Op.CALLDATASIZE, 0, 0), 1
)
@@ -499,8 +517,10 @@ def test_create_opcode_initcode(
create2_word_cost: int,
):
"""
- Test contract creation via the CREATE/CREATE2 opcodes that have an
- initcode that is on/over the max allowed limit.
+ Test contract creation with valid and invalid initcode lengths.
+
+ Test contract creation via CREATE/CREATE2, parametrized by initcode
+ that is on/over the max allowed limit.
"""
if len(initcode) > Spec.MAX_INITCODE_SIZE:
# Call returns 0 as out of gas s[0]==1
diff --git a/tests/shanghai/eip3860_initcode/test_with_eof.py b/tests/shanghai/eip3860_initcode/test_with_eof.py
index 433ba31d9fe..f3b7318518a 100644
--- a/tests/shanghai/eip3860_initcode/test_with_eof.py
+++ b/tests/shanghai/eip3860_initcode/test_with_eof.py
@@ -1,4 +1,6 @@
-"""Tests interaction between edge case size CREATE / CREATE2 and EOF, including EIP-3860 limits."""
+"""
+Test CREATE / CREATE2 and EOF interaction for EIP-3860 initcode limits.
+"""
import itertools
@@ -54,9 +56,12 @@ def test_legacy_create_edge_code_size(
init_code: Bytecode,
):
"""
- Verifies that legacy initcode/deploycode having 0 or max size continues to work in the fork
- where EOF is enabled. Handling of EOF magic prefix and version interferes with the handling
- of legacy creation, so a specific test was proposed to test behavior doesn't change.
+ Test legacy initcode and deployed code edge cases with EOF enabled.
+
+ Verify that legacy initcode/deploycode having 0 or max size continues to
+ work in the fork where EOF is enabled. Handling of EOF magic prefix and
+ version interferes with the handling of legacy creation, so a specific test
+ was proposed to test behavior doesn't change.
"""
env = Environment()
diff --git a/tests/shanghai/eip4895_withdrawals/__init__.py b/tests/shanghai/eip4895_withdrawals/__init__.py
index bb4a893dcb3..c10cd7154bd 100644
--- a/tests/shanghai/eip4895_withdrawals/__init__.py
+++ b/tests/shanghai/eip4895_withdrawals/__init__.py
@@ -1,5 +1,3 @@
"""
-abstract: Tests [EIP-4895: Beacon chain withdrawals](https://eips.ethereum.org/EIPS/eip-4895)
- Test cases for [EIP-4895: Beacon chain push withdrawals as
- operations](https://eips.ethereum.org/EIPS/eip-4895).
+Tests [EIP-4895: Beacon chain withdrawals](https://eips.ethereum.org/EIPS/eip-4895).
"""
diff --git a/tests/shanghai/eip4895_withdrawals/test_withdrawals.py b/tests/shanghai/eip4895_withdrawals/test_withdrawals.py
index 44bbbeb9021..df2124df950 100644
--- a/tests/shanghai/eip4895_withdrawals/test_withdrawals.py
+++ b/tests/shanghai/eip4895_withdrawals/test_withdrawals.py
@@ -1,7 +1,5 @@
"""
-abstract: Tests [EIP-4895: Beacon chain withdrawals](https://eips.ethereum.org/EIPS/eip-4895)
- Test cases for [EIP-4895: Beacon chain push withdrawals as
- operations](https://eips.ethereum.org/EIPS/eip-4895).
+Tests for [EIP-4895: Beacon chain withdrawals](https://eips.ethereum.org/EIPS/eip-4895).
"""
from enum import Enum, unique
@@ -50,11 +48,11 @@ class TestUseValueInTx:
"""
Test that the value from a withdrawal can be used in a transaction.
- 1. `tx_in_withdrawals_block`: Test that the withdrawal value can not be used by a transaction
- in the same block as the withdrawal.
+ 1. `tx_in_withdrawals_block`: Test that the withdrawal value can not be
+ used by a transaction in the same block as the withdrawal.
- 2. `tx_after_withdrawals_block`: Test that the withdrawal value can be used by a transaction
- in the subsequent block.
+ 2. `tx_after_withdrawals_block`: Test that the withdrawal value can be used
+ by a transaction in the subsequent block.
"""
@pytest.fixture
@@ -189,8 +187,7 @@ def test_use_value_in_contract(
def test_balance_within_block(blockchain_test: BlockchainTestFiller, pre: Alloc):
"""
- Test Withdrawal balance increase within the same block,
- inside contract call.
+ Test withdrawal balance increase within the same block in a contract call.
"""
save_balance_on_block_number = Op.SSTORE(
Op.NUMBER,
@@ -304,8 +301,7 @@ def test_multiple_withdrawals_same_address(
blocks: List[Block],
):
"""
- Test Withdrawals can be done to the same address multiple times in
- the same block.
+ Test withdrawals to the same address multiple times in the same block.
"""
# Expected post is the same for both test cases.
post = {}
@@ -323,8 +319,7 @@ def test_many_withdrawals(
pre: Alloc,
):
"""
- Test Withdrawals with a count of N withdrawals in a single block where
- N is a high number not expected to be seen in mainnet.
+ Test an unexpected high number of withdrawals in a single block.
"""
n = 400
withdrawals = []
@@ -362,8 +357,9 @@ def test_self_destructing_account(
):
"""
Test withdrawals can be done to self-destructed accounts.
- Account `0x100` self-destructs and sends all its balance to `0x200`.
- Then, a withdrawal is received at `0x100` with 99 wei.
+
+ Account `0x100` self-destructs and sends all its balance to `0x200`. Then,
+ a withdrawal is received at `0x100` with 99 wei.
"""
self_destruct_code = Op.SELFDESTRUCT(Op.CALLDATALOAD(0))
sender = pre.fund_eoa()
@@ -419,7 +415,7 @@ def test_newly_created_contract(
include_value_in_tx: bool,
request,
):
- """Test Withdrawing to a newly created contract."""
+ """Test withdrawing to a newly created contract."""
sender = pre.fund_eoa()
initcode = Op.RETURN(0, 1)
tx = Transaction(
@@ -463,7 +459,7 @@ def test_no_evm_execution(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
):
- """Test Withdrawals don't trigger EVM execution."""
+ """Test withdrawals don't trigger EVM execution."""
sender = pre.fund_eoa()
contracts = [pre.deploy_contract(Op.SSTORE(Op.NUMBER, 1)) for _ in range(4)]
blocks = [
@@ -554,16 +550,19 @@ def test_zero_amount(
test_case: ZeroAmountTestCases,
):
"""
- Test withdrawals with zero amount for the following cases, all withdrawals
- are included in one block.
+ Test withdrawal scenarios with a zero amount in a single block.
+
+ All the withdrawals in the following scenarios are included in one block.
1. Two withdrawals of zero amount to two different addresses; one to an
- untouched account, one to an account with a balance.
+ untouched account, one to an account with a balance.
+
2. As 1., but with an additional withdrawal with positive value.
+
3. As 2., but with an additional withdrawal containing the maximum value
possible.
- 4. As 3., but with order of withdrawals in the block reversed.
+ 4. As 3., but with order of withdrawals in the block reversed.
"""
empty_accounts = [pre.fund_eoa(0) for _ in range(3)]
zero_balance_contract = pre.deploy_contract(Op.STOP)
@@ -657,8 +656,9 @@ def test_large_amount(
pre: Alloc,
):
"""
- Test Withdrawals that have a large gwei amount, so that (gwei * 1e9)
- could overflow uint64 but not uint256.
+ Test withdrawals that have a large gwei amount.
+
+ Test such that (gwei * 1e9) could overflow uint64 but not uint256.
"""
withdrawals: List[Withdrawal] = []
amounts: List[int] = [
diff --git a/tests/unscheduled/__init__.py b/tests/unscheduled/__init__.py
index c0bc59e8d52..f4907906a27 100644
--- a/tests/unscheduled/__init__.py
+++ b/tests/unscheduled/__init__.py
@@ -1 +1,4 @@
-"""Test cases for unscheduled EVM functionality. A temporary home for features that are not yet CFI'd for inclusion in the next hardfork.""" # noqa: E501
+"""
+Test cases for unscheduled EVM functionality. A temporary home for features
+that are not yet CFI'd for inclusion in the next hardfork.
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/__init__.py b/tests/unscheduled/eip7692_eof_v1/__init__.py
index 39b893bb4af..47bb9a0684c 100644
--- a/tests/unscheduled/eip7692_eof_v1/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/__init__.py
@@ -1,6 +1,6 @@
"""
-abstract: Test cases for [EIP-7692: EVM Object Format (EOFv1) Meta](https://eips.ethereum.org/EIPS/eip-7692)
- Test cases for the EIPs included in [EIP-7692 EOFv1 Meta](https://eips.ethereum.org/EIPS/eip-7692).
+Test cases for [EIP-7692: EVM Object Format (EOFv1) Meta](https://eips.ethereum.org/EIPS/eip-7692).
+Test cases for the EIPs included in EIP-7692 EOFv1 Meta.
* [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663).
* [EIP-3540: EOF - EVM Object Format v1](https://eips.ethereum.org/EIPS/eip-3540).
@@ -16,7 +16,7 @@
## Devnet Specifications
-- [ethpandaops/eof-devnet-0](https://notes.ethereum.org/@ethpandaops/eof-devnet-0).
-""" # noqa: E501
+ - [ethpandaops/eof-devnet-0](https://notes.ethereum.org/@ethpandaops/eof-devnet-0).
+"""
EOF_FORK_NAME = "EOFv1"
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/__init__.py
index de9eb0a8f92..3d699145372 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/__init__.py
@@ -1,5 +1,10 @@
"""
-abstract: Test cases for [EIP-3540: EOF - EVM Object Format v1](https://eips.ethereum.org/EIPS/eip-3540)
- EIP-3540 introduces a structured format for EVM bytecode, with separate sections for code and data.
- Opcodes introduced: None (defines a new bytecode structure but no new opcodes).
-""" # noqa: E501
+EOF - EVM Object Format v1 tests.
+
+Test cases for
+[EIP-3540: EOF - EVM Object Format v1](https://eips.ethereum.org/EIPS/eip-3540).
+
+EIP-3540 introduces a structured format for EVM bytecode, with separate
+sections for code and data. Opcodes introduced: None (defines a new
+bytecode structure but no new opcodes).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_all_opcodes_in_container.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_all_opcodes_in_container.py
index c7310bcaf0a..314c6998384 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_all_opcodes_in_container.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_all_opcodes_in_container.py
@@ -1,4 +1,7 @@
-"""EOF Container: check how every opcode behaves in the middle of the valid eof container code."""
+"""
+EOF Container: check how every opcode behaves in the middle of the valid eof
+container code.
+"""
import itertools
from typing import Any, Dict, Generator, List, Tuple
@@ -22,7 +25,8 @@
all_opcodes = set(Op)
undefined_opcodes = set(UndefinedOpcodes)
-# Invalid Opcodes will produce EOFException.UNDEFINED_INSTRUCTION when used in EOFContainer
+# Invalid Opcodes will produce EOFException.UNDEFINED_INSTRUCTION when used in
+# EOFContainer
invalid_eof_opcodes = {
Op.CODESIZE,
Op.SELFDESTRUCT,
@@ -44,7 +48,8 @@
valid_eof_opcodes = all_opcodes - invalid_eof_opcodes
-# Halting the execution opcodes can be placed without STOP instruction at the end
+# Halting the execution opcodes can be placed without STOP instruction at the
+# end
halting_opcodes = {
Op.STOP,
Op.RETURNCODE,
@@ -53,7 +58,8 @@
Op.INVALID,
}
-# Opcodes that end the code section and can be placed without STOP instruction at the end
+# Opcodes that end the code section and can be placed without STOP instruction
+# at the end
section_terminating_opcodes = {
Op.RETF,
Op.JUMPF,
@@ -62,7 +68,8 @@
data_portion_opcodes = {op for op in all_opcodes if op.has_data_portion()}
-# NOTE: `sorted` is used to ensure that the tests are collected in a deterministic order.
+# NOTE: `sorted` is used to ensure that the tests are collected in a
+# deterministic order.
@pytest.mark.parametrize(
@@ -74,8 +81,7 @@ def test_all_opcodes_in_container(
opcode: Opcode,
):
"""
- Test all opcodes inside valid container
- 257 because 0x5B is duplicated.
+ Test all opcodes inside valid container (257, as 0x5B is duplicated).
"""
data_portion = 1 if opcode == Op.CALLF else 0
opcode_with_data_portion = opcode[data_portion] if opcode.has_data_portion() else opcode
@@ -134,7 +140,10 @@ def test_invalid_opcodes_after_stop(
opcode: Opcode,
terminating_opcode: Opcode,
):
- """Test that an invalid opcode placed after STOP (terminating instruction) invalidates EOF."""
+ """
+ Test that an invalid opcode placed after STOP (terminating instruction)
+ invalidates EOF.
+ """
terminating_code = Bytecode(terminating_opcode)
match terminating_opcode: # Enhance the code for complex opcodes.
case Op.RETURNCODE:
@@ -180,8 +189,9 @@ def test_all_invalid_terminating_opcodes(
):
"""Test all opcodes that are invalid as the last opcode in a container."""
if opcode.has_data_portion():
- # Add the appropriate data portion to the opcode by using the get_item method.
- # On the CALLF opcode we need to reference the second code section, hence the [1] index.
+ # Add the appropriate data portion to the opcode by using the get_item
+ # method. On the CALLF opcode we need to reference the second code
+ # section, hence the [1] index.
opcode = opcode[0] if opcode != Op.CALLF else opcode[1]
bytecode = (Op.PUSH0 * opcode.min_stack_height) + opcode
@@ -322,11 +332,13 @@ def test_all_unreachable_terminating_opcodes_before_stop(
@pytest.mark.parametrize(
"exception",
# We test two types of exceptions here:
- # 1. Invalid max stack height, where we modify the `max_stack_height` field of the code section
- # to the maximum stack height allowed by the EIP-3540, so the code still has to be checked
- # for stack overflow.
- # 2. Max stack height above limit, where we don't modify the `max_stack_height` field of the
- # code section, so the actual code doesn't have to be verified for the stack overflow.
+ # 1. Invalid max stack height, where we modify the `max_stack_height`
+ # field of the code section to the maximum stack height allowed by
+ # the EIP-3540, so the code still has to be checked for stack overflow.
+ #
+ # 2. Max stack height above limit, where we don't modify the
+ # `max_stack_height` field of the code section, so the actual
+ # code doesn't have to be verified for the stack overflow.
[EOFException.INVALID_MAX_STACK_INCREASE, EOFException.MAX_STACK_INCREASE_ABOVE_LIMIT],
)
def test_all_opcodes_stack_overflow(
@@ -334,7 +346,9 @@ def test_all_opcodes_stack_overflow(
opcode: Opcode,
exception: EOFException,
):
- """Test stack overflow on all opcodes that push more items than they pop."""
+ """
+ Test stack overflow on all opcodes that push more items than they pop.
+ """
opcode = opcode[0] if opcode.has_data_portion() else opcode
assert opcode.pushed_stack_items - opcode.popped_stack_items == 1
@@ -347,7 +361,8 @@ def test_all_opcodes_stack_overflow(
kwargs: Dict[str, Any] = {"code": bytecode}
if exception == EOFException.INVALID_MAX_STACK_INCREASE:
- # Lie about the max stack height to make the code be checked for stack overflow.
+ # Lie about the max stack height to make the code be checked for stack
+ # overflow.
kwargs["max_stack_height"] = MAX_STACK_INCREASE_LIMIT
sections = [Section.Code(**kwargs)]
@@ -367,13 +382,17 @@ def valid_opcode_combinations(
truncate_all_options: List[bool],
opcodes: List[Opcode],
) -> Generator[Tuple[bool, bool, Opcode], None, None]:
- """Create valid parameter combinations for test_truncated_data_portion_opcodes()."""
+ """
+ Create valid parameter combinations for
+ test_truncated_data_portion_opcodes().
+ """
for opcode, truncate_all, compute_max_stack_height in itertools.product(
opcodes, truncate_all_options, compute_max_stack_height_options
):
opcode_with_data_portion: bytes = bytes(opcode[1])
- # Skip invalid or redundant combinations to avoid using pytest.skip in the test
+ # Skip invalid or redundant combinations to avoid using pytest.skip in
+ # the test
if len(opcode_with_data_portion) == 2 and truncate_all:
continue
if (
@@ -401,7 +420,8 @@ def test_truncated_data_portion_opcodes(
"""
opcode_with_data_portion: bytes = bytes(opcode[1])
- # Compose instruction bytes with empty imm bytes (truncate_all) or 1 byte shorter imm bytes.
+ # Compose instruction bytes with empty imm bytes (truncate_all) or 1 byte
+ # shorter imm bytes.
opcode_bytes = opcode_with_data_portion[0:1] if truncate_all else opcode_with_data_portion[:-1]
if opcode.min_stack_height > 0:
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_size.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_size.py
index 1cc141fc03b..5dfbccb1734 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_size.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_size.py
@@ -27,7 +27,8 @@ def test_max_size(
over_limit: int,
):
"""Verify EOF container valid at maximum size, invalid above."""
- # Expand the minimal EOF code by more noop code, reaching the desired target container size.
+ # Expand the minimal EOF code by more noop code, reaching the desired
+ # target container size.
code = Container(
sections=[
Section.Code(
@@ -52,7 +53,10 @@ def test_above_max_size_raw(
eof_test: EOFTestFiller,
size: int,
):
- """Verify EOF container invalid above maximum size, regardless of header contents."""
+ """
+ Verify EOF container invalid above maximum size, regardless of header
+ contents.
+ """
code = Op.INVALID * size
eof_test(
container=Container(raw_bytes=code),
@@ -101,7 +105,10 @@ def test_section_after_end_of_container(
eof_test: EOFTestFiller,
code: Container,
):
- """Verify EOF container is invalid if any of sections declares above container size."""
+ """
+ Verify EOF container is invalid if any of sections declares above container
+ size.
+ """
eof_test(
container=code,
expect_exception=EOFException.INVALID_SECTION_BODIES_SIZE,
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_validation.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_validation.py
index 7e0875da28a..b06ab65fbec 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_validation.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_container_validation.py
@@ -406,13 +406,15 @@ def test_valid_containers(
validity_error=EOFException.MISSING_CODE_HEADER,
),
Container(
- # EOF code containing multiple type headers, second one matches code length
+ # EOF code containing multiple type headers, second one matches
+ # code length
name="multiple_type_headers_2",
raw_bytes="ef0001010004010001ff00000000800000fe",
validity_error=EOFException.MISSING_CODE_HEADER,
),
Container(
- # EOF code containing multiple type headers followed by 2 code sections
+ # EOF code containing multiple type headers followed by 2 code
+ # sections
name="multiple_type_headers_3",
sections=[
Section(kind=SectionKind.TYPE, data="00800000"),
@@ -486,8 +488,8 @@ def test_valid_containers(
],
),
# The basic `no_section_terminator` cases just remove the terminator
- # and the `00` for zeroth section inputs looks like one. Error is because
- # the sections are wrongly sized.
+ # and the `00` for zeroth section inputs looks like one. Error is
+ # because the sections are wrongly sized.
Container(
name="no_section_terminator",
header_terminator=bytes(),
@@ -521,9 +523,9 @@ def test_valid_containers(
EOFException.INVALID_FIRST_SECTION_TYPE,
],
),
- # The following cases just remove the terminator
- # and the `00` for zeroth section inputs looks like one. Section bodies
- # are as the size prescribes here, so the error is about the inputs of zeroth section.
+ # The following cases just remove the terminator and the `00` for
+ # zeroth section inputs looks like one. Section bodies are as the size
+ # prescribes here, so the error is about the inputs of zeroth section.
Container(
name="no_section_terminator_section_bodies_ok_1",
header_terminator=bytes(),
@@ -536,8 +538,8 @@ def test_valid_containers(
sections=[Section.Code(code=Op.JUMPDEST * 2 + Op.STOP, custom_size=2)],
validity_error=EOFException.INVALID_FIRST_SECTION_TYPE,
),
- # Here the terminator is missing but made to look like a different section
- # or arbitrary byte
+ # Here the terminator is missing but made to look like a different
+ # section or arbitrary byte
Container(
name="no_section_terminator_nonzero",
header_terminator=b"01",
@@ -625,7 +627,8 @@ def test_valid_containers(
Container(
name="empty_code_section",
sections=[Section.Code(code="0x")],
- # TODO the exception must be about code section EOFException.INVALID_CODE_SECTION,
+ # TODO the exception must be about code section
+ # EOFException.INVALID_CODE_SECTION,
validity_error=EOFException.ZERO_SECTION_SIZE,
),
Container(
@@ -634,7 +637,8 @@ def test_valid_containers(
Section.Code(code="0x"),
Section.Data(data="0xDEADBEEF"),
],
- # TODO the exception must be about code section EOFException.INVALID_CODE_SECTION,
+ # TODO the exception must be about code section
+ # EOFException.INVALID_CODE_SECTION,
validity_error=EOFException.ZERO_SECTION_SIZE,
),
Container(
@@ -684,7 +688,8 @@ def test_valid_containers(
Container(
name="data_section_without_code_section",
sections=[Section.Data(data="0xDEADBEEF")],
- # TODO the actual exception should be EOFException.MISSING_CODE_HEADER
+ # TODO the actual exception should be
+ # EOFException.MISSING_CODE_HEADER
validity_error=[EOFException.ZERO_SECTION_SIZE, EOFException.UNEXPECTED_HEADER_KIND],
),
Container(
@@ -1059,7 +1064,8 @@ def test_valid_containers(
Section.Code(code=Op.POP + Op.RETF, code_inputs=1),
Section.Code(Op.STOP),
],
- # TODO the actual exception should be EOFException.INVALID_TYPE_BODY,
+ # TODO the actual exception should be
+ # EOFException.INVALID_TYPE_BODY,
validity_error=EOFException.INVALID_FIRST_SECTION_TYPE,
),
Container(
@@ -1068,7 +1074,8 @@ def test_valid_containers(
Section.Code(code=Op.PUSH0, code_outputs=1),
Section.Code(Op.STOP),
],
- # TODO the actual exception should be EOFException.INVALID_TYPE_BODY,
+ # TODO the actual exception should be
+ # EOFException.INVALID_TYPE_BODY,
validity_error=EOFException.INVALID_FIRST_SECTION_TYPE,
),
Container(
@@ -1158,7 +1165,8 @@ def test_valid_containers(
max_stack_height=1024,
),
],
- # TODO auto types section generation probably failed, the exception must be about code
+ # TODO auto types section generation probably failed, the exception
+ # must be about code
validity_error=EOFException.MAX_STACK_INCREASE_ABOVE_LIMIT,
),
],
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_eof_example.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_eof_example.py
index 3970dd5fd72..5263afbedce 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_eof_example.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_eof_example.py
@@ -23,13 +23,16 @@ def test_eof_example(eof_test: EOFTestFiller):
# TYPES section is constructed automatically based on CODE
# CODE section
Section.Code(
- code=Op.CALLF[1](Op.PUSH0) + Op.STOP, # bytecode to be deployed in the body
+ code=Op.CALLF[1](Op.PUSH0) + Op.STOP, # bytecode to be
+ # deployed in the body
# Code: call section 1 with a single zero as input, then stop.
- max_stack_increase=1, # define code header (in body) stack size
+ max_stack_increase=1, # define code header (in body) stack
+ # size
),
# There can be multiple code sections
Section.Code(
- # Remove input and call section 2 with no inputs, then remove output and return
+ # Remove input and call section 2 with no inputs, then remove
+ # output and return
code=Op.POP + Op.CALLF[2]() + Op.POP + Op.RETF,
code_inputs=1,
code_outputs=0,
@@ -64,45 +67,56 @@ def test_eof_example(eof_test: EOFTestFiller):
def test_eof_example_custom_fields(eof_test: EOFTestFiller):
"""Example of python EOF container class tuning."""
- # if you need to overwrite certain structure bytes, you can use customization
- # this is useful for unit testing the eof structure format, you can reorganize sections
- # and overwrite the header bytes for testing purposes
- # most of the combinations are covered by the unit tests
+ # If you need to overwrite certain structure bytes, you can use
+ # customization. This is useful for unit testing the eof structure format;
+ # you can reorganize sections and overwrite the header bytes for testing
+ # purposes. Most of the combinations are covered by the unit tests
# This features are subject for development and will change in the future
eof_code = Container(
name="valid_container_example_2",
- magic=b"\xef\x00", # magic can be overwritten for test purposes, (default is 0xEF00)
- version=b"\x01", # version can be overwritten for testing purposes (default is 0x01)
- header_terminator=b"\x00", # terminator byte can be overwritten (default is 0x00)
- extra=b"", # extra bytes to be trailed after the container body bytes (default is None)
+ magic=b"\xef\x00", # magic can be overwritten for test purposes,
+ # (default is 0xEF00)
+ version=b"\x01", # version can be overwritten for testing purposes
+ # (default is 0x01)
+ header_terminator=b"\x00", # terminator byte can be overwritten
+ # (default is 0x00)
+ extra=b"", # extra bytes to be trailed after the container body bytes
+ # (default is None)
sections=[
# TYPES section is constructed automatically based on CODE
# CODE section
Section.Code(
- code=Op.PUSH1(2)
- + Op.STOP, # this is the actual bytecode to be deployed in the body
+ code=Op.PUSH1(2) + Op.STOP, # this is the actual bytecode to be deployed in the
+ # body
max_stack_height=1, # define code header (in body) stack size
),
# DATA section
Section.Data(
data="0xef",
- # custom_size overrides the size bytes, so you can put only 1 byte into data
- # but still make the header size of 2 to produce invalid section
+ # custom_size overrides the size bytes, so you can put only 1
+ # byte into data but still make the header size of 2 to produce
+ # invalid section
+ #
# if custom_size != len(data), the section will be invalid
custom_size=1,
),
],
# auto generate types section based on provided code sections
- # AutoSection.ONLY_BODY - means the section will be generated only for the body bytes
- # AutoSection.ONLY_BODY - means the section will be generated only for the header bytes
+ # AutoSection.ONLY_BODY - means the section will be generated only for
+ # the body bytes
+ #
+ # AutoSection.ONLY_HEADER - means the section will be generated only for
+ # the header bytes
auto_type_section=AutoSection.AUTO,
# auto generate default data section (0x empty), by default is True
auto_data_section=True,
# auto sort section by order 01 02 03 04
- # AutoSection.ONLY_BODY - means the sorting will be done only for the body bytes
- # AutoSection.ONLY_BODY - means the section will be done only for the header bytes
+ # AutoSection.ONLY_BODY - means the sorting will be done only for the
+ # body bytes
+ # AutoSection.ONLY_HEADER - means the sorting will be done only for
+ # the header bytes
auto_sort_sections=AutoSection.AUTO,
)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_opcodes_in_legacy.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_opcodes_in_legacy.py
index 0dfabd04454..3916f8570ff 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_opcodes_in_legacy.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_opcodes_in_legacy.py
@@ -48,7 +48,8 @@
pytest.param(Op.DATASIZE, id="DATASIZE"),
pytest.param(Op.DATACOPY(0, 0, 32), id="DATACOPY"),
pytest.param(Op.EOFCREATE[0](0, 0, 0, 0), id="EOFCREATE"),
- # pytest.param(Op.TXCREATE(0, 0, 0, 0, 0), id="TXCREATE"), not EOF-only anymore
+ # pytest.param(Op.TXCREATE(0, 0, 0, 0, 0), id="TXCREATE"), not EOF-only
+ # anymore
pytest.param(Op.RETURNCODE[0], id="RETURNCODE"),
]
@@ -67,7 +68,8 @@ def test_opcodes_in_legacy(state_test: StateTestFiller, pre: Alloc, code: Opcode
)
post = {
- # assert the canary is not over-written. If it was written then the EOF opcode was valid
+ # assert the canary is not over-written. If it was written then the EOF
+ # opcode was valid
address_test_contract: Account(storage={slot_code_executed: value_non_execution_canary}),
}
@@ -209,7 +211,8 @@ def test_opcodes_in_eof_calling_legacy(
)
post = {
- # assert the canary is not over-written. If it was written then the EOF opcode was valid
+ # assert the canary is not over-written. If it was written then the EOF
+ # opcode was valid
address_test_contract: Account(storage={slot_code_executed: value_non_execution_canary}),
address_entry_contract: Account(
storage={
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_header_body_mismatch.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_header_body_mismatch.py
index 4292914e63d..d1b11bea5c1 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_header_body_mismatch.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_header_body_mismatch.py
@@ -18,10 +18,14 @@
@pytest.mark.parametrize(
**extend_with_defaults(
defaults={
- "skip_header_listing": False, # second section is mentioned in code header array
- "skip_body_listing": False, # second section code is in container's body
- "skip_types_body_listing": False, # code input bytes not listed in container's body
- "skip_types_header_listing": False, # code input bytes size not added to types section size # noqa: E501
+ # second section is mentioned in code header array
+ "skip_header_listing": False,
+ # second section code is in container's body
+ "skip_body_listing": False,
+ # code input bytes not listed in container's body
+ "skip_types_body_listing": False,
+ # code input bytes size not added to types section size
+ "skip_types_header_listing": False,
"expected_code": "",
"expected_exception": None,
},
@@ -30,7 +34,8 @@
{
"skip_header_listing": True,
"skip_body_listing": True,
- "expected_code": "ef00010100080200010003ff00040000800001000000003050000bad60A7", # noqa: E501
+ "expected_code": "ef00010100080200010003ff0004000080000"
+ "1000000003050000bad60A7",
"expected_exception": [
EOFException.INVALID_TYPE_SECTION_SIZE,
EOFException.INVALID_SECTION_BODIES_SIZE,
@@ -42,7 +47,8 @@
{
"skip_header_listing": True,
"skip_body_listing": False,
- "expected_code": "ef00010100080200010003ff00040000800001000000003050003050000bad60A7", # noqa: E501
+ "expected_code": "ef00010100080200010003ff0004000080000"
+ "1000000003050003050000bad60A7",
"expected_exception": [
EOFException.INVALID_TYPE_SECTION_SIZE,
EOFException.INVALID_SECTION_BODIES_SIZE,
@@ -54,7 +60,8 @@
{
"skip_header_listing": False,
"skip_body_listing": True,
- "expected_code": "ef000101000802000200030003ff00040000800001000000003050000bad60A7", # noqa: E501
+ "expected_code": "ef000101000802000200030003ff000400008"
+ "00001000000003050000bad60A7",
"expected_exception": [
EOFException.UNREACHABLE_CODE_SECTIONS,
EOFException.TOPLEVEL_CONTAINER_TRUNCATED,
@@ -66,7 +73,8 @@
{
"skip_header_listing": False,
"skip_body_listing": False,
- "expected_code": "ef000101000802000200030003ff00040000800001000000003050003050000bad60A7", # noqa: E501
+ "expected_code": "ef000101000802000200030003ff000400008"
+ "00001000000003050003050000bad60A7",
"expected_exception": EOFException.UNREACHABLE_CODE_SECTIONS,
},
id="layout_ok_code_bad",
@@ -125,7 +133,8 @@ def test_code_section_header_body_mismatch(
skip_body_listing=skip_body_listing,
# whether to not print its input bytes in containers body
skip_types_body_listing=skip_types_body_listing,
- # whether to not calculate its input bytes size in types section's header
+ # whether to not calculate its input bytes size in types
+ # section's header
skip_types_header_listing=skip_types_header_listing,
),
Section.Data("0x0bad60A7"),
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_order.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_order.py
index 8e5f7c6da0d..aae8c4af0b5 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_order.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_order.py
@@ -36,7 +36,9 @@ class CasePosition(Enum):
def get_expected_code_exception(
section_kind, section_test, test_position
) -> tuple[str, EOFExceptionInstanceOrList | None]:
- """Verification vectors with code and exception based on test combinations."""
+ """
+ Verification vectors with code and exception based on test combinations.
+ """
match (section_kind, section_test, test_position):
case (SectionKind.TYPE, SectionTest.MISSING, CasePosition.HEADER):
return (
@@ -64,7 +66,8 @@ def get_expected_code_exception(
case (SectionKind.TYPE, SectionTest.WRONG_ORDER, CasePosition.BODY):
return (
"ef00010100040200010003ff00010030500000800001ef",
- # TODO why invalid first section type? it should say that the body incorrect
+ # TODO why invalid first section type? it should say that the
+ # body incorrect
EOFException.INVALID_FIRST_SECTION_TYPE,
)
case (SectionKind.TYPE, SectionTest.WRONG_ORDER, CasePosition.BODY_AND_HEADER):
@@ -80,8 +83,8 @@ def get_expected_code_exception(
case (SectionKind.CODE, SectionTest.MISSING, CasePosition.BODY):
return (
"ef00010100040200010003ff00010000800001ef",
- # TODO should be an exception of empty code bytes, because it can understand that
- # last byte is data section byte
+ # TODO should be an exception of empty code bytes, because it
+ # can understand that last byte is data section byte
[EOFException.INVALID_SECTION_BODIES_SIZE, EOFException.UNEXPECTED_HEADER_KIND],
)
case (SectionKind.CODE, SectionTest.MISSING, CasePosition.BODY_AND_HEADER):
@@ -230,17 +233,17 @@ def test_container_section_order(
test_position: CasePosition,
):
"""
- Test containers section being out of order in the header and/or body.
- This extends and follows the convention of the test_section_order()
- for the optional container section.
+ Test containers section being out of order in the header and/or body. This
+ extends and follows the convention of the test_section_order() for the
+ optional container section.
"""
if container_position == 2:
pytest.skip("Skip valid container section position")
section_code = Section.Code(
code=Op.EOFCREATE[0](0, 0, 0, 0)
- # TODO: Migrated tests had the following infinite loop, so it is kept here
- # to equalize code coverage.
+ # TODO: Migrated tests had the following infinite loop, so it is kept
+ # here to equalize code coverage.
+ Op.RJUMP[0]
+ Op.STOP()
)
@@ -270,7 +273,8 @@ def get_expected_exception():
return EOFException.INVALID_FIRST_SECTION_TYPE
case 1, CasePosition.BODY: # Messes up with the code section
return EOFException.UNDEFINED_INSTRUCTION
- case 3, CasePosition.BODY: # Data section messes up with the container section
+ case 3, CasePosition.BODY: # Data section messes up with the
+ # container section
return EOFException.INVALID_MAGIC
case 0, CasePosition.HEADER | CasePosition.BODY_AND_HEADER:
return EOFException.MISSING_TYPE_HEADER
diff --git a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_size.py b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_size.py
index 6fecfe17996..f5ea074b851 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_size.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_section_size.py
@@ -120,7 +120,9 @@ def test_section_size(
section_kind: SectionKind,
exception: EOFExceptionInstanceOrList,
):
- """Test custom_size is auto, more or less than the actual size of the section."""
+ """
+ Test custom_size is auto, more or less than the actual size of the section.
+ """
eof_code = Container()
if section_size != SectionSize.NORMAL and section_kind == SectionKind.TYPE:
@@ -183,8 +185,8 @@ def test_section_size(
@pytest.mark.parametrize(
"truncation_len, exception",
[
- # The original container is not valid by itself because its 2-byte code section
- # starts with the terminating instruction: INVALID.
+ # The original container is not valid by itself because its 2-byte code
+ # section starts with the terminating instruction: INVALID.
pytest.param(0, EOFException.UNREACHABLE_INSTRUCTIONS),
pytest.param(1, EOFException.INVALID_SECTION_BODIES_SIZE, id="EOF1_truncated_section_2"),
pytest.param(3, EOFException.INVALID_SECTION_BODIES_SIZE, id="EOF1_truncated_section_1"),
@@ -198,7 +200,8 @@ def test_truncated_container_without_data(
):
"""
Test takes a semi-valid container and removes some bytes from its tail.
- Migrated from EOFTests/efValidation/EOF1_truncated_section_.json (cases without data section).
+ Migrated from EOFTests/efValidation/EOF1_truncated_section_.json (cases
+ without data section).
"""
container = Container(sections=[Section.Code(Op.INVALID + Op.INVALID)])
bytecode = bytes(container)
@@ -222,8 +225,9 @@ def test_truncated_container_with_data(
exception: EOFException,
):
"""
- Test takes a valid container with data and removes some bytes from its tail.
- Migrated from EOFTests/efValidation/EOF1_truncated_section_.json (cases with data section).
+ Test takes a valid container with data and removes some bytes from its
+ tail. Migrated from EOFTests/efValidation/EOF1_truncated_section_.json
+ (cases with data section).
"""
data = b"\xaa\xbb"
container = Container(
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/__init__.py
index 4eef21b71a5..f08cf8e60ec 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/__init__.py
@@ -1,5 +1,7 @@
"""
-abstract: Test cases for [EIP-4200: EOF - Static relative jumps](https://eips.ethereum.org/EIPS/eip-4200)
- EIP-4200 replaces dynamic jump instructions with relative jump offsets for improved control flow predictability.
- Opcodes introduced: `RJUMP` (`0xE0`), `RJUMPI` (`0xE1`), `RJUMPV` (`0xE2`).
-""" # noqa: E501
+Test cases for [EIP-4200: EOF - Static relative jumps](https://eips.ethereum.org/EIPS/eip-4200).
+
+EIP-4200 replaces dynamic jump instructions with relative jump offsets for
+improved control flow predictability. Opcodes introduced: `RJUMP` (`0xE0`),
+`RJUMPI` (`0xE1`), `RJUMPV` (`0xE2`).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjump.py b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjump.py
index 0a2e407cc5a..490bea52976 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjump.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjump.py
@@ -39,7 +39,9 @@ def test_rjump_negative(
def test_rjump_positive_negative(
eof_state_test: EOFStateTestFiller,
):
- """EOF1V4200_0001 (Valid) EOF code containing RJUMP (Positive, Negative)."""
+ """
+ EOF1V4200_0001 (Valid) EOF code containing RJUMP (Positive, Negative).
+ """
eof_state_test(
container=Container.Code(
Op.PUSH0
@@ -68,7 +70,10 @@ def test_rjump_zero(
def test_rjump_maxes(
eof_state_test: EOFStateTestFiller,
):
- """EOF1V4200_0003 EOF with RJUMP containing the max positive and negative offset (32767)."""
+ """
+ EOF1V4200_0003 EOF with RJUMP containing the max positive and negative
+ offset (32767).
+ """
eof_state_test(
container=Container.Code(
Op.PUSH0
@@ -87,8 +92,8 @@ def test_rjump_max_bytecode_size(
eof_test: EOFTestFiller,
):
"""
- EOF1V4200_0003 EOF with RJUMP containing the maximum offset that does not exceed the maximum
- bytecode size.
+ EOF1V4200_0003 EOF with RJUMP containing the maximum offset that does not
+ exceed the maximum bytecode size.
"""
noop_count = MAX_BYTECODE_SIZE - 27
code = (
@@ -128,8 +133,8 @@ def test_rjump_into_header(
offset: int,
):
"""
- EOF1I4200_0003 (Invalid) EOF code containing RJUMP with target outside code bounds
- (Jumping into header).
+ EOF1I4200_0003 (Invalid) EOF code containing RJUMP with target outside code
+ bounds (Jumping into header).
"""
eof_test(
container=Container.Code(Op.RJUMP[offset]),
@@ -141,8 +146,8 @@ def test_rjump_before_header(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0004 (Invalid) EOF code containing RJUMP with target outside code bounds
- (Jumping before code begin).
+ EOF1I4200_0004 (Invalid) EOF code containing RJUMP with target outside code
+ bounds (Jumping before code begin).
"""
eof_test(
container=Container.Code(Op.RJUMP[-23]),
@@ -154,8 +159,8 @@ def test_rjump_into_data(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0005 (Invalid) EOF code containing RJUMP with target outside code bounds
- (Jumping into data section).
+ EOF1I4200_0005 (Invalid) EOF code containing RJUMP with target outside code
+ bounds (Jumping into data section).
"""
eof_test(
container=Container(
@@ -171,7 +176,10 @@ def test_rjump_into_data(
def test_rjump_outside_other_section_before(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMP with target outside code bounds (prior code section)."""
+ """
+ EOF code containing RJUMP with target outside code bounds (prior code
+ section).
+ """
eof_test(
container=Container(
sections=[
@@ -186,7 +194,10 @@ def test_rjump_outside_other_section_before(
def test_rjump_outside_other_section_after(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMP with target outside code bounds (Subsequent code section)."""
+ """
+ EOF code containing RJUMP with target outside code bounds (Subsequent code
+ section).
+ """
eof_test(
container=Container(
sections=[
@@ -203,8 +214,8 @@ def test_rjump_after_container(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0006 (Invalid) EOF code containing RJUMP with target outside code bounds
- (Jumping after code end).
+ EOF1I4200_0006 (Invalid) EOF code containing RJUMP with target outside code
+ bounds (Jumping after code end).
"""
eof_test(
container=Container.Code(Op.RJUMP[2]),
@@ -216,8 +227,8 @@ def test_rjump_to_code_end(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0007 (Invalid) EOF code containing RJUMP with target outside code bounds
- (Jumping to code end).
+ EOF1I4200_0007 (Invalid) EOF code containing RJUMP with target outside code
+ bounds (Jumping to code end).
"""
eof_test(
container=Container.Code(Op.RJUMP[1] + Op.STOP),
@@ -230,7 +241,10 @@ def test_rjump_into_self_data_portion(
eof_test: EOFTestFiller,
offset: int,
):
- """EOF1I4200_0008 (Invalid) EOF code containing RJUMP with target self RJUMP immediate."""
+ """
+ EOF1I4200_0008 (Invalid) EOF code containing RJUMP with target self RJUMP
+ immediate.
+ """
eof_test(
container=Container.Code(Op.RJUMP[-offset] + Op.STOP),
expect_exception=EOFException.INVALID_RJUMP_DESTINATION,
@@ -241,8 +255,8 @@ def test_rjump_into_self_remaining_code(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0008 (Invalid) EOF code containing RJUMP with target self RJUMP but remaining
- unreachable code.
+ EOF1I4200_0008 (Invalid) EOF code containing RJUMP with target self RJUMP
+ but remaining unreachable code.
"""
eof_test(
container=Container.Code(Op.RJUMP[-len(Op.RJUMP[0])] + Op.STOP),
@@ -269,7 +283,10 @@ def test_rjump_into_self(
def test_rjump_into_self_pre_code(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMP with target self RJUMP with non-zero stack before RJUMP."""
+ """
+ EOF code containing RJUMP with target self RJUMP with non-zero stack before
+ RJUMP.
+ """
eof_test(
container=Container.Code(Op.PUSH1[0] + Op.RJUMP[-len(Op.RJUMP[0])]),
)
@@ -455,8 +472,8 @@ def test_rjump_valid_forward(
container: Container,
):
"""
- Validate a valid code section containing at least one forward RJUMP.
- These tests exercise the stack height validation.
+ Validate a valid code section containing at least one forward RJUMP. These
+ tests exercise the stack height validation.
"""
eof_test(container=container)
@@ -551,8 +568,8 @@ def test_rjump_valid_backward(
container: Container,
):
"""
- Validate a valid code section containing at least one backward RJUMP.
- These tests exercise the stack height validation.
+ Validate a valid code section containing at least one backward RJUMP. These
+ tests exercise the stack height validation.
"""
eof_test(container=container)
@@ -560,7 +577,10 @@ def test_rjump_valid_backward(
def test_rjump_into_stack_height_diff(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMP with target instruction that causes stack height difference."""
+ """
+ EOF code containing RJUMP with target instruction that causes stack height
+ difference.
+ """
eof_test(
container=Container.Code(Op.PUSH1[0] + Op.RJUMP[-(len(Op.RJUMP[0]) + len(Op.PUSH1[0]))]),
expect_exception=EOFException.STACK_HEIGHT_MISMATCH,
@@ -570,7 +590,10 @@ def test_rjump_into_stack_height_diff(
def test_rjump_into_stack_height_diff_2(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMP with target instruction that cause stack height difference."""
+ """
+ EOF code containing RJUMP with target instruction that cause stack height
+ difference.
+ """
eof_test(
container=Container.Code(
Op.PUSH1[0] + Op.POP + Op.RJUMP[-(len(Op.RJUMP[0]) + len(Op.POP))]
@@ -757,8 +780,8 @@ def test_rjump_backward_invalid_max_stack_height(
container: Container,
):
"""
- Validate a code section containing at least one backward RJUMP
- invalid because of the incorrect max stack height.
+ Validate a code section containing at least one backward RJUMP invalid
+ because of the incorrect max stack height.
"""
eof_test(container=container, expect_exception=EOFException.STACK_HEIGHT_MISMATCH)
@@ -766,7 +789,10 @@ def test_rjump_backward_invalid_max_stack_height(
def test_rjump_into_stack_underflow(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMP with target instruction that cause stack underflow."""
+ """
+ EOF code containing RJUMP with target instruction that cause stack
+ underflow.
+ """
eof_test(
container=Container.Code(
Op.ORIGIN
@@ -783,7 +809,10 @@ def test_rjump_into_stack_underflow(
def test_rjump_into_rjump(
eof_test: EOFTestFiller,
):
- """EOF1I4200_0009 (Invalid) EOF code containing RJUMP with target other RJUMP immediate."""
+ """
+ EOF1I4200_0009 (Invalid) EOF code containing RJUMP with target other RJUMP
+ immediate.
+ """
eof_test(
container=Container.Code(Op.RJUMP[1] + Op.RJUMP[0]),
expect_exception=EOFException.INVALID_RJUMP_DESTINATION,
@@ -793,7 +822,10 @@ def test_rjump_into_rjump(
def test_rjump_into_rjumpi(
eof_test: EOFTestFiller,
):
- """EOF1I4200_0010 (Invalid) EOF code containing RJUMP with target RJUMPI immediate."""
+ """
+ EOF1I4200_0010 (Invalid) EOF code containing RJUMP with target RJUMPI
+ immediate.
+ """
eof_test(
container=Container.Code(Op.RJUMP[5] + Op.STOP + Op.PUSH1[1] + Op.RJUMPI[-6] + Op.STOP),
expect_exception=EOFException.INVALID_RJUMP_DESTINATION,
@@ -802,7 +834,10 @@ def test_rjump_into_rjumpi(
@pytest.mark.parametrize("jump", [JumpDirection.FORWARD, JumpDirection.BACKWARD])
def test_rjump_into_push_1(eof_test: EOFTestFiller, jump: JumpDirection):
- """EOF1I4200_0011 (Invalid) EOF code containing RJUMP with target PUSH1 immediate."""
+ """
+ EOF1I4200_0011 (Invalid) EOF code containing RJUMP with target PUSH1
+ immediate.
+ """
code = (
Op.PUSH1[1] + Op.RJUMP[-4] if jump == JumpDirection.BACKWARD else Op.RJUMP[1] + Op.PUSH1[1]
) + Op.STOP
@@ -860,7 +895,10 @@ def test_rjump_into_push_n(
jump: JumpDirection,
data_portion_end: bool,
):
- """EOF1I4200_0011 (Invalid) EOF code containing RJUMP with target PUSH2+ immediate."""
+ """
+ EOF1I4200_0011 (Invalid) EOF code containing RJUMP with target PUSH2+
+ immediate.
+ """
data_portion_length = int.from_bytes(opcode, byteorder="big") - 0x5F
if jump == JumpDirection.FORWARD:
offset = data_portion_length if data_portion_end else 1
@@ -885,7 +923,10 @@ def test_rjump_into_rjumpv(
target_rjumpv_table_size: int,
data_portion_end: bool,
):
- """EOF1I4200_0012 (Invalid) EOF code containing RJUMP with target RJUMPV immediate."""
+ """
+ EOF1I4200_0012 (Invalid) EOF code containing RJUMP with target RJUMPV
+ immediate.
+ """
invalid_destination = 4 + (2 * target_rjumpv_table_size) if data_portion_end else 4
target_jump_table = [0 for _ in range(target_rjumpv_table_size)]
eof_test(
@@ -909,7 +950,10 @@ def test_rjump_into_callf(
eof_test: EOFTestFiller,
data_portion_end: bool,
):
- """EOF1I4200_0013 (Invalid) EOF code containing RJUMP with target CALLF immediate."""
+ """
+ EOF1I4200_0013 (Invalid) EOF code containing RJUMP with target CALLF
+ immediate.
+ """
invalid_destination = 2 if data_portion_end else 1
eof_test(
container=Container(
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpi.py b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpi.py
index 3fa0e7f3f65..dd1647eb95a 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpi.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpi.py
@@ -129,7 +129,9 @@ def test_rjumpi_condition_zero(
pre: Alloc,
calldata: bytes,
):
- """Test RJUMPI contract switching based on external input (condition zero)."""
+ """
+ Test RJUMPI contract switching based on external input (condition zero).
+ """
env = Environment()
sender = pre.fund_eoa(10**18)
contract_address = pre.deploy_contract(
@@ -221,7 +223,10 @@ def test_rjumpi_zero(
def test_rjumpi_max_forward(
eof_state_test: EOFStateTestFiller,
):
- """EOF1V4200_0007 (Valid) EOF with RJUMPI containing the maximum offset (32767)."""
+ """
+ EOF1V4200_0007 (Valid) EOF with RJUMPI containing the maximum offset
+ (32767).
+ """
eof_state_test(
container=Container(
sections=[
@@ -730,8 +735,8 @@ def test_rjumpi_valid_forward(
container: Container,
):
"""
- Validate a valid code section containing at least one forward RJUMPI.
- These tests exercise the stack height validation.
+ Validate a valid code section containing at least one forward RJUMPI. These
+ tests exercise the stack height validation.
"""
eof_test(container=container)
@@ -909,8 +914,8 @@ def test_rjumpi_max_bytecode_size(
eof_test: EOFTestFiller,
):
"""
- EOF1V4200_0003 EOF with RJUMPI containing the maximum offset that does not exceed the maximum
- bytecode size.
+ EOF1V4200_0003 EOF with RJUMPI containing the maximum offset that does not
+ exceed the maximum bytecode size.
"""
noop_count = MAX_BYTECODE_SIZE - 24
code = Op.RJUMPI[len(Op.NOOP) * noop_count](Op.ORIGIN) + (Op.NOOP * noop_count) + Op.STOP
@@ -957,8 +962,8 @@ def test_rjumpi_into_header(
offset: int,
):
"""
- EOF1I4200_0016 (Invalid) EOF code containing RJUMPI with target outside code bounds
- (Jumping into header).
+ EOF1I4200_0016 (Invalid) EOF code containing RJUMPI with target outside
+ code bounds (Jumping into header).
"""
eof_test(
container=Container(
@@ -976,8 +981,8 @@ def test_rjumpi_jump_before_header(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0017 (Invalid) EOF code containing RJUMPI with target outside code bounds
- (Jumping to before code begin).
+ EOF1I4200_0017 (Invalid) EOF code containing RJUMPI with target outside
+ code bounds (Jumping to before code begin).
"""
eof_test(
container=Container(
@@ -995,8 +1000,8 @@ def test_rjumpi_into_data(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0018 (Invalid) EOF code containing RJUMPI with target outside code bounds
- (Jumping into data section).
+ EOF1I4200_0018 (Invalid) EOF code containing RJUMPI with target outside
+ code bounds (Jumping into data section).
"""
eof_test(
container=Container(
@@ -1015,8 +1020,8 @@ def test_rjumpi_after_container(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0019 (Invalid) EOF code containing RJUMPI with target outside code bounds
- (Jumping to after code end).
+ EOF1I4200_0019 (Invalid) EOF code containing RJUMPI with target outside
+ code bounds (Jumping to after code end).
"""
eof_test(
container=Container(
@@ -1034,8 +1039,8 @@ def test_rjumpi_to_code_end(
eof_test: EOFTestFiller,
):
"""
- EOF1I4200_0020 (Invalid) EOF code containing RJUMPI with target outside code bounds
- (Jumping to code end).
+ EOF1I4200_0020 (Invalid) EOF code containing RJUMPI with target outside
+ code bounds (Jumping to code end).
"""
eof_test(
container=Container(
@@ -1055,8 +1060,8 @@ def test_rjumpi_into_self_data_portion(
offset: int,
):
"""
- EOF1I4200_0021 (Invalid) EOF code containing RJUMPI with target same RJUMPI immediate
- (with offset).
+ EOF1I4200_0021 (Invalid) EOF code containing RJUMPI with target same RJUMPI
+ immediate (with offset).
"""
eof_test(
container=Container(
@@ -1076,8 +1081,8 @@ def test_rjumpi_into_self(
stack_height_spread: int,
):
"""
- EOF code containing RJUMPI targeting itself (-3).
- This can never be valid because this is backward jump and RJUMPI consumes one stack item.
+ EOF code containing RJUMPI targeting itself (-3). This can never be valid
+ because this is backward jump and RJUMPI consumes one stack item.
"""
# Create variadic stack height by the parametrized spread.
stack_spread_code = Bytecode()
@@ -1099,7 +1104,10 @@ def test_rjumpi_into_self(
def test_rjumpi_into_stack_height_diff(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMPI with target instruction that causes stack height difference."""
+ """
+ EOF code containing RJUMPI with target instruction that causes stack height
+ difference.
+ """
eof_test(
container=Container(
sections=[
@@ -1118,7 +1126,10 @@ def test_rjumpi_into_stack_height_diff(
def test_rjumpi_into_stack_underflow(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMPI with target instruction that cause stack underflow."""
+ """
+ EOF code containing RJUMPI with target instruction that cause stack
+ underflow.
+ """
eof_test(
container=Container(
sections=[
@@ -1134,7 +1145,10 @@ def test_rjumpi_into_stack_underflow(
def test_rjumpi_skips_stack_underflow(
eof_test: EOFTestFiller,
):
- """EOF code containing RJUMPI where the default path produces a stack underflow."""
+ """
+ EOF code containing RJUMPI where the default path produces a stack
+ underflow.
+ """
eof_test(
container=Container(
sections=[
@@ -1148,7 +1162,10 @@ def test_rjumpi_skips_stack_underflow(
def test_rjumpi_into_rjump(
eof_test: EOFTestFiller,
):
- """EOF1I4200_0023 (Invalid) EOF code containing RJUMPI with target RJUMP immediate."""
+ """
+ EOF1I4200_0023 (Invalid) EOF code containing RJUMPI with target RJUMP
+ immediate.
+ """
eof_test(
container=Container(
sections=[
@@ -1164,7 +1181,10 @@ def test_rjumpi_into_rjump(
def test_rjumpi_into_rjumpi(
eof_test: EOFTestFiller,
):
- """EOF1I4200_0022 (Invalid) EOF code containing RJUMPI with target other RJUMPI immediate."""
+ """
+ EOF1I4200_0022 (Invalid) EOF code containing RJUMPI with target other
+ RJUMPI immediate.
+ """
eof_test(
container=Container(
sections=[
@@ -1187,7 +1207,10 @@ def test_rjumpi_into_push_1(
eof_test: EOFTestFiller,
jump: JumpDirection,
):
- """EOF1I4200_0024 (Invalid) EOF code containing RJUMPI with target PUSH1 immediate."""
+ """
+ EOF1I4200_0024 (Invalid) EOF code containing RJUMPI with target PUSH1
+ immediate.
+ """
code = (
Op.PUSH1[1] + Op.RJUMPI[-4]
if jump == JumpDirection.BACKWARD
@@ -1251,7 +1274,10 @@ def test_rjumpi_into_push_n(
jump: JumpDirection,
data_portion_end: bool,
):
- """EOF1I4200_0024 (Invalid) EOF code containing RJUMPI with target PUSH2+ immediate."""
+ """
+ EOF1I4200_0024 (Invalid) EOF code containing RJUMPI with target PUSH2+
+ immediate.
+ """
data_portion_length = int.from_bytes(opcode, byteorder="big") - 0x5F
if jump == JumpDirection.FORWARD:
offset = data_portion_length if data_portion_end else 1
@@ -1280,7 +1306,10 @@ def test_rjumpi_into_rjumpv(
target_rjumpv_table_size: int,
data_portion_end: bool,
):
- """EOF1I4200_0025 (Invalid) EOF code containing RJUMPI with target RJUMPV immediate."""
+ """
+ EOF1I4200_0025 (Invalid) EOF code containing RJUMPI with target RJUMPV
+ immediate.
+ """
invalid_destination = 4 + (2 * target_rjumpv_table_size) if data_portion_end else 4
target_jump_table = [0 for _ in range(target_rjumpv_table_size)]
eof_test(
@@ -1309,7 +1338,10 @@ def test_rjumpi_into_callf(
eof_test: EOFTestFiller,
data_portion_end: bool,
):
- """EOF1I4200_0026 (Invalid) EOF code containing RJUMPI with target CALLF immediate."""
+ """
+ EOF1I4200_0026 (Invalid) EOF code containing RJUMPI with target CALLF
+ immediate.
+ """
invalid_destination = 2 if data_portion_end else 1
eof_test(
container=Container(
@@ -1474,8 +1506,8 @@ def test_rjumpi_stack_validation(
):
"""
Check that you can get to the same opcode with two different stack heights
- Spec now allows this:
- 4.b in https://github.com/ipsilon/eof/blob/main/spec/eof.md#stack-validation.
+ Spec now allows this: 4.b in
+ https://github.com/ipsilon/eof/blob/main/spec/eof.md#stack-validation.
"""
container = Container.Code(code=Op.RJUMPI[1](1) + Op.ADDRESS + Op.NOOP + Op.STOP)
eof_test(
@@ -1490,7 +1522,8 @@ def test_rjumpi_at_the_end(
"""
Test invalid RJUMPI as the end of a code section.
https://github.com/ipsilon/eof/blob/main/spec/eof.md#stack-validation 4.i:
- This implies that the last instruction must be a terminating instruction or RJUMP.
+ This implies that the last instruction must be a terminating instruction or
+ RJUMP.
"""
eof_test(
container=Container(
@@ -1611,8 +1644,8 @@ def test_double_rjumpi_stack_height_mismatch(
eof_test: EOFTestFiller,
):
"""
- Test stack height check of the backward RJUMP
- targeted by two RJUMPIs with the non-uniform stack height range.
+ Test stack height check of the backward RJUMP targeted by two RJUMPIs with
+ the non-uniform stack height range.
"""
eof_test(
container=Container(
@@ -1635,8 +1668,8 @@ def test_double_rjumpi_invalid_max_stack_height(
eof_test: EOFTestFiller,
):
"""
- Test max stack height of the final block
- targeted by two RJUMPIs with the non-uniform stack height range.
+ Test max stack height of the final block targeted by two RJUMPIs with the
+ non-uniform stack height range.
"""
eof_test(
container=Container(
@@ -1848,7 +1881,7 @@ def test_rjumpi_backward_invalid_max_stack_height(
container: Container,
):
"""
- Validate a code section containing at least one backward RJUMPI
- invalid because of the incorrect max stack height.
+ Validate a code section containing at least one backward RJUMPI invalid
+ because of the incorrect max stack height.
"""
eof_test(container=container, expect_exception=EOFException.STACK_HEIGHT_MISMATCH)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpv.py b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpv.py
index 7fe59d8ffaa..6d32799aaed 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpv.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4200_relative_jumps/test_rjumpv.py
@@ -200,7 +200,10 @@ def test_rjumpv_full_table(
eof_state_test: EOFStateTestFiller,
target: int,
):
- """EOF1V4200_0012/13/14/15 (Valid) EOF with RJUMPV table size 256 (target parameterized)."""
+ """
+ EOF1V4200_0012/13/14/15 (Valid) EOF with RJUMPV table size 256 (target
+ parameterized).
+ """
eof_state_test(
container=Container(
sections=[
@@ -220,7 +223,10 @@ def test_rjumpv_full_table(
def test_rjumpv_max_forwards(
eof_state_test: EOFStateTestFiller,
):
- """EOF1V4200_0016 (Valid) EOF with RJUMPV containing the maximum offset (32767)."""
+ """
+ EOF1V4200_0016 (Valid) EOF with RJUMPV containing the maximum offset
+ (32767).
+ """
eof_state_test(
container=Container(
sections=[
@@ -240,7 +246,10 @@ def test_rjumpv_max_forwards(
def test_rjumpv_truncated_empty(
eof_test: EOFTestFiller,
):
- """EOF1I4200_0027 (Invalid) EOF code containing RJUMPV with max_index 0 but no immediates."""
+ """
+ EOF1I4200_0027 (Invalid) EOF code containing RJUMPV with max_index 0 but no
+ immediates.
+ """
eof_test(
container=Container(
sections=[
@@ -290,8 +299,8 @@ def test_rjumpv_into_header(
invalid_index: int,
):
"""
- EOF1I4200_0031 (Invalid) EOF code containing RJUMPV with target outside code bounds
- (Jumping into header).
+ EOF1I4200_0031 (Invalid) EOF code containing RJUMPV with target outside
+ code bounds (Jumping into header).
"""
invalid_destination = -5 - (2 * table_size)
jump_table = [0 for _ in range(table_size)]
@@ -324,8 +333,8 @@ def test_rjumpv_before_container(
offset: int,
):
"""
- EOF1I4200_0032 (Invalid) EOF code containing RJUMPV with target outside code bounds
- (Jumping to before code begin).
+ EOF1I4200_0032 (Invalid) EOF code containing RJUMPV with target outside
+ code bounds (Jumping to before code begin).
"""
invalid_destination = offset - (2 * table_size)
jump_table = [0 for _ in range(table_size)]
@@ -356,8 +365,8 @@ def test_rjumpv_into_data(
invalid_index: int,
):
"""
- EOF1I4200_0033 (Invalid) EOF code containing RJUMPV with target outside code bounds
- (Jumping into data section).
+ EOF1I4200_0033 (Invalid) EOF code containing RJUMPV with target outside
+ code bounds (Jumping into data section).
"""
invalid_destination = 2
jump_table = [0 for _ in range(table_size)]
@@ -389,8 +398,8 @@ def test_rjumpv_after_container(
invalid_index: int,
):
"""
- EOF1I4200_0034 (Invalid) EOF code containing RJUMPV with target outside code bounds
- (Jumping to after code end).
+ EOF1I4200_0034 (Invalid) EOF code containing RJUMPV with target outside
+ code bounds (Jumping to after code end).
"""
invalid_destination = 2
jump_table = [0 for _ in range(table_size)]
@@ -421,8 +430,8 @@ def test_rjumpv_at_end(
invalid_index: int,
):
"""
- EOF1I4200_0035 (Invalid) EOF code containing RJUMPV with target outside code bounds
- (Jumping to code end).
+ EOF1I4200_0035 (Invalid) EOF code containing RJUMPV with target outside
+ code bounds (Jumping to code end).
"""
invalid_destination = 1
jump_table = [0 for _ in range(table_size)]
@@ -458,7 +467,10 @@ def test_rjumpv_into_self_data_portion(
invalid_index: int,
data_portion_end: bool,
):
- """EOF1I4200_0036 (Invalid) EOF code containing RJUMPV with target same RJUMPV immediate."""
+ """
+ EOF1I4200_0036 (Invalid) EOF code containing RJUMPV with target same RJUMPV
+ immediate.
+ """
invalid_destination = -1 if data_portion_end else -(2 * table_size) - 1
jump_table = [0 for _ in range(table_size)]
jump_table[invalid_index] = invalid_destination
@@ -490,8 +502,8 @@ def test_rjumpv_into_self(
stack_height_spread: int,
):
"""
- EOF code containing RJUMPV targeting itself.
- This can never be valid because this is backward jump and RJUMPV consumes one stack item.
+ EOF code containing RJUMPV targeting itself. This can never be valid
+ because this is backward jump and RJUMPV consumes one stack item.
"""
# Create variadic stack height by the parametrized spread.
stack_spread_code = Bytecode()
@@ -527,7 +539,10 @@ def test_rjumpv_into_stack_height_diff(
table_size: int,
invalid_index: int,
):
- """EOF code containing RJUMPV with target instruction that causes stack height difference."""
+ """
+ EOF code containing RJUMPV with target instruction that causes stack height
+ difference.
+ """
jump_table = [0 for _ in range(table_size)]
jump_table[invalid_index] = -(len(Op.RJUMPV[jump_table]) + len(Op.PUSH1[0]) + len(Op.PUSH1[0]))
@@ -556,7 +571,10 @@ def test_rjumpv_into_stack_underflow(
table_size: int,
invalid_index: int,
):
- """EOF code containing RJUMPV with target instruction that cause stack underflow."""
+ """
+ EOF code containing RJUMPV with target instruction that cause stack
+ underflow.
+ """
jump_table = [0 for _ in range(table_size)]
jump_table[invalid_index] = 1
eof_test(
@@ -580,7 +598,10 @@ def test_rjumpv_skips_stack_underflow(
eof_test: EOFTestFiller,
table_size: int,
):
- """EOF code containing RJUMPV where the default path produces a stack underflow."""
+ """
+ EOF code containing RJUMPV where the default path produces a stack
+ underflow.
+ """
jump_table = [1 for _ in range(table_size)]
eof_test(
container=Container(
@@ -611,7 +632,10 @@ def test_rjumpv_into_rjump(
invalid_index: int,
data_portion_end: bool,
):
- """EOF1I4200_0037 (Invalid) EOF code containing RJUMPV with target RJUMP immediate."""
+ """
+ EOF1I4200_0037 (Invalid) EOF code containing RJUMPV with target RJUMP
+ immediate.
+ """
invalid_destination = 3 if data_portion_end else 2
jump_table = [0 for _ in range(table_size)]
jump_table[invalid_index] = invalid_destination
@@ -651,7 +675,10 @@ def test_rjumpv_into_rjumpi(
invalid_index: int,
data_portion_end: bool,
):
- """EOF1I4200_0038 (Invalid) EOF code containing RJUMPV with target RJUMPI immediate."""
+ """
+ EOF1I4200_0038 (Invalid) EOF code containing RJUMPV with target RJUMPI
+ immediate.
+ """
invalid_destination = 5 if data_portion_end else 4
jump_table = [0 for _ in range(table_size)]
jump_table[invalid_index] = invalid_destination
@@ -692,7 +719,10 @@ def test_rjumpv_into_push_1(
table_size: int,
invalid_index: int,
):
- """EOF1I4200_0039 (Invalid) EOF code containing RJUMPV with target PUSH1 immediate."""
+ """
+ EOF1I4200_0039 (Invalid) EOF code containing RJUMPV with target PUSH1
+ immediate.
+ """
if jump == JumpDirection.FORWARD:
invalid_destination = 2
jump_table = [0 for _ in range(table_size)]
@@ -777,7 +807,10 @@ def test_rjumpv_into_push_n(
invalid_index: int,
data_portion_end: bool,
):
- """EOF1I4200_0039 (Invalid) EOF code containing RJUMPV with target PUSHN immediate."""
+ """
+ EOF1I4200_0039 (Invalid) EOF code containing RJUMPV with target PUSHN
+ immediate.
+ """
data_portion_length = int.from_bytes(opcode, byteorder="big") - 0x5F
if jump == JumpDirection.FORWARD:
invalid_destination = data_portion_length + 1 if data_portion_end else 2
@@ -830,7 +863,10 @@ def test_rjumpv_into_rjumpv(
invalid_index: int,
data_portion_end: bool,
):
- """EOF1I4200_0040 (Invalid) EOF code containing RJUMPV with target other RJUMPV immediate."""
+ """
+ EOF1I4200_0040 (Invalid) EOF code containing RJUMPV with target other
+ RJUMPV immediate.
+ """
invalid_destination = 4 + (2 * target_table_size) if data_portion_end else 4
source_jump_table = [0 for _ in range(source_table_size)]
source_jump_table[invalid_index] = invalid_destination
@@ -871,7 +907,10 @@ def test_rjumpv_into_callf(
invalid_index: int,
data_portion_end: bool,
):
- """EOF1I4200_0041 (Invalid) EOF code containing RJUMPV with target CALLF immediate."""
+ """
+ EOF1I4200_0041 (Invalid) EOF code containing RJUMPV with target CALLF
+ immediate.
+ """
invalid_destination = 2 if data_portion_end else 1
jump_table = [0 for _ in range(table_size)]
jump_table[invalid_index] = invalid_destination
@@ -1104,7 +1143,8 @@ def test_rjumpv_at_the_end(
):
"""
https://github.com/ipsilon/eof/blob/main/spec/eof.md#stack-validation 4.i:
- This implies that the last instruction may be a terminating instruction or RJUMPV.
+ This implies that the last instruction may be a terminating instruction or
+ RJUMPV.
"""
eof_test(
container=Container(
@@ -1546,8 +1586,8 @@ def test_rjumpv_valid_forward(
container: Container,
):
"""
- Validate a valid code section containing at least one forward RJUMPV.
- These tests exercise the stack height validation.
+ Validate a valid code section containing at least one forward RJUMPV. These
+ tests exercise the stack height validation.
"""
eof_test(container=container)
@@ -1849,7 +1889,7 @@ def test_rjumpv_backward_invalid_max_stack_height(
container: Container,
):
"""
- Validate a code section containing at least one backward RJUMPV
- invalid because of the incorrect max stack height.
+ Validate a code section containing at least one backward RJUMPV invalid
+ because of the incorrect max stack height.
"""
eof_test(container=container, expect_exception=EOFException.STACK_HEIGHT_MISMATCH)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4750_functions/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip4750_functions/__init__.py
index 20c62ac7526..3889e560814 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4750_functions/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4750_functions/__init__.py
@@ -1,5 +1,7 @@
"""
-abstract: Test cases for [EIP-4750: EOF - Functions](https://eips.ethereum.org/EIPS/eip-4750)
- EIP-4750 formalizes functions in the EVM object format, introducing callable units of code.
- Opcodes introduced: `CALLF` (`0xE3`), `RETF` (`0xE4`).
-""" # noqa: E501
+Test cases for [EIP-4750: EOF - Functions](https://eips.ethereum.org/EIPS/eip-4750).
+
+EIP-4750 formalizes functions in the EVM object format, introducing
+callable units of code. Opcodes introduced: `CALLF` (`0xE3`), `RETF`
+(`0xE4`).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_callf_execution.py b/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_callf_execution.py
index 30adfa59c2a..b0b1d2a0f54 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_callf_execution.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_callf_execution.py
@@ -73,7 +73,9 @@ def test_callf_factorial(eof_state_test: EOFStateTestFiller, n, result):
((0, 1), (1, 1), (13, 233), (27, 196418)),
)
def test_callf_fibonacci(eof_state_test: EOFStateTestFiller, n, result):
- """Test fibonacci sequence implementation with recursive CALLF instructions."""
+ """
+ Test fibonacci sequence implementation with recursive CALLF instructions.
+ """
eof_state_test(
container=Container(
sections=[
@@ -454,11 +456,14 @@ def test_callf_sneaky_stack_overflow(
pre: Alloc,
):
"""
- CALLF where a normal execution would not overflow, but EIP-4750 CALLF rule #3 triggers.
+ CALLF where a normal execution would not overflow, but EIP-4750 CALLF rule
+ #3 triggers.
Code Section 0 - Mostly fills the stack
- Code Section 1 - jumper to 2, so container verification passes (we want a runtime failure)
- Code Section 2 - Could require too much stack, but doesn't as it JUMPFs to 3
+ Code Section 1 - jumper to 2, so container verification passes (we want a
+ runtime failure)
+ Code Section 2 - Could require too much stack, but doesn't as it JUMPFs
+ to 3
Code Section 3 - Writes canary values
The intent is to catch implementations of CALLF that don't enforce rule #3
@@ -552,17 +557,18 @@ def test_callf_max_stack(
pre: Alloc,
):
"""
- CALLF where a normal execution would not overflow, but EIP-4750 CALLF rule #4 triggers.
+ CALLF where a normal execution would not overflow, but EIP-4750 CALLF rule
+ #4 triggers.
- Code Section 0 - calls #1 with the configured height, but we load some operands so the
- return stack does not overflow
- Code Section 1 - expands stack, calls #2, THEN recursively calls itself until input is zero,
- and returns.
+ Code Section 0 - calls #1 with the configured height, but we load some
+ operands so the return stack does not overflow
+ Code Section 1 - expands stack, calls #2, THEN recursively calls itself
+ until input is zero, and returns.
Code Section 2 - Just returns, zero inputs, zero outputs
- This will catch CALLF execution rule #3: always fail if the operand stack is full. Not
- checking rule 3 results in a call to section 2 and not overfilling the stack (as it is just
- RETF).
+ This will catch CALLF execution rule #3: always fail if the operand stack
+ is full. Not checking rule 3 results in a call to section 2 and not
+ overfilling the stack (as it is just RETF).
"""
env = Environment()
sender = pre.fund_eoa()
diff --git a/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_code_validation.py b/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_code_validation.py
index c705d197581..e1df510d68b 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_code_validation.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip4750_functions/test_code_validation.py
@@ -379,7 +379,10 @@ def test_eof_validity(
eof_test: EOFTestFiller,
container: Container,
):
- """Test EOF container validation for features around EIP-4750 / Functions / Code Sections."""
+ """
+ Test EOF container validation for features around EIP-4750 / Functions /
+ Code Sections.
+ """
eof_test(container=container)
@@ -475,7 +478,9 @@ def test_invalid_code_section_index(
eof_test: EOFTestFiller,
container: Container,
):
- """Test cases for CALLF instructions with invalid target code section index."""
+ """
+ Test cases for CALLF instructions with invalid target code section index.
+ """
eof_test(container=container, expect_exception=EOFException.INVALID_CODE_SECTION_INDEX)
@@ -637,8 +642,8 @@ def test_unreachable_code_sections(
container: Container,
):
"""
- Test cases for EOF unreachable code sections
- (i.e. code sections not reachable from the code section 0).
+ Test cases for EOF unreachable code sections (i.e. code sections not
+ reachable from the code section 0).
"""
eof_test(container=container, expect_exception=EOFException.UNREACHABLE_CODE_SECTIONS)
@@ -646,9 +651,10 @@ def test_unreachable_code_sections(
@pytest.mark.parametrize("callee_outputs", [1, 2, MAX_CODE_OUTPUTS])
def test_callf_stack_height_limit_exceeded(eof_test, callee_outputs):
"""
- Test for invalid EOF code containing CALLF instruction exceeding the stack height limit.
- The code reaches the maximum runtime stack height (1024)
- which is above the EOF limit for the stack height in the type section (1023).
+ Test for invalid EOF code containing CALLF instruction exceeding the stack
+ height limit. The code reaches the maximum runtime stack height (1024)
+ which is above the EOF limit for the stack height in the type section
+ (1023).
"""
callf_stack_height = MAX_RUNTIME_STACK_HEIGHT - callee_outputs
container = Container(
@@ -669,7 +675,9 @@ def test_callf_stack_height_limit_exceeded(eof_test, callee_outputs):
@pytest.mark.parametrize("stack_height", [512, 513, 1023])
def test_callf_stack_overflow(eof_test: EOFTestFiller, stack_height: int):
- """Test CALLF instruction recursively calling itself causing stack overflow."""
+ """
+ Test CALLF instruction recursively calling itself causing stack overflow.
+ """
container = Container(
sections=[
Section.Code(code=Op.CALLF[1] + Op.STOP),
@@ -689,7 +697,10 @@ def test_callf_stack_overflow(eof_test: EOFTestFiller, stack_height: int):
@pytest.mark.parametrize("stack_height", [1, 2])
def test_callf_stack_overflow_after_callf(eof_test: EOFTestFiller, stack_height: int):
- """Test CALLF instruction calling next function causing stack overflow at validation time."""
+ """
+ Test CALLF instruction calling next function causing stack overflow at
+ validation time.
+ """
container = Container(
sections=[
Section.Code(code=Op.CALLF[1] + Op.STOP),
@@ -823,7 +834,10 @@ def test_callf_stack_overflow_variable_stack_4(eof_test: EOFTestFiller):
@pytest.mark.parametrize("stack_height", [2, 3])
def test_callf_validate_outputs(eof_test: EOFTestFiller, stack_height: int):
- """Test CALLF instruction when calling a function returning more outputs than expected."""
+ """
+ Test CALLF instruction when calling a function returning more outputs than
+ expected.
+ """
container = Container(
sections=[
Section.Code(code=Op.CALLF[1] + Op.STOP, max_stack_height=1),
@@ -1074,10 +1088,11 @@ def test_callf_with_inputs_stack_overflow_variable_stack(
)
def test_callf_stack_overflow_by_outputs(eof_test, callee_outputs, max_stack_height):
"""
- Test for invalid EOF code containing CALLF instruction exceeding the runtime stack height limit
- by calling a function with at least one output. The computed stack height of the code section 0
- is always above the maximum allowed in the EOF type section. Therefore, the test declares
- an invalid max_stack_height.
+ Test for invalid EOF code containing CALLF instruction exceeding the
+ runtime stack height limit by calling a function with at least one output.
+ The computed stack height of the code section 0 is always above the maximum
+ allowed in the EOF type section. Therefore, the test declares an invalid
+ max_stack_height.
"""
callf_stack_height = (MAX_RUNTIME_STACK_HEIGHT + 1) - callee_outputs
container = Container(
@@ -1102,10 +1117,10 @@ def test_callf_stack_overflow_by_outputs(eof_test, callee_outputs, max_stack_hei
)
def test_callf_stack_overflow_by_height(eof_test, callee_stack_height):
"""
- Test for invalid EOF code containing CALLF instruction exceeding the runtime stack height limit
- by calling a function with 2+ maximum stack height.
- The callee with the maximum stack height of 1 is valid because runtime limit (1024)
- is 1 bigger than the EOF limit (1023).
+ Test for invalid EOF code containing CALLF instruction exceeding the
+ runtime stack height limit by calling a function with 2+ maximum stack
+ height. The callee with the maximum stack height of 1 is valid because
+ runtime limit (1024) is 1 bigger than the EOF limit (1023).
"""
container = Container(
sections=[
@@ -1234,8 +1249,8 @@ def test_returning_section_aborts(
eof_test: EOFTestFiller,
):
"""
- Test EOF container validation where in the same code section we have returning
- and nonreturning terminating instructions.
+ Test EOF container validation where in the same code section we have
+ returning and nonreturning terminating instructions.
"""
container = Container(
name="returning_section_aborts",
diff --git a/tests/unscheduled/eip7692_eof_v1/eip5450_stack/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip5450_stack/__init__.py
index fb5897d4ed4..3935e40789c 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip5450_stack/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip5450_stack/__init__.py
@@ -1,5 +1,8 @@
"""
-abstract: Test cases for [EIP-5450: EOF - Stack Validation](https://eips.ethereum.org/EIPS/eip-5450)
- EIP-5450 defines stack validation requirements to ensure consistent behavior during execution.
- Opcodes introduced: None (specifies validation rules for stack usage).
-""" # noqa: E501
+Tests for
+[EIP-5450: EOF - Stack Validation](https://eips.ethereum.org/EIPS/eip-5450).
+
+EIP-5450 defines stack validation requirements to ensure consistent
+behavior during execution. Opcodes introduced: None (specifies validation
+rules for stack usage).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_code_validation.py b/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_code_validation.py
index ed58e09c06a..f85edd1f12c 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_code_validation.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_code_validation.py
@@ -1,4 +1,7 @@
-"""Code validation of CALLF, JUMPF, RETF opcodes in conjunction with static relative jumps."""
+"""
+Code validation of CALLF, JUMPF, RETF opcodes in conjunction with static
+relative jumps.
+"""
import itertools
from enum import Enum, auto, unique
@@ -51,7 +54,9 @@ def __str__(self) -> str:
@unique
class RjumpSpot(Enum):
- """Possible spots in the code section layout where the RJUMP* is injected."""
+ """
+ Possible spots in the code section layout where the RJUMP* is injected.
+ """
BEGINNING = auto()
BEFORE_TERMINATION = auto()
@@ -65,14 +70,16 @@ def rjump_code_with(
rjump_kind: RjumpKind | None, code_so_far_len: int, next_code: Bytecode
) -> Tuple[Bytecode, bool, bool, bool]:
"""
- Unless `rjump_kind` is None generates a code snippet with an RJUMP* instruction.
- For some kinds `code_so_far_len` must be code length in bytes preceding the snippet.
- For some kinds `next_code_len` must be code length in bytes of some code which follows.
+ Unless `rjump_kind` is None generates a code snippet with an RJUMP*
+ instruction. For some kinds `code_so_far_len` must be code length in bytes
+ preceding the snippet. For some kinds `next_code_len` must be code length
+ in bytes of some code which follows.
- It is expected that the snippet and the jump target are valid, but the resulting code
- or its stack balance might not.
+ It is expected that the snippet and the jump target are valid, but the
+ resulting code or its stack balance might not.
- Also returns some traits of the snippet: `is_backwards`, `pops` and `pushes`
+ Also returns some traits of the snippet: `is_backwards`, `pops` and
+ `pushes`
"""
body = Bytecode()
@@ -124,8 +131,9 @@ def rjump_code_with(
raise TypeError("unknown rjumps value" + str(rjump_kind))
if jumps_over_next:
- # This is against intuition, but if the code we're jumping over pushes, the path
- # which misses it will be short of stack items, as if the RJUMP* popped and vice versa.
+ # This is against intuition, but if the code we're jumping over pushes,
+ # the path which misses it will be short of stack items, as if the
+ # RJUMP* popped and vice versa.
if next_code.pushed_stack_items > next_code.popped_stack_items:
pops = True
elif next_code.popped_stack_items > next_code.pushed_stack_items:
@@ -136,10 +144,11 @@ def rjump_code_with(
def call_code_with(inputs, outputs, call: Bytecode) -> Bytecode:
"""
- Generate code snippet with the `call` bytecode provided and its respective input/output
- management.
+ Generate code snippet with the `call` bytecode provided and its respective
+ input/output management.
- `inputs` and `outputs` are understood as those of the code section we're generating for.
+ `inputs` and `outputs` are understood as those of the code section we're
+ generating for.
"""
body = Bytecode()
@@ -168,8 +177,8 @@ def section_code_with(
"""
Generate code section with RJUMP* and CALLF/RETF instructions.
- Also returns some traits of the section: `has_invalid_back_jump`, `rjump_snippet_pops`,
- `rjump_snippet_pushes`, `rjump_falls_off_code`
+ Also returns some traits of the section: `has_invalid_back_jump`,
+ `rjump_snippet_pops`, `rjump_snippet_pushes`, `rjump_falls_off_code`
"""
code = Bytecode()
code.pushed_stack_items, code.max_stack_height = (inputs, inputs)
@@ -214,7 +223,8 @@ def section_code_with(
RjumpKind.RJUMPI_OVER_NEXT_NESTED,
RjumpKind.RJUMPV_EMPTY_AND_OVER_NEXT,
]:
- # Jump over termination or jump over body, but there is nothing after the body.
+ # Jump over termination or jump over body, but there is nothing
+ # after the body.
rjump_falls_off_code = True
code += termination
@@ -260,10 +270,12 @@ def test_rjumps_callf_retf(
"""
Test EOF container validation for EIP-4200 vs EIP-4750 interactions.
- Each test's code consists of `num_sections` code sections, which call into one another
- and then return. Code may include RJUMP* snippets of `rjump_kind` in various `rjump_spots`.
+ Each test's code consists of `num_sections` code sections, which call into
+ one another and then return. Code may include RJUMP* snippets of
+ `rjump_kind` in various `rjump_spots`.
"""
- # Zeroth section has always 0 inputs and 0 outputs, so is excluded from param
+ # Zeroth section has always 0 inputs and 0 outputs, so is excluded from
+ # param
inputs = (0,) + inputs
outputs = (0,) + outputs
@@ -316,7 +328,8 @@ def test_rjumps_callf_retf(
container_has_invalid_back_jump = True
if rjump_snippet_pops:
container_has_rjump_pops = True
- # Pushes to the stack never affect the zeroth section, because it `STOP`s and not `RETF`s.
+ # Pushes to the stack never affect the zeroth section, because it
+ # `STOP`s and not `RETF`s.
if rjump_snippet_pushes and section_idx != 0:
container_has_rjump_pushes = True
if rjump_falls_off_code:
@@ -371,10 +384,11 @@ def test_rjumps_jumpf_nonreturning(
rjump_spot: RjumpSpot,
):
"""
- Test EOF container validation for EIP-4200 vs EIP-6206 interactions on non-returning
- functions.
+ Test EOF container validation for EIP-4200 vs EIP-6206 interactions on
+ non-returning functions.
"""
- # Zeroth section has always 0 inputs and 0 outputs, so is excluded from param
+ # Zeroth section has always 0 inputs and 0 outputs, so is excluded from
+ # param
inputs = (0,) + inputs
sections = []
@@ -394,8 +408,9 @@ def test_rjumps_jumpf_nonreturning(
call = None
termination = Op.STOP
- # `section_has_invalid_back_jump` - never happens: we excluded RJUMP from the end
- # `rjump_snippet_pushes` - never happens: we never RETF where too large stack would fail
+ # `section_has_invalid_back_jump` - never happens: we excluded RJUMP
+ # from the end `rjump_snippet_pushes` - never happens: we never RETF
+ # where too large stack would fail
(
code,
_section_has_invalid_back_jump,
@@ -463,8 +478,8 @@ def test_all_opcodes_stack_underflow(
eof_test: EOFTestFiller, op: Op, stack_height: int, spread: int
):
"""
- Test EOF validation failing due to stack overflow
- caused by the specific instruction `op`.
+ Test EOF validation failing due to stack overflow caused by the specific
+ instruction `op`.
"""
code = Bytecode()
@@ -579,7 +594,9 @@ def test_all_opcodes_stack_underflow(
ids=lambda x: x.name,
)
def test_stack_underflow_examples(eof_test, container):
- """Test EOF validation failing due to stack underflow at basic instructions."""
+ """
+ Test EOF validation failing due to stack underflow at basic instructions.
+ """
eof_test(container=container, expect_exception=EOFException.STACK_UNDERFLOW)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_execution.py b/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_execution.py
index d72f12eb148..5c9fa2fa5ba 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_execution.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip5450_stack/test_execution.py
@@ -27,12 +27,13 @@ def test_execution_at_max_stack_height(
eof_state_test: EOFStateTestFiller, code_inputs: int, call_op: Op
):
"""
- Test execution at the maximum runtime operand stack height (1024).
- EOF doesn't allow to increase the stack height of a single code section more than 1023.
- The effect of the maximum runtime stack height is achieved by using non-zero number
- of the code section inputs and increasing the runtime stack to the limit accordingly.
- The test pushes consecutive numbers starting from 0 (including inputs).
- At the maximum stack height SSTORE is used so it should store 1022 at key 1023.
+ Test execution at the maximum runtime operand stack height (1024). EOF
+ doesn't allow to increase the stack height of a single code section more
+ than 1023. The effect of the maximum runtime stack height is achieved by
+ using non-zero number of the code section inputs and increasing the runtime
+ stack to the limit accordingly. The test pushes consecutive numbers
+ starting from 0 (including inputs). At the maximum stack height SSTORE is
+ used so it should store 1022 at key 1023.
"""
max_stack_increase = MAX_RUNTIME_STACK_HEIGHT - code_inputs
container = Container(
diff --git a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/__init__.py
index b2391ecb037..dcd27c79cd4 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/__init__.py
@@ -1,5 +1,7 @@
"""
-abstract: Test cases for [EIP-6206: EOF - JUMPF and non-returning functions](https://eips.ethereum.org/EIPS/eip-6206)
- EIP-6206 adds a conditional forward jump instruction and support for functions without return values.
- Opcodes introduced: `JUMPF` (`0xE5`).
-""" # noqa: E501
+Test cases for
+[EIP-6206: EOF - JUMPF and non-returning functions](https://eips.ethereum.org/EIPS/eip-6206).
+
+EIP-6206 adds a conditional forward jump instruction and support for
+functions without return values. Opcodes introduced: `JUMPF` (`0xE5`).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_execution.py b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_execution.py
index c8c21022b99..b26d7702de3 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_execution.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_execution.py
@@ -299,7 +299,10 @@ def test_jumpf_way_too_large(
def test_jumpf_to_nonexistent_section(
eof_state_test: EOFStateTestFiller,
):
- """Tests JUMPF jumping to valid section number but where the section does not exist."""
+ """
+ Tests JUMPF jumping to valid section number but where the section does not
+ exist.
+ """
eof_state_test(
container=Container(
sections=[
@@ -358,7 +361,9 @@ def test_jumpf_stack_size_1024(
def test_jumpf_with_inputs_stack_size_1024(
eof_state_test: EOFStateTestFiller,
):
- """Test stack reaching 1024 items in target function of JUMPF with inputs."""
+ """
+ Test stack reaching 1024 items in target function of JUMPF with inputs.
+ """
eof_state_test(
container=Container(
sections=[
@@ -381,7 +386,10 @@ def test_jumpf_with_inputs_stack_size_1024(
def test_jumpf_stack_size_1024_at_push(
eof_state_test: EOFStateTestFiller,
):
- """Test stack reaching 1024 items in JUMPF target function at PUSH0 instruction."""
+ """
+ Test stack reaching 1024 items in JUMPF target function at PUSH0
+ instruction.
+ """
eof_state_test(
container=Container(
sections=[
@@ -430,13 +438,18 @@ def test_jumpf_stack_overflow(
eof_state_test: EOFStateTestFiller,
):
"""
- Test rule #2 in execution semantics, where we make sure we have enough stack to guarantee
- safe execution (the "reserved stack rule") max possible stack will not exceed 1024. But some
- executions may not overflow the stack, so we need to ensure the rule is checked.
+ Test rule #2 in execution semantics, where we make sure we have enough
+ stack to guarantee safe execution (the "reserved stack rule") max possible
+ stack will not exceed 1024. But some executions may not overflow the stack,
+ so we need to ensure the rule is checked.
`no_overflow` - the stack does not overflow at JUMPF call, executes to end
- `rule_overflow` - reserved stack rule triggers, but execution would not overflow if allowed
- `execution_overflow` - execution would overflow (but still blocked by reserved stack rule)
+
+ `rule_overflow` - reserved stack rule triggers, but execution would not
+ overflow if allowed
+
+ `execution_overflow` - execution would overflow (but still blocked by
+ reserved stack rule)
"""
eof_state_test(
container=Container(
@@ -482,7 +495,10 @@ def test_jumpf_stack_overflow(
def test_jumpf_with_inputs_stack_size_1024_at_push(
eof_state_test: EOFStateTestFiller,
):
- """Test stack reaching 1024 items in JUMPF target function with inputs at PUSH0 instruction."""
+ """
+ Test stack reaching 1024 items in JUMPF target function with inputs at
+ PUSH0 instruction.
+ """
eof_state_test(
container=Container(
sections=[
@@ -520,7 +536,9 @@ def test_jumpf_with_inputs_stack_size_1024_at_push(
def test_jumpf_with_inputs_stack_overflow(
eof_state_test: EOFStateTestFiller,
):
- """Test stack overflowing 1024 items in JUMPF target function with inputs."""
+ """
+ Test stack overflowing 1024 items in JUMPF target function with inputs.
+ """
eof_state_test(
container=Container(
sections=[
diff --git a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_stack.py b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_stack.py
index 0831a8c9c24..f54946a3079 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_stack.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_stack.py
@@ -33,8 +33,8 @@ def test_jumpf_stack_non_returning_rules(
stack_height: int,
):
"""
- Tests for JUMPF validation stack rules. Non-returning section cases.
- Valid cases are executed.
+ Tests for JUMPF validation stack rules. Non-returning section cases. Valid
+ cases are executed.
"""
container = Container(
name="stack-non-retuning_h-%d_ti-%d" % (stack_height, target_inputs),
@@ -90,8 +90,8 @@ def test_jumpf_stack_returning_rules(
stack_diff: int,
):
"""
- Tests for JUMPF validation stack rules. Returning section cases.
- Valid cases are executed.
+ Tests for JUMPF validation stack rules. Returning section cases. Valid
+ cases are executed.
"""
if target_outputs > source_outputs:
# These create invalid containers without JUMPF validation, Don't test.
@@ -297,7 +297,10 @@ def test_jumpf_self_variadic_stack_overflow(eof_test: EOFTestFiller):
def test_jumpf_variadic_stack_overflow(
eof_test: EOFTestFiller, stack_height: int, callee_stack_height: int
):
- """Test JUMPF stack validation causing stack overflow with variable stack height."""
+ """
+ Test JUMPF stack validation causing stack overflow with variable stack
+ height.
+ """
container = Container(
sections=[
Section.Code(
@@ -346,7 +349,10 @@ def test_jumpf_with_inputs_stack_overflow(
def test_jumpf_with_inputs_stack_overflow_variable_stack(
eof_test: EOFTestFiller, stack_height: int, callee_stack_increase: int
):
- """Test JUMPF with variable stack depending on RJUMPI calling function with inputs."""
+ """
+ Test JUMPF with variable stack depending on RJUMPI calling function with
+ inputs.
+ """
container = Container(
sections=[
Section.Code(
diff --git a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_target.py b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_target.py
index dcfd397430a..886a39e7586 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_target.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_target.py
@@ -32,8 +32,8 @@ def test_jumpf_target_rules(
target_outputs: int,
):
"""
- Validate the target section rules of JUMPF, and execute valid cases.
- We are not testing stack so a lot of the logic is to get correct stack values.
+ Validate the target section rules of JUMPF, and execute valid cases. We are
+ not testing stack so a lot of the logic is to get correct stack values.
"""
source_non_returning = source_outputs == NON_RETURNING_SECTION
source_height = 0 if source_non_returning else source_outputs
@@ -43,10 +43,11 @@ def test_jumpf_target_rules(
target_height = 0 if target_non_returning else target_outputs
target_section_index = 2
- # Because we are testing the target and not the stack height validation we need to do some work
- # to make sure the stack passes validation.
+ # Because we are testing the target and not the stack height validation we
+ # need to do some work to make sure the stack passes validation.
- # `source_extra_push` is how many more pushes we need to match our stack commitments
+ # `source_extra_push` is how many more pushes we need to match our stack
+ # commitments
source_extra_push = max(0, source_height - target_height)
source_section = Section.Code(
code=Op.PUSH0 * (source_height)
@@ -60,8 +61,9 @@ def test_jumpf_target_rules(
max_stack_height=source_height + max(1, source_extra_push),
)
- # `delta` is how many stack items the target output is from the input height, and tracks the
- # number of pushes or (if negative) pops the target needs to do to match output commitments
+ # `delta` is how many stack items the target output is from the input
+ # height, and tracks the number of pushes or (if negative) pops the target
+ # needs to do to match output commitments
delta = 0 if target_non_returning or source_non_returning else target_outputs - source_height
target_section = Section.Code(
code=((Op.PUSH0 * delta) if delta >= 0 else (Op.POP * -delta))
@@ -116,7 +118,7 @@ def test_jumpf_multi_target_rules(
eof_state_test: EOFStateTestFiller,
):
"""
- NOT IMPLEMENTED:
- Test a section that contains multiple JUMPF to different targets with different outputs.
+ NOT IMPLEMENTED: Test a section that contains multiple JUMPF to different
+ targets with different outputs.
"""
pass
diff --git a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_validation.py b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_validation.py
index 67a057df89c..4fe3100e934 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_validation.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_jumpf_validation.py
@@ -111,7 +111,9 @@ def test_invalid_code_section_index(
eof_test: EOFTestFiller,
container: Container,
):
- """Test cases for JUMPF instructions with invalid target code section index."""
+ """
+ Test cases for JUMPF instructions with invalid target code section index.
+ """
eof_test(container=container, expect_exception=EOFException.INVALID_CODE_SECTION_INDEX)
@@ -119,8 +121,8 @@ def test_returning_section_aborts_jumpf(
eof_test: EOFTestFiller,
):
"""
- Test EOF container validation where in the same code section we have returning
- and nonreturning terminating instructions.
+ Test EOF container validation where in the same code section we have
+ returning and nonreturning terminating instructions.
"""
container = Container(
sections=[
@@ -141,7 +143,10 @@ def test_returning_section_aborts_jumpf(
@pytest.mark.parametrize("stack_height", [512, 513, 1023])
def test_jumpf_self_stack_overflow(eof_test: EOFTestFiller, stack_height: int):
- """Test JUMPF instruction jumping to itself causing validation time stack overflow."""
+ """
+ Test JUMPF instruction jumping to itself causing validation time stack
+ overflow.
+ """
container = Container(
sections=[
Section.Code(
@@ -162,7 +167,10 @@ def test_jumpf_self_stack_overflow(eof_test: EOFTestFiller, stack_height: int):
def test_jumpf_other_stack_overflow(
eof_test: EOFTestFiller, stack_height: int, stack_height_other: int
):
- """Test JUMPF instruction jumping to other section causing validation time stack overflow."""
+ """
+ Test JUMPF instruction jumping to other section causing validation time
+ stack overflow.
+ """
container = Container(
sections=[
Section.Code(
@@ -202,7 +210,10 @@ def test_jumpf_to_non_returning(eof_test: EOFTestFiller, stack_height: int, code
@pytest.mark.parametrize("code_inputs", [0, 1, 3, 5])
def test_jumpf_to_non_returning_variable_stack(eof_test: EOFTestFiller, code_inputs: int):
- """Test JUMPF jumping to a non-returning function with stack depending on RJUMPI."""
+ """
+ Test JUMPF jumping to a non-returning function with stack depending on
+ RJUMPI.
+ """
container = Container(
sections=[
Section.Code(
@@ -266,7 +277,10 @@ def test_jumpf_to_returning_variable_stack_1(
code_outputs: int,
stack_increase: int,
):
- """Test JUMPF with variable stack jumping to a returning function increasing the stack."""
+ """
+ Test JUMPF with variable stack jumping to a returning function increasing
+ the stack.
+ """
exception = None
if code_inputs >= 3 or code_outputs + 1 < 3: # 3 = Section 1's max stack
exception = EOFException.STACK_UNDERFLOW
@@ -305,7 +319,10 @@ def test_jumpf_to_returning_variable_stack_2(
code_outputs: int,
stack_decrease: int,
):
- """Test JUMPF with variable stack jumping to a returning function decreasing the stack."""
+ """
+ Test JUMPF with variable stack jumping to a returning function decreasing
+ the stack.
+ """
exceptions = []
if code_inputs >= 3 or code_outputs + 1 < 3: # 3 = Section 1's max stack
exceptions.append(EOFException.STACK_UNDERFLOW)
@@ -336,7 +353,10 @@ def test_jumpf_to_returning_variable_stack_2(
def test_jumpf_to_returning_variable_stack_3(eof_test: EOFTestFiller):
- """Test JUMPF with variable stack jumping to a returning function increasing the stack."""
+ """
+ Test JUMPF with variable stack jumping to a returning function increasing
+ the stack.
+ """
container = Container(
sections=[
Section.Code(code=Op.CALLF[1] + Op.STOP, max_stack_height=2),
diff --git a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_nonreturning_validation.py b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_nonreturning_validation.py
index c5bc912994e..d40bdb0eb7c 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_nonreturning_validation.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip6206_jumpf/test_nonreturning_validation.py
@@ -31,7 +31,9 @@
[0, 1, 0x7F, 0x81, 0xFF],
)
def test_first_section_returning(eof_test: EOFTestFiller, code: Bytecode, outputs: int):
- """Test EOF validation failing because the first section is not non-returning."""
+ """
+ Test EOF validation failing because the first section is not non-returning.
+ """
eof_test(
container=Container(
sections=[Section.Code(code, code_outputs=outputs)],
@@ -62,7 +64,10 @@ def test_first_section_returning(eof_test: EOFTestFiller, code: Bytecode, output
def test_first_section_with_inputs(
eof_test: EOFTestFiller, code: Bytecode, inputs: int, outputs: int
):
- """Test EOF validation failing because the first section has non-zero number of inputs."""
+ """
+ Test EOF validation failing because the first section has non-zero number
+ of inputs.
+ """
eof_test(
container=Container(
sections=[
@@ -94,7 +99,10 @@ def test_first_section_with_inputs(
],
)
def test_returning_section_not_returning(eof_test: EOFTestFiller, code_section: Section):
- """Test EOF validation failing due to returning section with no RETF or JUMPF-to-returning."""
+ """
+ Test EOF validation failing due to returning section with no RETF or
+ JUMPF-to-returning.
+ """
eof_test(
container=Container(
sections=[
@@ -118,8 +126,8 @@ def test_returning_section_not_returning(eof_test: EOFTestFiller, code_section:
)
def test_returning_section_returncode(eof_test: EOFTestFiller, code_section: Section):
"""
- Test EOF validation failing because a returning section has no RETF or JUMPF-to-returning -
- RETURNCODE version.
+ Test EOF validation failing because a returning section has no RETF or
+ JUMPF-to-returning - RETURNCODE version.
"""
eof_test(
container=Container(
@@ -148,7 +156,10 @@ def test_returning_section_returncode(eof_test: EOFTestFiller, code_section: Sec
@first
@code_prefix
def test_retf_in_nonreturning(eof_test: EOFTestFiller, first: bool, code_prefix: Bytecode):
- """Test EOF validation failing due to non-returning section with the RETF instruction."""
+ """
+ Test EOF validation failing due to non-returning section with the RETF
+ instruction.
+ """
sections = [Section.Code(code_prefix + Op.RETF, code_outputs=NON_RETURNING_SECTION)]
if not first: # Prefix sections with additional valid JUMPF to invalid section
sections = [Section.Code(Op.JUMPF[1])] + sections
@@ -162,7 +173,10 @@ def test_retf_in_nonreturning(eof_test: EOFTestFiller, first: bool, code_prefix:
@first
@code_prefix
def test_jumpf_in_nonreturning(eof_test: EOFTestFiller, first: bool, code_prefix: Bytecode):
- """Test EOF validation failing due to non-returning section with the JUMPF instruction."""
+ """
+ Test EOF validation failing due to non-returning section with the JUMPF
+ instruction.
+ """
invalid_section = Section.Code(
code_prefix + Op.JUMPF[1 if first else 2],
code_outputs=NON_RETURNING_SECTION,
diff --git a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/__init__.py
index d75f39a597d..01a78b47340 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/__init__.py
@@ -1,8 +1,10 @@
"""
-abstract: Test cases for [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663)
- EIP-663 defines new stack manipulation instructions that allow accessing the stack at higher depths.
- Opcodes introduced: `DUPN` (`0xE6`), `SWAPN` (`0xE7`), `EXCHANGEN` (`0xE8`).
-""" # noqa: E501
+Test cases for EIP-663 SWAPN, DUPN and EXCHANGE instructions
+ [EIP-663](https://eips.ethereum.org/EIPS/eip-663) defines new stack
+ manipulation instructions that allow accessing the stack at higher depths.
+ Opcodes introduced: `DUPN` (`0xE6`), `SWAPN` (`0xE7`), `EXCHANGEN`
+ (`0xE8`).
+"""
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-663.md"
REFERENCE_SPEC_VERSION = "b658bb87fe039d29e9475d5cfaebca9b92e0fca2"
diff --git a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_dupn.py b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_dupn.py
index 4285d6c8e0c..a68cae56817 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_dupn.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_dupn.py
@@ -1,7 +1,8 @@
"""
-abstract: Tests [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663)
- Tests for the DUPN instruction.
-""" # noqa: E501
+DUPN instruction tests
+ Tests for DUPN instruction in
+ [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663).
+"""
import pytest
diff --git a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_exchange.py b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_exchange.py
index 74e40d83f19..583c10d89bc 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_exchange.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_exchange.py
@@ -1,7 +1,6 @@
"""
-abstract: Tests [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663)
- Tests for the EXCHANGE instruction.
-""" # noqa: E501
+Tests [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663).
+"""
import pytest
diff --git a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_swapn.py b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_swapn.py
index dedbd76771f..b0d14af35de 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_swapn.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip663_dupn_swapn_exchange/test_swapn.py
@@ -1,7 +1,4 @@
-"""
-abstract: Tests [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663)
- Tests for the SWAPN instruction.
-""" # noqa: E501
+"""Tests [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663)."""
import pytest
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/__init__.py
index 63bedd7fd2f..672c24a7406 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/__init__.py
@@ -1,8 +1,11 @@
"""
-abstract: Test cases for [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069)
- EIP-7069 proposes modifications to `CALL` instructions to align with the structured EOF format.
- Opcodes introduced: `EXTCALL` (`0xF8`), `EXTDELEGATECALL` (`0xF9`), `EXTSTATICCALL` (`0xFB`), `RETURNDATALOAD` (`0xF7`).
-""" # noqa: E501
+Test cases for EIP-7069 Revamped CALL instructions
+ [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069)
+ proposes modifications to `CALL` instructions to align with the
+ structured EOF format. Opcodes introduced: `EXTCALL` (`0xF8`),
+ `EXTDELEGATECALL` (`0xF9`), `EXTSTATICCALL` (`0xFB`), `RETURNDATALOAD`
+ (`0xF7`).
+"""
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-7069.md"
REFERENCE_SPEC_VERSION = "1795943aeacc86131d5ab6bb3d65824b3b1d4cad"
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_address_space_extension.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_address_space_extension.py
index feee36e6b2b..a2180dca078 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_address_space_extension.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_address_space_extension.py
@@ -65,7 +65,10 @@ def test_address_space_extension(
target_opcode: Op,
target_account_type: str,
):
- """Test contacts with possibly extended address and fail if address is too large."""
+ """
+ Test contracts with possibly extended address and fail if address is too
+ large.
+ """
env = Environment()
ase_address = len(target_address) > 20
@@ -167,11 +170,12 @@ def test_address_space_extension(
caller_storage[slot_target_returndata] = stripped_address
case Op.CALLCODE | Op.DELEGATECALL:
caller_storage[slot_target_call_status] = LEGACY_CALL_SUCCESS
- # CALLCODE and DELEGATECALL call will call the stripped address
- # but will change the sender to self
+ # CALLCODE and DELEGATECALL call will call the stripped
+ # address but will change the sender to self
caller_storage[slot_target_returndata] = address_caller
case Op.EXTCALL | Op.EXTSTATICCALL:
- # EXTCALL and EXTSTATICCALL will fault if calling an ASE address
+ # EXTCALL and EXTSTATICCALL will fault if calling an ASE
+ # address
if ase_address:
caller_storage[slot_target_call_status] = value_exceptional_abort_canary
caller_storage[slot_target_returndata] = value_exceptional_abort_canary
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calldata.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calldata.py
index 2bc3f36e839..99faf722834 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calldata.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calldata.py
@@ -1,7 +1,8 @@
"""
-abstract: Tests [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069)
- Tests for the RETURNDATALOAD instruction.
-""" # noqa: E501
+Call data tests for EXT*CALL instructions
+ Tests for call data handling in
+ [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069).
+"""
import pytest
@@ -61,8 +62,8 @@ def test_extcalls_inputdata(
"""
Tests call data into EXTCALL including multiple offset conditions.
- Caller pushes data into memory, then calls the target. Target writes 64 bytes of call data
- to storage and a success byte.
+ Caller pushes data into memory, then calls the target. Target writes 64
+ bytes of call data to storage and a success byte.
"""
env = Environment()
@@ -148,8 +149,8 @@ def test_extdelegatecall_inputdata(
"""
Tests call data into EXTDELEGATECALL including multiple offset conditions.
- Caller pushes data into memory, then calls the target. Target writes 64 bytes of call data
- to storage and a success byte.
+ Caller pushes data into memory, then calls the target. Target writes 64
+ bytes of call data to storage and a success byte.
"""
env = Environment()
@@ -232,8 +233,8 @@ def test_extstaticcall_inputdata(
"""
Tests call data into EXTSTATICCALL including multiple offset conditions.
- Caller pushes data into memory, then calls the target. Target writes 64 bytes of call data
- to storage and a success byte.
+ Caller pushes data into memory, then calls the target. Target writes 64
+ bytes of call data to storage and a success byte.
"""
env = Environment()
@@ -312,8 +313,8 @@ def test_calldata_remains_after_subcall(
"""
Tests call data remains after a call to another contract.
- Caller pushes data into memory, then calls the target. Target calls 3rd contract. 3rd contract
- returns. Target writes calldata to storage.
+ Caller pushes data into memory, then calls the target. Target calls 3rd
+ contract. 3rd contract returns. Target writes calldata to storage.
"""
env = Environment()
@@ -494,18 +495,21 @@ def test_extcalls_input_offset(
"""
Tests call data into EXT*CALL including multiple offset conditions.
- Returner returns a success value, which caller stores. If memory expansion cost is less than
- 2 billion gas call succeeds. Else whole transaction aborts, leaving canaries in memory.
+ Returner returns a success value, which caller stores. If memory expansion
+ cost is less than 2 billion gas call succeeds. Else whole transaction
+ aborts, leaving canaries in memory.
- The name id of `*-mem-cost` refers to the bit-length of the result of the calculated memory
- expansion cost. Their length choice is designed to cause problems on shorter bit-length
- representations with native integers.
+ The name id of `*-mem-cost` refers to the bit-length of the result of the
+ calculated memory expansion cost. Their length choice is designed to cause
+ problems on shorter bit-length representations with native integers.
- The `offset_field` param indicates what part of the input data arguments are being tested,
- either the offset of the data in memory or the size of the data in memory.
+ The `offset_field` param indicates what part of the input data arguments
+ are being tested, either the offset of the data in memory or the size of
+ the data in memory.
- The `test_arg` param is the value passed into the field being tested (offset or size),
- intending to trigger integer size bugs for that particular field.
+ The `test_arg` param is the value passed into the field being tested
+ (offset or size), intending to trigger integer size bugs for that
+ particular field.
"""
env = Environment(gas_limit=1_000_000_000)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py
index a6e6f0a3934..55c2c67ba89 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py
@@ -365,8 +365,8 @@ def test_eof_calls_eof_mstore(
identity = Address(0x04)
-# `blake2f`` is chosen for the test because it fails unless args_size == 213, which is what we are
-# interested in.
+# `blake2f` is chosen for the test because it fails unless args_size == 213,
+# which is what we are interested in.
blake2f = Address(0x09)
# `p256verify` / RIP-7212 has been in and out of prague and osaka.
# Hence we need to test explicitly
@@ -843,7 +843,9 @@ def test_eof_calls_static_flag_with_value(
sender: EOA,
opcode: Op,
):
- """Test EOF contracts calls handle static flag and sending value correctly."""
+ """
+ Test EOF contracts calls handle static flag and sending value correctly.
+ """
env = Environment()
noop_callee_address = pre.deploy_contract(Container.Code(Op.STOP))
@@ -922,13 +924,16 @@ def test_eof_calls_min_callee_gas(
reverts: bool,
):
"""
- Test EOF contracts calls do light failure when retained/callee gas is not enough.
+ Test EOF contracts calls do light failure when retained/callee gas is not
+ enough.
- Premise of the test is that there exists a range of `gas_limit` values, which are enough
- for all instructions to execute, but call's returned value is 1, meaning not enough for gas
- allowances (MIN_RETAINED_GAS and MIN_CALLEE_GAS) - ones marked with `reverts==False`.
+ Premise of the test is that there exists a range of `gas_limit` values,
+ which are enough for all instructions to execute, but call's returned value
+ is 1, meaning not enough for gas allowances (MIN_RETAINED_GAS and
+ MIN_CALLEE_GAS) - ones marked with `reverts==False`.
- Once we provide both allowances, the RJUMPI condition is no longer met and `reverts==True`.
+ Once we provide both allowances, the RJUMPI condition is no longer met and
+ `reverts==True`.
"""
env = Environment()
@@ -939,7 +944,8 @@ def test_eof_calls_min_callee_gas(
Container.Code(
Op.SSTORE(slot_code_worked, value_code_worked)
+ Op.EQ(opcode(address=noop_callee_address, value=value), EXTCALL_REVERT)
- # If the return code isn't 1, it means gas was enough to cover the allowances.
+ # If the return code isn't 1, it means gas was enough to cover the
+ # allowances.
+ Op.RJUMPI[len(revert_block)]
+ revert_block
+ Op.STOP
@@ -947,7 +953,8 @@ def test_eof_calls_min_callee_gas(
balance=value,
)
- # `no_oog_gas` is minimum amount of gas_limit which makes the transaction not go oog.
+ # `no_oog_gas` is minimum amount of gas_limit which makes the transaction
+ # not go oog.
push_operations = 3 + len(opcode.kwargs) # type: ignore
no_oog_gas = (
21_000
@@ -994,7 +1001,10 @@ def test_eof_calls_with_value(
balance: int,
value: int,
):
- """Test EOF contracts calls handle value calls with and without enough balance."""
+ """
+ Test EOF contracts calls handle value calls with and without enough
+ balance.
+ """
env = Environment()
noop_callee_address = pre.deploy_contract(Container.Code(Op.STOP))
@@ -1047,24 +1057,30 @@ def test_eof_calls_msg_depth(
):
"""
Test EOF contracts calls handle msg depth limit correctly (1024).
- NOTE: due to block gas limit and the 63/64th rule this limit is unlikely to be hit
- on mainnet.
+
+ Note:
+ due to block gas limit and the 63/64th rule this limit is unlikely
+ to be hit on mainnet.
+
"""
- # Not a precise gas_limit formula, but enough to exclude risk of gas causing the failure.
+ # Not a precise gas_limit formula, but enough to exclude risk of gas
+ # causing the failure.
gas_limit = int(200000 * (64 / 63) ** 1024)
env = Environment(gas_limit=gas_limit)
# Flow of the test:
- # `callee_code` is recursively calling itself, passing msg depth as calldata
- # (kept with the `MSTORE(0, ADD(...))`). When maximum msg depth is reached
- # the call fails and starts returning. The deep-most frame returns:
+ # `callee_code` is recursively calling itself, passing msg depth as
+ # calldata (kept with the `MSTORE(0, ADD(...))`). When maximum msg depth is
+ # reached the call fails and starts returning. The deep-most frame returns:
# - current reached msg depth (expected to be the maximum 1024), with the
# `MSTORE(32, ADD(...))`
- # - the respective return code of the EXT*CALL (expected to be 1 - light failure), with the
- # `MSTORE(64, NOOP)`. Note the `NOOP` is just to appease the `Op.MSTORE` call, the return
- # code value is actually coming from the `Op.DUP1`
- # When unwinding the msg call stack, the intermediate frames return whatever the deeper callee
- # returned with the `RETURNDATACOPY` instruction.
+ # - the respective return code of the EXT*CALL (expected to be 1 - light
+ # failure), with the `MSTORE(64, NOOP)`. Note the `NOOP` is just to
+ # appease the `Op.MSTORE` call, the return code value is actually
+ # coming from the `Op.DUP1`
+ # When unwinding the msg call stack, the intermediate frames return
+ # whatever the deeper callee returned with the `RETURNDATACOPY`
+ # instruction.
# Memory offsets layout:
# - 0 - input - msg depth
@@ -1084,8 +1100,10 @@ def test_eof_calls_msg_depth(
# duplicate return code for the `returndatacopy_block` below
+ Op.DUP1
# if return code was:
- # - 1, we're in the deep-most frame, `deep_most_result_block` returns the actual result
- # - 0, we're in an intermediate frame, `returndatacopy_block` only passes on the result
+ # - 1, we're in the deep-most frame, `deep_most_result_block` returns
+ # the actual result
+ # - 0, we're in an intermediate frame, `returndatacopy_block` only
+ # passes on the result
+ Op.RJUMPI[rjump_offset]
+ returndatacopy_block
+ deep_most_result_block
@@ -1139,8 +1157,8 @@ def test_extdelegate_call_targets(
call_from_initcode: bool,
):
"""
- Test EOF contracts extdelegatecalling various targets, especially resolved via 7702
- delegation.
+ Test EOF contracts extdelegatecalling various targets, especially resolved
+ via 7702 delegation.
"""
env = Environment()
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_gas.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_gas.py
index e7b6ffa923c..638adc05c5b 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_gas.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_gas.py
@@ -1,7 +1,8 @@
"""
-abstract: Tests [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069)
- Tests gas consumption.
-""" # noqa: E501
+Gas consumption tests for EXT*CALL instructions
+ Tests for gas consumption in
+ [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069).
+"""
import pytest
@@ -32,9 +33,9 @@ def state_env() -> Environment:
"""
Prepare the environment for all state test cases.
- Main difference is that the excess blob gas is not increased by the target, as
- there is no genesis block -> block 1 transition, and therefore the excess blob gas
- is not decreased by the target.
+ Main difference is that the excess blob gas is not increased by the target,
+ as there is no genesis block -> block 1 transition, and therefore the
+ excess blob gas is not decreased by the target.
"""
return Environment()
@@ -124,7 +125,10 @@ def test_ext_calls_gas(
new_account: bool,
mem_expansion_bytes: int,
):
- """Tests variations of EXT*CALL gas, both warm and cold, without and with mem expansions."""
+ """
+ Tests variations of EXT*CALL gas, both warm and cold, without and with mem
+ expansions.
+ """
address_target = (
pre.fund_eoa(0) if new_account else pre.deploy_contract(Container.Code(Op.STOP))
)
@@ -154,11 +158,12 @@ def test_transfer_gas_is_cleared(
value: int,
):
"""
- Test that EXT*CALL call doesn't charge for value transfer, even if the outer call
- transferred value.
+ Test that EXT*CALL call doesn't charge for value transfer, even if the
+ outer call transferred value.
- NOTE: This is particularly possible for EXTDELEGATECALL, which carries over the value sent
- in the outer call, however, we extend the test to all 3 EXT*CALL opcodes for good measure.
+ NOTE: This is particularly possible for EXTDELEGATECALL, which carries over
+ the value sent in the outer call, however, we extend the test to all 3
+ EXT*CALL opcodes for good measure.
"""
noop_callee_address = pre.deploy_contract(Container.Code(Op.STOP))
@@ -176,8 +181,8 @@ def test_transfer_gas_is_cleared(
subject_code=Op.EXTCALL,
subject_balance=5 * value,
tear_down_code=Op.STOP,
- # NOTE: CALL_WITH_VALUE_GAS is charged only once on the outer EXTCALL, while the base
- # call gas - twice.
+ # NOTE: CALL_WITH_VALUE_GAS is charged only once on the outer EXTCALL,
+ # while the base call gas - twice.
cold_gas=2 * COLD_ACCOUNT_ACCESS_GAS
+ (CALL_WITH_VALUE_GAS if value > 0 else 0)
+ push_gas,
@@ -196,8 +201,8 @@ def test_late_account_create(
opcode: Op,
):
"""
- Test EXTCALL to a non-existent account after another EXT*CALL has called it and not
- created it.
+ Test EXTCALL to a non-existent account after another EXT*CALL has called it
+ and not created it.
"""
empty_address = Address(0xDECAFC0DE)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndatacopy_memory_expansion.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndatacopy_memory_expansion.py
index f5b6112c0b9..7b997275943 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndatacopy_memory_expansion.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndatacopy_memory_expansion.py
@@ -52,7 +52,10 @@ def subcall_exact_cost(
dest: int,
length: int,
) -> int:
- """Return exact cost of the subcall, based on the initial memory and the length of the copy."""
+ """
+ Return exact cost of the subcall, based on the initial memory and the
+ length of the copy.
+ """
cost_memory_bytes = fork.memory_expansion_gas_calculator()
returndatacopy_cost = 3
@@ -78,8 +81,9 @@ def bytecode_storage(
memory_expansion_address: Address,
) -> Tuple[Bytecode, Storage.StorageDictType]:
"""
- Prepare bytecode and storage for the test, based on the expected result of the subcall
- (whether it succeeds or fails depending on the length of the memory expansion).
+ Prepare bytecode and storage for the test, based on the expected result of
+ the subcall (whether it succeeds or fails depending on the length of the
+ memory expansion).
"""
bytecode = Bytecode()
storage = {}
@@ -213,7 +217,10 @@ def test_returndatacopy_memory_expansion(
post: Mapping[str, Account],
tx: Transaction,
):
- """Perform RETURNDATACOPY operations that expand the memory, and verify the gas it costs."""
+ """
+ Perform RETURNDATACOPY operations that expand the memory, and verify the
+ gas it costs.
+ """
state_test(
env=env,
pre=pre,
@@ -266,8 +273,8 @@ def test_returndatacopy_huge_memory_expansion(
tx: Transaction,
):
"""
- Perform RETURNDATACOPY operations that expand the memory by huge amounts, and verify that it
- correctly runs out of gas.
+ Perform RETURNDATACOPY operations that expand the memory by huge amounts,
+ and verify that it correctly runs out of gas.
"""
state_test(
env=env,
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndataload.py b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndataload.py
index 53400fd80f9..8c75d1af340 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndataload.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_returndataload.py
@@ -1,7 +1,8 @@
"""
-abstract: Tests [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069)
- Tests for the RETURNDATALOAD instruction.
-""" # noqa: E501
+RETURNDATALOAD instruction tests
+ Tests for RETURNDATALOAD instruction in
+ [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069).
+"""
from typing import cast
@@ -82,14 +83,16 @@ def test_returndatacopy_handling(
size: int,
):
"""
- Tests ReturnDataLoad including multiple offset conditions and differing legacy vs. eof
- boundary conditions.
+ Tests ReturnDataLoad including multiple offset conditions and differing
+ legacy vs. eof boundary conditions.
entrypoint creates a "0xff" test area of memory, delegate calls to caller.
- Caller is either EOF or legacy, as per parameter. Calls returner and copies the return data
- based on offset and size params. Cases are expected to trigger boundary violations.
+ Caller is either EOF or legacy, as per parameter. Calls returner and
+ copies the return data based on offset and size params. Cases are expected
+ to trigger boundary violations.
- Entrypoint copies the test area to storage slots, and the expected result is asserted.
+ Entrypoint copies the test area to storage slots, and the expected result
+ is asserted.
"""
env = Environment()
@@ -215,9 +218,10 @@ def test_returndataload_handling(
offset: int,
):
"""
- Much simpler than returndatacopy, no memory or boosted call. Returner is called
- and results are stored in storage slot, which is asserted for expected values.
- The parameters offset and return data are configured to test boundary conditions.
+ Much simpler than returndatacopy, no memory or boosted call. Returner is
+ called and results are stored in storage slot, which is asserted for
+ expected values. The parameters offset and return data are configured to
+ test boundary conditions.
"""
env = Environment()
@@ -283,16 +287,17 @@ def test_returndatacopy_oob(
opcode: Op,
):
"""
- Extends the RETURNDATACOPY test for correct out-of-bounds behavior, by checking if the
- caller frame's context being EOF or legacy doesn't impact the execution logic of the
- RETURNDATACOPY instance under test.
+ Extends the RETURNDATACOPY test for correct out-of-bounds behavior, by
+ checking if the caller frame's context being EOF or legacy doesn't impact
+ the execution logic of the RETURNDATACOPY instance under test.
"""
env = Environment()
sender = pre.fund_eoa()
- # Both callee codes below make an OOB (out-of-bounds) RETURNDATACOPY of one byte,
- # which they then attempt to return (Legacy should exceptionally halt on RETURNDATACOPY).
+ # Both callee codes below make an OOB (out-of-bounds) RETURNDATACOPY of one
+ # byte, which they then attempt to return (Legacy should exceptionally halt
+ # on RETURNDATACOPY).
address_callee_eof = pre.deploy_contract(
code=Container(
sections=[
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/__init__.py
index 845a31a74a8..ee7f967e8f0 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/__init__.py
@@ -1,5 +1,9 @@
"""
-abstract: Test cases for [EIP-7480: EOF - Data section access instructions](https://eips.ethereum.org/EIPS/eip-7480)
- EIP-7480 specifies instructions for accessing data stored in the dedicated data section of the EOF format.
- Opcodes introduced: `DATALOAD` (`0xD0`), `DATALOADN` (`0xD1`), `DATASIZE` (`0xD2`), `DATACOPY` (`0xD3`).
-""" # noqa: E501
+Test cases for EOF Data section access instructions for EIP-7480.
+
+EIP-7480 specifies instructions for accessing data stored in the dedicated
+data section of the EOF format. Full specification: [EIP-7480: EOF - Data
+section access instructions](https://eips.ethereum.org/EIPS/eip-7480).
+Opcodes introduced: `DATALOAD` (`0xD0`), `DATALOADN` (`0xD1`), `DATASIZE`
+(`0xD2`), `DATACOPY` (`0xD3`).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_data_opcodes.py b/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_data_opcodes.py
index 234ea530a78..2770011f6c5 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_data_opcodes.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_data_opcodes.py
@@ -36,7 +36,10 @@ def test_dataloadn(eof_state_test: EOFStateTestFiller, index: int, suffix_len: i
def create_data_test(offset: int, datasize: int):
- """Generate data load operators test cases based on load offset and data section size."""
+ """
+ Generate data load operators test cases based on load offset and data
+ section size.
+ """
data = b"".join(i.to_bytes(length=2, byteorder="big") for i in range(1, datasize // 2 + 1))
assert len(data) == datasize
overhang = min(32, offset + 32 - datasize)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_datacopy_memory_expansion.py b/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_datacopy_memory_expansion.py
index 4c4e58d7bbf..90c2ec67568 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_datacopy_memory_expansion.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7480_data_section/test_datacopy_memory_expansion.py
@@ -52,7 +52,10 @@ def subcall_exact_cost(
dest: int,
length: int,
) -> int:
- """Return exact cost of the subcall, based on the initial memory and the length of the copy."""
+ """
+ Return exact cost of the subcall, based on the initial memory and the
+ length of the copy.
+ """
cost_memory_bytes = fork.memory_expansion_gas_calculator()
datacopy_cost = 3
@@ -78,8 +81,9 @@ def bytecode_storage(
memory_expansion_address: Address,
) -> Tuple[Bytecode, Storage.StorageDictType]:
"""
- Prepare bytecode and storage for the test, based on the expected result of the subcall
- (whether it succeeds or fails depending on the length of the memory expansion).
+ Prepare bytecode and storage for the test, based on the expected result of
+ the subcall (whether it succeeds or fails depending on the length of the
+ memory expansion).
"""
bytecode = Bytecode()
storage = {}
@@ -223,7 +227,10 @@ def test_datacopy_memory_expansion(
post: Mapping[str, Account],
tx: Transaction,
):
- """Perform DATACOPY operations that expand the memory, and verify the gas it costs to do so."""
+ """
+ Perform DATACOPY operations that expand the memory, and verify the gas it
+ costs to do so.
+ """
state_test(
env=env,
pre=pre,
@@ -286,8 +293,8 @@ def test_datacopy_huge_memory_expansion(
tx: Transaction,
):
"""
- Perform DATACOPY operations that expand the memory by huge amounts, and verify that it
- correctly runs out of gas.
+ Perform DATACOPY operations that expand the memory by huge amounts, and
+ verify that it correctly runs out of gas.
"""
state_test(
env=env,
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/__init__.py
index 886cfd7db23..4156e92a7a5 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/__init__.py
@@ -1,19 +1,26 @@
"""
-abstract: Test cases for [EIP-7620: EOF Contract Creation](https://eips.ethereum.org/EIPS/eip-7620)
- EIP-7620 replaces `CREATE` and `CREATE2` with `EOFCREATE` for deploying contracts in the EOF format.
- Opcodes introduced: `EOFCREATE` (`0xEC`), `RETURNCODE` (`0xEE`).
+Test cases for EOF Contract Creation for EIP-7620.
+EIP-7620 replaces `CREATE` and `CREATE2` with `EOFCREATE` for deploying
+contracts in the EOF format.
-EOFCREATE, RETURNCODE, and container tests
+Full specification:
+[EIP-7620: EOF Contract Creation](https://eips.ethereum.org/EIPS/eip-7620).
-evmone tests not ported
+Opcodes introduced: `EOFCREATE` (`0xEC`), `RETURNCODE` (`0xEE`).
-- create_tx_with_eof_initcode - This calls it invalid, it is now the way to add EOF contacts to state
-- eofcreate_extcall_returncode - per the new initcode mode tests you cannot have RETURNCODE
- in a deployed contract
-- eofcreate_dataloadn_referring_to_auxdata - covered by
- tests.unscheduled.eip7480_data_section.test_data_opcodes.test_data_section_succeed
-- eofcreate_initcontainer_return - RETURN is banned in initcode containers
-- eofcreate_initcontainer_stop - STOP is banned in initcode containers
+EOFCREATE, RETURNCODE, and container tests.
+
+evmone tests not ported:
+- create_tx_with_eof_initcode: This calls it invalid, it is now the way to
+ add EOF contracts to state
+- eofcreate_extcall_returncode: Per the new initcode
+ mode tests you cannot have RETURNCODE in a
+ deployed contract
+- eofcreate_dataloadn_referring_to_auxdata: covered by
+ tests.unscheduled.eip7480_data_section.
+ test_data_opcodes.test_data_section_succeed
+- eofcreate_initcontainer_return: RETURN is banned in initcode containers
+- eofcreate_initcontainer_stop: STOP is banned in initcode containers
- All TXCREATE tests.
-""" # noqa: E501
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate.py
index 3f4780368af..e8a6846adb0 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate.py
@@ -76,7 +76,10 @@ def test_eofcreate_then_dataload(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies that a contract returned with auxdata does not overwrite the parent data."""
+ """
+ Verifies that a contract returned with auxdata does not overwrite the
+ parent data.
+ """
env = Environment()
sender = pre.fund_eoa()
small_auxdata_container = Container(
@@ -124,7 +127,9 @@ def test_eofcreate_then_call(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies a simple EOFCREATE case, and then calls the deployed contract."""
+ """
+ Verifies a simple EOFCREATE case, and then calls the deployed contract.
+ """
env = Environment()
callable_contract = Container(
sections=[
@@ -314,7 +319,9 @@ def test_eofcreate_in_initcode(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies an EOFCREATE occurring within initcode creates that contract."""
+ """
+ Verifies an EOFCREATE occurring within initcode creates that contract.
+ """
nested_initcode_subcontainer = Container(
sections=[
Section.Code(
@@ -368,7 +375,10 @@ def test_eofcreate_in_initcode_reverts(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies an EOFCREATE occurring in an initcode is rolled back when the initcode reverts."""
+ """
+ Verifies an EOFCREATE occurring in an initcode is rolled back when the
+ initcode reverts.
+ """
nested_initcode_subcontainer = Container(
sections=[
Section.Code(
@@ -422,7 +432,10 @@ def test_return_data_cleared(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies the return data is not reused from a extcall but is cleared upon eofcreate."""
+ """
+    Verifies the return data is not reused from an extcall but is cleared upon
+ eofcreate.
+ """
env = Environment()
value_return_canary = 0x4158675309
value_return_canary_size = 5
@@ -518,8 +531,10 @@ def test_address_collision(
contract_address: Account(
storage={
slot_create_address: salt_zero_address,
- slot_create_address_2: EOFCREATE_FAILURE, # had an in-transaction collision
- slot_create_address_3: EOFCREATE_FAILURE, # had a pre-existing collision
+ # had an in-transaction collision
+ slot_create_address_2: EOFCREATE_FAILURE,
+ # had a pre-existing collision
+ slot_create_address_3: EOFCREATE_FAILURE,
slot_code_worked: value_code_worked,
}
)
@@ -540,7 +555,10 @@ def test_eofcreate_revert_eof_returndata(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies the return data is not being deployed, even if happens to be valid EOF."""
+ """
+    Verifies the return data is not being deployed, even if it happens to be valid
+ EOF.
+ """
env = Environment()
code_reverts_with_calldata = Container(
name="Initcode Subcontainer reverting with its calldata",
@@ -638,7 +656,9 @@ def test_eofcreate_truncated_container(
data_len: int,
data_section_size: int,
):
- """EOFCREATE instruction targeting a container with truncated data section."""
+ """
+ EOFCREATE instruction targeting a container with truncated data section.
+ """
assert data_len < data_section_size
eof_test(
container=Container(
@@ -716,7 +736,8 @@ def test_eofcreate_context(
elif expected_result == "selfbalance":
expected_bytes = eofcreate_value
elif expected_result == "factorybalance":
- # Factory receives value from sender and passes on eofcreate_value as endowment.
+ # Factory receives value from sender and passes on eofcreate_value as
+ # endowment.
expected_bytes = value - eofcreate_value
else:
raise TypeError("Unexpected expected_result", expected_result)
@@ -747,7 +768,10 @@ def test_eofcreate_memory_context(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies an EOFCREATE frame enjoys a separate EVM memory from its caller frame."""
+ """
+ Verifies an EOFCREATE frame enjoys a separate EVM memory from its caller
+ frame.
+ """
env = Environment()
destination_storage = Storage()
contract_storage = Storage()
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate_failures.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate_failures.py
index 68f50eec3ee..1b3872b5c0a 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate_failures.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_eofcreate_failures.py
@@ -142,8 +142,8 @@ def test_initcode_aborts(
"""
-Size of the factory portion of test_eofcreate_deploy_sizes, but as the runtime code is dynamic, we
-have to use a pre-calculated size
+Size of the factory portion of test_eofcreate_deploy_sizes, but as the runtime
+code is dynamic, we have to use a pre-calculated size
"""
factory_size = 78
@@ -172,7 +172,10 @@ def test_eofcreate_deploy_sizes(
pre: Alloc,
target_deploy_size: int,
):
- """Verifies a mix of runtime contract sizes mixing success and multiple size failure modes."""
+ """
+ Verifies a mix of runtime contract sizes mixing success and multiple size
+ failure modes.
+ """
env = Environment()
runtime_container = Container(
@@ -215,9 +218,9 @@ def test_eofcreate_deploy_sizes(
sender = pre.fund_eoa()
contract_address = pre.deploy_contract(code=factory_container)
- # Storage in 0 should have the address,
- # Storage 1 is a canary of 1 to make sure it tried to execute, which also covers cases of
- # data+code being greater than initcode_size_max, which is allowed.
+ # Storage in 0 should have the address, Storage 1 is a canary of 1 to make
+ # sure it tried to execute, which also covers cases of data+code being
+ # greater than initcode_size_max, which is allowed.
success = target_deploy_size <= MAX_BYTECODE_SIZE
post = {
contract_address: Account(
@@ -260,8 +263,8 @@ def test_eofcreate_deploy_sizes_tx(
target_deploy_size: int,
):
"""
- Verifies a mix of runtime contract sizes mixing success and multiple size failure modes
- where the initcontainer is included in a transaction.
+ Verifies a mix of runtime contract sizes mixing success and multiple size
+ failure modes where the initcontainer is included in a transaction.
"""
raise NotImplementedError("Not implemented")
@@ -278,7 +281,9 @@ def test_eofcreate_deploy_sizes_tx(
],
)
def test_auxdata_size_failures(state_test: StateTestFiller, pre: Alloc, auxdata_size: int):
- """Exercises a number of auxdata size violations, and one maxcode success."""
+ """
+ Exercises a number of auxdata size violations, and one maxcode success.
+ """
env = Environment()
auxdata_bytes = b"a" * auxdata_size
@@ -309,8 +314,11 @@ def test_auxdata_size_failures(state_test: StateTestFiller, pre: Alloc, auxdata_
deployed_container_size = len(smallest_runtime_subcontainer) + auxdata_size
- # Storage in 0 will have address in first test, 0 in all other cases indicating failure
- # Storage 1 in 1 is a canary to see if EOFCREATE opcode halted
+ # Storage in 0 will have address in first test, 0 in all other cases
+ # indicating failure
+ #
+ # Storage 1 in 1 is a canary to see if EOFCREATE opcode
+ # halted
success = deployed_container_size <= MAX_BYTECODE_SIZE
post = {
contract_address: Account(
@@ -351,8 +359,8 @@ def test_eofcreate_insufficient_stipend(
value: int,
):
"""
- Exercises an EOFCREATE that fails because the calling account does not have enough ether to
- pay the stipend.
+ Exercises an EOFCREATE that fails because the calling account does not have
+ enough ether to pay the stipend.
"""
env = Environment()
initcode_container = Container(
@@ -370,7 +378,9 @@ def test_eofcreate_insufficient_stipend(
code=initcode_container,
balance=value - 1,
)
- # create will fail but not trigger a halt, so canary at storage 1 should be set
+ # create will fail but not trigger a halt, so canary at storage 1 should be
+ # set
+ #
# also validate target created contract fails
post = {
contract_address: Account(
@@ -395,7 +405,10 @@ def test_insufficient_initcode_gas(
state_test: StateTestFiller,
pre: Alloc,
):
- """Exercises an EOFCREATE when there is not enough gas for the initcode charge."""
+ """
+ Exercises an EOFCREATE when there is not enough gas for the initcode
+ charge.
+ """
env = Environment()
initcode_data = b"a" * 0x5000
@@ -455,7 +468,10 @@ def test_insufficient_gas_memory_expansion(
state_test: StateTestFiller,
pre: Alloc,
):
- """Exercises EOFCREATE when the memory for auxdata has not been expanded but is requested."""
+ """
+ Exercises EOFCREATE when the memory for auxdata has not been expanded but
+ is requested.
+ """
env = Environment()
auxdata_size = 0x5000
@@ -513,7 +529,10 @@ def test_insufficient_returncode_auxdata_gas(
state_test: StateTestFiller,
pre: Alloc,
):
- """Exercises a RETURNCODE when there is not enough gas for the initcode charge."""
+ """
+ Exercises a RETURNCODE when there is not enough gas for the initcode
+ charge.
+ """
env = Environment()
auxdata_size = 0x5000
@@ -583,7 +602,8 @@ def test_insufficient_returncode_auxdata_gas(
Op.EXTSTATICCALL,
],
)
-@pytest.mark.parametrize("endowment", [0, 1]) # included to verify static flag check comes first
+@pytest.mark.parametrize("endowment", [0, 1]) # included to verify static flag
+# check comes first
@pytest.mark.parametrize(
"initcode",
[smallest_initcode_subcontainer, aborting_container],
@@ -664,14 +684,18 @@ def test_eof_eofcreate_msg_depth(
):
"""
Test EOFCREATE handles msg depth limit correctly (1024).
- NOTE: due to block gas limit and the 63/64th rule this limit is unlikely to be hit
- on mainnet.
- NOTE: See `tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py::test_eof_calls_msg_depth`
- for more explanations and comments. Most notable deviation from that test is that here
- calls and `EOFCREATE`s alternate in order to reach the max depth. `who_fails` decides
- whether the failing depth 1024 will be on a call or on an `EOFCREATE` to happen.
- """ # noqa: E501
- # Not a precise gas_limit formula, but enough to exclude risk of gas causing the failure.
+
+ NOTE: due to block gas limit and the 63/64th rule this limit is
+ unlikely to be hit on mainnet.
+ NOTE: See `tests/unscheduled/eip7692_eof_v1/eip7069_extcall/
+ test_calls.py::test_eof_calls_msg_depth` for more explanations and
+ comments. Most notable deviation from that test is that here calls
+ and `EOFCREATE`s alternate in order to reach the max depth.
+ `who_fails` decides whether the failing depth 1024 will be on a call
+ or on an `EOFCREATE` to happen.
+ """
+ # Not a precise gas_limit formula, but enough to exclude risk of gas
+ # causing the failure.
gas_limit = int(20000000 * (64 / 63) ** 1024)
env = Environment(gas_limit=gas_limit)
sender = pre.fund_eoa()
@@ -730,8 +754,9 @@ def test_eof_eofcreate_msg_depth(
)
)
- # Only bumps the msg call depth "register" and forwards to the `calling_contract_address`.
- # If it is used it makes the "failing" depth of 1024 to happen on EOFCREATE, instead of CALL.
+ # Only bumps the msg call depth "register" and forwards to the
+ # `calling_contract_address`. If it is used it makes the "failing" depth of
+ # 1024 to happen on EOFCREATE, instead of CALL.
passthrough_address = pre.deploy_contract(
Container.Code(
Op.MSTORE(0, 1) + Op.EXTCALL(address=calling_contract_address, args_size=32) + Op.STOP
@@ -768,12 +793,15 @@ def test_reentrant_eofcreate(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies a reentrant EOFCREATE case, where EIP-161 prevents conflict via nonce bump."""
+ """
+ Verifies a reentrant EOFCREATE case, where EIP-161 prevents conflict via
+ nonce bump.
+ """
env = Environment()
# Calls into the factory contract with 1 as input.
reenter_code = Op.MSTORE(0, 1) + Op.EXTCALL(address=Op.CALLDATALOAD(32), args_size=32)
- # Initcode: if given 0 as 1st word of input will call into the factory again.
- # 2nd word of input is the address of the factory.
+ # Initcode: if given 0 as 1st word of input will call into the factory
+ # again. 2nd word of input is the address of the factory.
initcontainer = Container(
sections=[
Section.Code(
@@ -786,15 +814,19 @@ def test_reentrant_eofcreate(
Section.Container(smallest_runtime_subcontainer),
]
)
- # Factory: Passes on its input into the initcode. It's 0 first time, 1 the second time.
- # Saves the result of deployment in slot 0 first time, 1 the second time.
+ # Factory:
+ # Passes on its input into the initcode.
+ # It's 0 first time, 1 the second time.
+ # Saves the result of deployment in slot 0 first time, 1 the
+ # second time.
contract_address = pre.deploy_contract(
code=Container(
sections=[
Section.Code(
Op.CALLDATACOPY(0, 0, 32)
+ Op.MSTORE(32, Op.ADDRESS)
- # 1st word - copied from input (reenter flag), 2nd word - `this.address`.
+ # 1st word - copied from input (reenter flag)
+ # 2nd word - `this.address`
+ Op.SSTORE(Op.CALLDATALOAD(0), Op.EOFCREATE[0](input_size=64))
+ Op.STOP,
),
@@ -803,13 +835,18 @@ def test_reentrant_eofcreate(
),
storage={0: 0xB17D, 1: 0xB17D}, # a canary to be overwritten
)
- # Flow is: reenter flag 0 -> factory -> reenter flag 0 -> initcode -> reenter ->
- # reenter flag 1 -> factory -> reenter flag 1 -> (!) initcode -> stop,
- # if the EIP-161 nonce bump is not implemented. If it is, it fails before second
- # inicode marked (!).
+ # Flow is:
+ # reenter flag 0 -> factory -> reenter flag 0 -> initcode ->
+ # reenter -> reenter flag 1 -> factory -> reenter flag 1 -> (!) initcode
+ # -> stop
+ # if the EIP-161 nonce bump is not implemented.
+ #
+    # If it is, it fails before second initcode marked (!).
+ #
# Storage in 0 should have the address from the outer EOFCREATE.
- # Storage in 1 should have 0 from the inner EOFCREATE.
- # For the created contract storage in `slot_counter` should be 1 as initcode executes only once
+ # Storage in 1 should have 0 from the inner EOFCREATE. For the created
+ # contract storage in `slot_counter` should be 1 as initcode
+ # executes only once.
post = {
contract_address: Account(
storage={
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_gas.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_gas.py
index f7cb2519a10..c3019da962e 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_gas.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_gas.py
@@ -130,8 +130,9 @@ def test_eofcreate_gas(
for a in salt_addresses:
pre.fund_address(a, 1)
- # Using `TLOAD` / `TSTORE` to work around warm/cold gas differences. We need a counter to pick
- # a distinct salt on each `EOFCREATE` and avoid running into address conflicts.
+ # Using `TLOAD` / `TSTORE` to work around warm/cold gas differences. We
+ # need a counter to pick a distinct salt on each `EOFCREATE` and avoid
+ # running into address conflicts.
code_increment_counter = (
Op.TLOAD(slot_counter) + Op.DUP1 + Op.TSTORE(slot_counter, Op.PUSH1(1) + Op.ADD)
)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_legacy_eof_creates.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_legacy_eof_creates.py
index 390b1ce089b..d6ab9c2a090 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_legacy_eof_creates.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_legacy_eof_creates.py
@@ -56,7 +56,10 @@ def test_cross_version_creates_fail_light(
legacy_create_opcode: Opcodes,
initcode: Bytes | Container,
):
- """Verifies that CREATE and CREATE2 cannot run EOF initcodes and fail early on attempt."""
+ """
+ Verifies that CREATE and CREATE2 cannot run EOF initcodes and fail early on
+ attempt.
+ """
env = Environment()
sender = pre.fund_eoa()
@@ -66,7 +69,8 @@ def test_cross_version_creates_fail_light(
contract_address = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
+ Op.SSTORE(slot_create_address, legacy_create_opcode(size=Op.CALLDATASIZE))
- # Approximates whether code until here consumed the 63/64th gas given to subcall
+ # Approximates whether code until here consumed the 63/64th gas given
+ # to subcall
+ Op.SSTORE(slot_all_subcall_gas_gone, Op.LT(Op.GAS, tx_gas_limit // 64))
+ Op.SSTORE(slot_code_worked, value_code_worked)
+ Op.STOP
@@ -126,8 +130,8 @@ def test_cross_version_creates_fail_hard(
initcode: Bytes,
):
"""
- Verifies that CREATE and CREATE2 fail hard on attempt to run initcode starting with `EF` but
- not `EF00`.
+ Verifies that CREATE and CREATE2 fail hard on attempt to run initcode
+ starting with `EF` but not `EF00`.
"""
env = Environment()
@@ -138,7 +142,8 @@ def test_cross_version_creates_fail_hard(
contract_address = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
+ Op.SSTORE(slot_create_address, legacy_create_opcode(size=Op.CALLDATASIZE))
- # Approximates whether code until here consumed the 63/64th gas given to subcall
+ # Approximates whether code until here consumed the 63/64th gas given
+ # to subcall
+ Op.SSTORE(slot_all_subcall_gas_gone, Op.LT(Op.GAS, tx_gas_limit // 64))
+ Op.SSTORE(slot_code_worked, value_code_worked)
+ Op.STOP
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_memory.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_memory.py
index 67748507e67..15eed022aee 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_memory.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_memory.py
@@ -62,18 +62,20 @@ def test_eofcreate_memory(
"""
Tests auxdata sizes in EOFCREATE including multiple offset conditions.
- EOFCREATE either succeeds or fails based on memory access cost, resulting in new address
- or zero in the create address slot.
+ EOFCREATE either succeeds or fails based on memory access cost, resulting
+ in new address or zero in the create address slot.
- The name id of `*-mem-cost` refers to the bit-length of the result of the calculated memory
- expansion cost. Their length choice is designed to cause problems on shorter bit-length
- representations with native integers.
+ The name id of `*-mem-cost` refers to the bit-length of the result of the
+ calculated memory expansion cost. Their length choice is designed to cause
+ problems on shorter bit-length representations with native integers.
- The `offset_field` param indicates what part of the input data arguments are being tested,
- either the offset of the data in memory or the size of the data in memory.
+ The `offset_field` param indicates what part of the input data arguments
+ are being tested, either the offset of the data in memory or the size of
+ the data in memory.
- The `test_arg` param is the value passed into the field being tested (offset or size),
- intending to trigger integer size bugs for that particular field.
+ The `test_arg` param is the value passed into the field being tested
+ (offset or size), intending to trigger integer size bugs for that
+ particular field.
"""
env = Environment(gas_limit=2_000_000_000)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_returncode.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_returncode.py
index 3b50714dec5..e9457683717 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_returncode.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_returncode.py
@@ -203,18 +203,21 @@ def test_returncode_memory_expansion(
success: bool,
):
"""
- Attempts an EOFCREATE with a possibly too-large auxdata. Create either fails due to gas
- or contract too large, resulting in address or zero on failure in the create address slot.
+ Attempts an EOFCREATE with a possibly too-large auxdata. Create either
+ fails due to gas or contract too large, resulting in address or zero on
+ failure in the create address slot.
- The name id of `*-mem-cost` refers to the bit-length of the result of the calculated memory
- expansion cost. Their length choice is designed to cause problems on shorter bit-length
- representations with native integers.
+ The name id of `*-mem-cost` refers to the bit-length of the result of the
+ calculated memory expansion cost. Their length choice is designed to cause
+ problems on shorter bit-length representations with native integers.
- The `offset_field` param indicates what part of the input data arguments are being tested,
- either the offset of the data in memory or the size of the data in memory.
+ The `offset_field` param indicates what part of the input data arguments
+ are being tested, either the offset of the data in memory or the size of
+ the data in memory.
- The `test_arg` param is the value passed into the field being tested (offset or size),
- intending to trigger integer size bugs for that particular field.
+ The `test_arg` param is the value passed into the field being tested
+ (offset or size), intending to trigger integer size bugs for that
+ particular field.
"""
env = Environment(gas_limit=2_000_000_000)
sender = pre.fund_eoa(10**27)
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_subcontainer_validation.py b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_subcontainer_validation.py
index 045e3dbbd39..8c796feb759 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_subcontainer_validation.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7620_eof_create/test_subcontainer_validation.py
@@ -264,7 +264,10 @@ def test_container_combos_deeply_nested_valid(
code_section: Section,
first_sub_container: Container,
):
- """Test valid subcontainer reference / opcode combos on a deep container nesting level."""
+ """
+ Test valid subcontainer reference / opcode combos on a deep container
+ nesting level.
+ """
valid_container = Container(
sections=[
code_section,
@@ -314,7 +317,10 @@ def test_container_combos_deeply_nested_invalid(
code_section: Section,
first_sub_container: Container,
):
- """Test invalid subcontainer reference / opcode combos on a deep container nesting level."""
+ """
+ Test invalid subcontainer reference / opcode combos on a deep container
+ nesting level.
+ """
invalid_container = Container(
sections=[
code_section,
@@ -380,7 +386,10 @@ def test_container_combos_non_first_code_sections_valid(
first_sub_container: Container,
container_kind: ContainerKind,
):
- """Test valid subcontainer reference / opcode combos in a non-first code section."""
+ """
+ Test valid subcontainer reference / opcode combos in a non-first code
+ section.
+ """
eof_test(
container=Container(
sections=[Section.Code(Op.JUMPF[i]) for i in range(1, 1024)]
@@ -419,7 +428,10 @@ def test_container_combos_non_first_code_sections_invalid(
first_sub_container: Container,
container_kind: ContainerKind,
):
- """Test invalid subcontainer reference / opcode combos in a non-first code section."""
+ """
+ Test invalid subcontainer reference / opcode combos in a non-first code
+ section.
+ """
eof_test(
container=Container(
sections=[Section.Code(Op.JUMPF[i]) for i in range(1, 1024)]
@@ -431,7 +443,9 @@ def test_container_combos_non_first_code_sections_invalid(
def test_container_both_kinds_same_sub(eof_test: EOFTestFiller):
- """Test subcontainer conflicts (both EOFCREATE and RETURNCODE Reference)."""
+ """
+ Test subcontainer conflicts (both EOFCREATE and RETURNCODE Reference).
+ """
eof_test(
container=Container(
sections=[
@@ -460,8 +474,8 @@ def test_container_ambiguous_kind(
eof_test: EOFTestFiller, container_idx: int, sub_container: Section
):
"""
- Test ambiguous container kind:
- a single subcontainer reference by both EOFCREATE and RETURNCODE.
+ Test ambiguous container kind: a single subcontainer reference by both
+ EOFCREATE and RETURNCODE.
"""
sections = [
Section.Code(
@@ -504,7 +518,10 @@ def test_container_both_kinds_different_sub(eof_test: EOFTestFiller):
def test_container_multiple_eofcreate_references(eof_test: EOFTestFiller):
- """Test multiple references to the same subcontainer from an EOFCREATE operation."""
+ """
+ Test multiple references to the same subcontainer from an EOFCREATE
+ operation.
+ """
eof_test(
container=Container(
sections=[
@@ -518,7 +535,10 @@ def test_container_multiple_eofcreate_references(eof_test: EOFTestFiller):
def test_container_multiple_returncode_references(eof_test: EOFTestFiller):
- """Test multiple references to the same subcontainer from a RETURNCONTACT operation."""
+ """
+    Test multiple references to the same subcontainer from a RETURNCODE
+ operation.
+ """
eof_test(
container=Container(
sections=[
@@ -611,7 +631,8 @@ def test_deep_container(
"""
Test a very deeply nested container.
- This test skips generating a state test because the initcode size is too large.
+ This test skips generating a state test because the initcode size is too
+ large.
"""
container = deepest_container
last_container = deepest_container
@@ -785,7 +806,8 @@ def test_wide_container(eof_test: EOFTestFiller, width: int, exception: EOFExcep
ef0001010004020001000603000100000014ff000200008000016000e0000000ef000101000402000100
01ff00000000800000fe""",
# Originally this test was "valid" but against the current spec
- # it contains two errors: data section truncated and orphan subcontainer.
+ # it contains two errors: data section truncated and orphan
+ # subcontainer.
validity_error=EOFException.TOPLEVEL_CONTAINER_TRUNCATED,
),
id="orphan_subcontainer_0_and_truncated_data",
@@ -812,7 +834,8 @@ def test_wide_container(eof_test: EOFTestFiller, width: int, exception: EOFExcep
Section.Code(Op.EOFCREATE[0](0, 0, 0, 0) + Op.STOP),
Section.Container("aabbccddeeff"),
],
- # The original test has been modified to reference the subcontainer by EOFCREATE.
+ # The original test has been modified to reference the
+ # subcontainer by EOFCREATE.
validity_error=EOFException.INVALID_MAGIC,
),
id="subcontainer_0_with_invalid_prefix",
@@ -896,7 +919,10 @@ def test_migrated_eofcreate(eof_test: EOFTestFiller, container: Container):
def test_dangling_initcode_subcontainer_bytes(
eof_test: EOFTestFiller,
):
- """Initcode mode EOF Subcontainer test with subcontainer containing dangling bytes."""
+ """
+ Initcode mode EOF Subcontainer test with subcontainer containing dangling
+ bytes.
+ """
eof_test(
container=Container(
sections=[
@@ -916,7 +942,10 @@ def test_dangling_initcode_subcontainer_bytes(
def test_dangling_runtime_subcontainer_bytes(
eof_test: EOFTestFiller,
):
- """Runtime mode EOF Subcontainer test with subcontainer containing dangling bytes."""
+ """
+ Runtime mode EOF Subcontainer test with subcontainer containing dangling
+ bytes.
+ """
eof_test(
container=Container(
sections=[
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/__init__.py b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/__init__.py
index b893e42d5a3..ddd44267933 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/__init__.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/__init__.py
@@ -1,4 +1,3 @@
"""
-abstract: Test cases for
-[EIP-7873: TXCREATE and InitcodeTransaction](https://eips.ethereum.org/EIPS/eip-7873).
-""" # noqa: E501
+Test cases for [EIP-7873: TXCREATE and InitcodeTransaction](https://eips.ethereum.org/EIPS/eip-7873).
+"""
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_creation_tx.py b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_creation_tx.py
index 7d21916f292..842fd90e8ea 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_creation_tx.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_creation_tx.py
@@ -99,9 +99,9 @@ def test_legacy_create_tx_prefix_initcode(
initcode: Bytes,
):
"""
- Test that a legacy contract creation tx behaves as it did before EIP-7873 for
- initcode stating with `EF`.
- The transaction should be valid but fail on executing of the first byte `EF`.
+ Test that a legacy contract creation tx behaves as it did before EIP-7873
+    for initcode starting with `EF`. The transaction should be valid but fail on
+ executing of the first byte `EF`.
"""
env = Environment()
sender = pre.fund_eoa()
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate.py b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate.py
index 4d3b571f233..049cca14064 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate.py
@@ -67,7 +67,10 @@ def test_txcreate_then_dataload(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies that a contract returned with auxdata does not overwrite the parent data."""
+ """
+ Verifies that a contract returned with auxdata does not overwrite the
+ parent data.
+ """
env = Environment()
sender = pre.fund_eoa()
small_auxdata_container = Container(
@@ -110,7 +113,9 @@ def test_txcreate_then_dataload(
@pytest.mark.with_all_evm_code_types
def test_txcreate_then_call(state_test: StateTestFiller, pre: Alloc, evm_code_type: EVMCodeType):
- """Verifies a simple TXCREATE case, and then calls the deployed contract."""
+ """
+ Verifies a simple TXCREATE case, and then calls the deployed contract.
+ """
env = Environment()
callable_contract = Container(
sections=[
@@ -294,8 +299,8 @@ def test_txcreate_in_initcode(
"""
Verifies an TXCREATE occurring within initcode creates that contract.
- Via the `outer_create_reverts` also verifies a TXCREATE occurring in an initcode is rolled back
- when the initcode reverts.
+ Via the `outer_create_reverts` also verifies a TXCREATE occurring in an
+ initcode is rolled back when the initcode reverts.
"""
smallest_initcode_subcontainer_hash = smallest_initcode_subcontainer.hash
inner_create_bytecode = (
@@ -303,8 +308,8 @@ def test_txcreate_in_initcode(
if inner_create_opcode == Op.TXCREATE
else Op.EOFCREATE[1](0, 0, 0, 0)
)
- # The terminating code of the inner initcontainer, the RJUMPI is a trick to not need to deal
- # with the subcontainer indices
+ # The terminating code of the inner initcontainer, the RJUMPI is a trick to
+ # not need to deal with the subcontainer indices
revert_code = Op.REVERT(0, 0)
terminating_code = (
Op.RJUMPI[len(revert_code)](0) + revert_code + Op.RETURNCODE[0](0, 0)
@@ -385,7 +390,10 @@ def test_return_data_cleared(
pre: Alloc,
evm_code_type: EVMCodeType,
):
- """Verifies the return data is not reused from a extcall but is cleared upon TXCREATE."""
+ """
+    Verifies the return data is not reused from an extcall but is cleared upon
+ TXCREATE.
+ """
env = Environment()
value_return_canary = 0x4158675309
value_return_canary_size = 5
@@ -472,8 +480,10 @@ def test_address_collision(
contract_address: Account(
storage={
slot_create_address: salt_zero_address,
- slot_create_address_2: TXCREATE_FAILURE, # had an in-transaction collision
- slot_create_address_3: TXCREATE_FAILURE, # had a pre-existing collision
+ # had an in-transaction collision
+ slot_create_address_2: TXCREATE_FAILURE,
+ # had a pre-existing collision
+ slot_create_address_3: TXCREATE_FAILURE,
slot_code_worked: value_code_worked,
}
)
@@ -494,7 +504,10 @@ def test_txcreate_revert_eof_returndata(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies the return data is not being deployed, even if happens to be valid EOF."""
+ """
+    Verifies the return data is not being deployed, even if it happens to be valid
+ EOF.
+ """
env = Environment()
code_reverts_with_calldata = Container(
name="Initcode Subcontainer reverting with its calldata",
@@ -602,7 +615,8 @@ def test_txcreate_context(
elif expected_result == "selfbalance":
expected_bytes = txcreate_value
elif expected_result == "factorybalance":
- # Factory receives value from sender and passes on eofcreate_value as endowment.
+        # Factory receives value from sender and passes on txcreate_value as
+ # endowment.
expected_bytes = value - txcreate_value
else:
raise TypeError("Unexpected expected_result", expected_result)
@@ -634,7 +648,10 @@ def test_txcreate_memory_context(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies an TXCREATE frame enjoys a separate EVM memory from its caller frame."""
+ """
+ Verifies an TXCREATE frame enjoys a separate EVM memory from its caller
+ frame.
+ """
env = Environment()
destination_storage = Storage()
contract_storage = Storage()
@@ -680,7 +697,10 @@ def test_short_data_subcontainer(
state_test: StateTestFiller,
pre: Alloc,
):
- """Deploy a subcontainer where the data is "short" and filled by deployment code."""
+ """
+ Deploy a subcontainer where the data is "short" and filled by deployment
+ code.
+ """
env = Environment()
sender = pre.fund_eoa()
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_failures.py b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_failures.py
index d44588aaed5..2fd81705454 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_failures.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_failures.py
@@ -177,8 +177,8 @@ def test_initcode_aborts(
"""
-Size of the initcode portion of test_txcreate_deploy_sizes, but as the runtime code is dynamic, we
-have to use a pre-calculated size
+Size of the initcode portion of test_txcreate_deploy_sizes, but as the runtime
+code is dynamic, we have to use a pre-calculated size
"""
initcode_size = 32
@@ -199,7 +199,11 @@ def test_txcreate_deploy_sizes(
pre: Alloc,
target_deploy_size: int,
):
- """Verifies a mix of runtime contract sizes mixing success and multiple size failure modes."""
+ """
+ Verify a mix of runtime contract sizes.
+
+ This mixes success and multiple size failure modes.
+ """
env = Environment()
runtime_container = Container(
@@ -237,9 +241,9 @@ def test_txcreate_deploy_sizes(
+ Op.SSTORE(slot_code_worked, value_code_worked)
+ Op.STOP
)
- # Storage in 0 should have the address,
- # Storage 1 is a canary of 1 to make sure it tried to execute, which also covers cases of
- # data+code being greater than initcode_size_max, which is allowed.
+ # Storage in 0 should have the address, Storage 1 is a canary of 1 to make
+ # sure it tried to execute, which also covers cases of data+code being
+ # greater than initcode_size_max, which is allowed.
success = target_deploy_size <= MAX_BYTECODE_SIZE
post = {
contract_address: Account(
@@ -277,7 +281,9 @@ def test_txcreate_deploy_sizes(
],
)
def test_auxdata_size_failures(state_test: StateTestFiller, pre: Alloc, auxdata_size: int):
- """Exercises a number of auxdata size violations, and one maxcode success."""
+ """
+ Exercises a number of auxdata size violations, and one maxcode success.
+ """
env = Environment()
auxdata_bytes = b"a" * auxdata_size
@@ -305,7 +311,9 @@ def test_auxdata_size_failures(state_test: StateTestFiller, pre: Alloc, auxdata_
deployed_container_size = len(smallest_runtime_subcontainer) + auxdata_size
- # Storage in 0 will have address in first test, 0 in all other cases indicating failure
+ # Storage in 0 will have address in first test, 0 in all other cases
+ # indicating failure
+ #
# Storage 1 in 1 is a canary to see if TXCREATE opcode halted
success = deployed_container_size <= MAX_BYTECODE_SIZE
post = {
@@ -347,8 +355,8 @@ def test_txcreate_insufficient_stipend(
value: int,
):
"""
- Exercises an TXCREATE that fails because the calling account does not have enough ether to
- pay the stipend.
+ Exercises a TXCREATE that fails because the calling account does not have
+ enough ether to pay the stipend.
"""
env = Environment()
sender = pre.fund_eoa(10**11)
@@ -362,7 +370,9 @@ def test_txcreate_insufficient_stipend(
+ Op.STOP,
balance=value - 1,
)
- # create will fail but not trigger a halt, so canary at storage 1 should be set
+ # create will fail but not trigger a halt, so canary at storage 1
+ # should be set
+ #
# also validate target created contract fails
post = {
contract_address: Account(
@@ -384,7 +394,9 @@ def test_txcreate_insufficient_stipend(
@pytest.mark.with_all_evm_code_types
def test_insufficient_initcode_gas(state_test: StateTestFiller, pre: Alloc, fork: Fork):
- """Exercises an TXCREATE when there is not enough gas for the constant charge."""
+ """
+ Exercises a TXCREATE when there is not enough gas for the constant charge.
+ """
env = Environment()
initcode_container = Container(
@@ -440,7 +452,10 @@ def test_insufficient_gas_memory_expansion(
pre: Alloc,
fork: Fork,
):
- """Exercises TXCREATE when the memory for auxdata has not been expanded but is requested."""
+ """
+ Exercises TXCREATE when the memory for auxdata has not been expanded but is
+ requested.
+ """
env = Environment()
auxdata_size = 0x5000
@@ -493,7 +508,10 @@ def test_insufficient_returncode_auxdata_gas(
pre: Alloc,
fork: Fork,
):
- """Exercises a RETURNCODE when there is not enough gas for the initcode charge."""
+ """
+ Exercises a RETURNCODE when there is not enough gas for the initcode
+ charge.
+ """
env = Environment()
auxdata_size = 0x5000
@@ -517,8 +535,8 @@ def test_insufficient_returncode_auxdata_gas(
slot_code_worked: value_canary_to_be_overwritten,
},
)
- # 63/64ths is not enough to cover RETURNCODE memory expansion. Unfortunately the 1/64th left
- # won't realistically accommodate a SSTORE
+ # 63/64ths is not enough to cover RETURNCODE memory expansion.
+ # Unfortunately the 1/64th left won't realistically accommodate a SSTORE
auxdata_size_words = (auxdata_size + 31) // 32
gas_limit = (
32_000
@@ -555,7 +573,8 @@ def test_insufficient_returncode_auxdata_gas(
Op.EXTSTATICCALL,
],
)
-@pytest.mark.parametrize("endowment", [0, 1]) # included to verify static flag check comes first
+# `endowment` values included to verify static flag check comes first.
+@pytest.mark.parametrize("endowment", [0, 1])
@pytest.mark.parametrize(
"initcode",
[smallest_initcode_subcontainer, aborting_container],
@@ -582,7 +601,8 @@ def test_static_flag_txcreate(
)
calling_address = pre.deploy_contract(
calling_code,
- # Need to override the global value from the `with_all_evm_code_types` marker.
+ # Need to override the global value from the `with_all_evm_code_types`
+ # marker.
evm_code_type=EVMCodeType.EOF_V1 if opcode == Op.EXTSTATICCALL else EVMCodeType.LEGACY,
)
@@ -626,14 +646,17 @@ def test_eof_txcreate_msg_depth(
):
"""
Test TXCREATE handles msg depth limit correctly (1024).
- NOTE: due to block gas limit and the 63/64th rule this limit is unlikely to be hit
- on mainnet.
- NOTE: See `tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py::test_eof_calls_msg_depth`
- for more explanations and comments. Most notable deviation from that test is that here
- calls and `TXCREATE`s alternate in order to reach the max depth. `who_fails` decides
- whether the failing depth 1024 will be on a call or on an `TXCREATE` to happen.
- """ # noqa: E501
- # Not a precise gas_limit formula, but enough to exclude risk of gas causing the failure.
+ NOTE: due to block gas limit and the 63/64th rule this limit is
+ unlikely to be hit on mainnet.
+ NOTE: See
+ `tests/unscheduled/eip7692_eof_v1/eip7069_extcall/test_calls.py::
+ test_eof_calls_msg_depth` for more explanations and comments.
+ Most notable deviation from that test is that here calls and `TXCREATE`s
+ alternate in order to reach the max depth. `who_fails` decides whether
+ the failing depth 1024 will be on a call or on a `TXCREATE` to happen.
+ """
+ # Not a precise gas_limit formula, but enough to exclude risk of gas
+ # causing the failure.
gas_limit = int(20000000 * (64 / 63) ** 1024)
env = Environment(gas_limit=gas_limit)
@@ -694,8 +717,9 @@ def test_eof_txcreate_msg_depth(
)
)
- # Only bumps the msg call depth "register" and forwards to the `calling_contract_address`.
- # If it is used it makes the "failing" depth of 1024 to happen on TXCREATE, instead of CALL.
+ # Only bumps the msg call depth "register" and forwards to the
+ # `calling_contract_address`. If it is used it makes the "failing" depth of
+ # 1024 to happen on TXCREATE, instead of CALL.
passthrough_address = pre.deploy_contract(
Container.Code(
Op.MSTORE(0, 1) + Op.EXTCALL(address=calling_contract_address, args_size=32) + Op.STOP
@@ -734,12 +758,15 @@ def test_reentrant_txcreate(
state_test: StateTestFiller,
pre: Alloc,
):
- """Verifies a reentrant TXCREATE case, where EIP-161 prevents conflict via nonce bump."""
+ """
+ Verifies a reentrant TXCREATE case, where EIP-161 prevents conflict via
+ nonce bump.
+ """
env = Environment()
# Calls into the factory contract with 1 as input.
reenter_code = Op.MSTORE(0, 1) + Op.EXTCALL(address=Op.CALLDATALOAD(32), args_size=32)
- # Initcode: if given 0 as 1st word of input will call into the factory again.
- # 2nd word of input is the address of the factory.
+ # Initcode: if given 0 as 1st word of input will call into the factory
+ # again. 2nd word of input is the address of the factory.
initcontainer = Container(
sections=[
Section.Code(
@@ -753,12 +780,15 @@ def test_reentrant_txcreate(
]
)
initcode_hash = initcontainer.hash
- # Factory: Passes on its input into the initcode. It's 0 first time, 1 the second time.
- # Saves the result of deployment in slot 0 first time, 1 the second time.
+ # Factory:
+ # Passes on its input into the initcode.
+ # It's 0 first time, 1 the second time.
+ # Saves the result of deployment in slot 0 first time, 1 the second time.
contract_address = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, 32)
+ Op.MSTORE(32, Op.ADDRESS)
- # 1st word - copied from input (reenter flag), 2nd word - `this.address`.
+ # 1st word - copied from input (reenter flag)
+ # 2nd word - `this.address`
+ Op.SSTORE(
Op.CALLDATALOAD(0),
Op.TXCREATE(tx_initcode_hash=initcode_hash, input_size=64),
@@ -766,13 +796,15 @@ def test_reentrant_txcreate(
+ Op.STOP,
storage={0: 0xB17D, 1: 0xB17D}, # a canary to be overwritten
)
- # Flow is: reenter flag 0 -> factory -> reenter flag 0 -> initcode -> reenter ->
- # reenter flag 1 -> factory -> reenter flag 1 -> (!) initcode -> stop,
- # if the EIP-161 nonce bump is not implemented. If it is, it fails before second
- # inicode marked (!).
+ # Flow is: reenter flag 0 -> factory -> reenter flag 0 -> initcode
+ # -> reenter -> reenter flag 1 -> factory -> reenter flag 1
+ # -> (!) initcode -> stop,
+ # if the EIP-161 nonce bump is not implemented. If it is, it fails before
+ # second initcode marked (!).
# Storage in 0 should have the address from the outer TXCREATE.
# Storage in 1 should have 0 from the inner TXCREATE.
- # For the created contract storage in `slot_counter` should be 1 as initcode executes only once
+ # For the created contract storage in `slot_counter` should be 1 as
+ # initcode executes only once
post = {
contract_address: Account(
storage={
@@ -814,7 +846,10 @@ def test_invalid_container_deployment(
pre: Alloc,
reason: str,
):
- """Verify contract is not deployed when an invalid container deployment is attempted."""
+ """
+ Verify contract is not deployed when an invalid container deployment is
+ attempted.
+ """
env = Environment()
sender = pre.fund_eoa()
diff --git a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_validates.py b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_validates.py
index 3dd9dc89652..1fca0845e6c 100644
--- a/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_validates.py
+++ b/tests/unscheduled/eip7692_eof_v1/eip7873_tx_create/test_txcreate_validates.py
@@ -88,8 +88,10 @@ def __str__(self) -> str:
class Factory(Enum):
"""
- Kinds of systems leading up to a call to TXCREATE. DIRECT just puts the TXCREATE in the
- code it generates, while *CALL ones call into another account which does the TXCREATE.
+ Kinds of systems leading up to a call to TXCREATE.
+
+ DIRECT just puts the TXCREATE in the code it generates, while *CALL ones
+ call into another account which does the TXCREATE.
"""
DIRECT = auto()
@@ -107,9 +109,10 @@ def creation_snippet(
input_size: int,
) -> Tuple[Bytecode, Address | None]:
"""
- Return snippet to cause TXCREATE to be called along with an address which
- will end up in the `compute_eofcreate_address`, or None if that would be the snippet
- itself.
+ Return snippet to cause TXCREATE to be called along with an address.
+
+ This will end up in the `compute_eofcreate_address` or None if that
+ would be the snippet itself.
"""
if evm_code_type not in [EVMCodeType.LEGACY, EVMCodeType.EOF_V1]:
raise Exception(f"Test needs to be updated for {evm_code_type}")
@@ -119,7 +122,8 @@ def creation_snippet(
)
# Snippet which returns the TXCREATE result to caller
callee_txcreate_code = Op.MSTORE(0, txcreate_code) + Op.RETURN(0, 32)
- # Snippet which recovers the TXCREATE result from returndata (wipes memory afterwards)
+ # Snippet which recovers the TXCREATE result from returndata (wipes
+ # memory afterwards)
returndataload_code = (
Op.RETURNDATALOAD
if evm_code_type == EVMCodeType.EOF_V1
@@ -175,8 +179,7 @@ def test_txcreate_validates(
access_list_a: bool,
):
"""
- Verifies proper validation of initcode on TXCREATE in various circumstances of the
- opcode.
+ Verifies proper validation of initcode on TXCREATE in various scenarios.
"""
env = Environment()
snippet_a, factory_address_a = factory_a.creation_snippet(
diff --git a/tests/unscheduled/eip7692_eof_v1/gas_test.py b/tests/unscheduled/eip7692_eof_v1/gas_test.py
index b2520dcbc8d..eaeeb1fd83c 100644
--- a/tests/unscheduled/eip7692_eof_v1/gas_test.py
+++ b/tests/unscheduled/eip7692_eof_v1/gas_test.py
@@ -43,9 +43,9 @@ def gas_test(
"""
Create State Test to check the gas cost of a sequence of EOF code.
- `setup_code` and `tear_down_code` are called multiple times during the test, and MUST NOT have
- any side-effects which persist across message calls, and in particular, any effects on the gas
- usage of `subject_code`.
+ `setup_code` and `tear_down_code` are called multiple times during the
+ test, and MUST NOT have any side-effects which persist across message
+ calls, and in particular, any effects on the gas usage of `subject_code`.
"""
if cold_gas <= 0:
raise ValueError(f"Target gas allocations (cold_gas) must be > 0, got {cold_gas}")
@@ -68,7 +68,8 @@ def gas_test(
balance=subject_balance,
address=subject_address,
)
- # 2 times GAS, POP, CALL, 6 times PUSH1 - instructions charged for at every gas run
+ # 2 times GAS, POP, CALL, 6 times PUSH1 - instructions charged for at every
+ # gas run
gas_single_gas_run = 2 * 2 + 2 + WARM_ACCOUNT_ACCESS_GAS + 6 * 3
address_legacy_harness = pre.deploy_contract(
code=(
@@ -109,11 +110,15 @@ def gas_test(
+ (Op.DUP2 + Op.SWAP1 + Op.SUB + Op.PUSH2(slot_cold_gas) + Op.SSTORE)
+ (
(
- # do an oog gas run, unless skipped with `out_of_gas_testing=False`:
- # - DUP7 is the gas of the baseline gas run, after other CALL args were pushed
+ # do an oog gas run, unless skipped with
+ # `out_of_gas_testing=False`:
+ #
+ # - DUP7 is the gas of the baseline gas run, after other
+ # CALL args were pushed
# - subtract the gas charged by the harness
# - add warm gas charged by the subject
- # - subtract `oog_difference` to cause OOG exception (1 by default)
+ # - subtract `oog_difference` to cause OOG exception
+ # (1 by default)
Op.SSTORE(
slot_oog_call_result,
Op.CALL(
@@ -121,7 +126,8 @@ def gas_test(
address=address_subject,
),
)
- # sanity gas run: not subtracting 1 to see if enough gas makes the call succeed
+ # sanity gas run: not subtracting 1 to see if enough gas
+ # makes the call succeed
+ Op.SSTORE(
slot_sanity_call_result,
Op.CALL(
@@ -135,7 +141,8 @@ def gas_test(
else Op.STOP
)
),
- evm_code_type=EVMCodeType.LEGACY, # Needs to be legacy to use GAS opcode
+ # Needs to be legacy to use the GAS opcode.
+ evm_code_type=EVMCodeType.LEGACY,
)
post = {
diff --git a/tox.ini b/tox.ini
index 43b46960077..c8fb2670d31 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,6 +23,9 @@ extras = lint
commands =
ruff check --no-fix --show-fixes {[testenv]python_source_dirs}
ruff format --check {[testenv]python_source_dirs}
+ # The following ensures docstring and comment line length is checked to 79 characters
+ # see https://github.com/ethereum/execution-spec-tests/issues/2134
+ ruff check --select W505 --config 'lint.pycodestyle.max-doc-length = 79' {[testenv]python_source_dirs}
[testenv:typecheck]
description = Run type checking (mypy)