33 changes: 33 additions & 0 deletions .github/actions/get-changed-files/action.yaml
@@ -0,0 +1,33 @@
name: Get Changed Files
description: Get list of changed files between base and head commits

outputs:
file_list:
description: Path to the file containing the list of changed files
value: ${{ steps.get-changed-files.outputs.file_list }}

runs:
using: composite
steps:
- name: Get changed files and save to disk
id: get-changed-files
shell: bash
run: |
if [ "${{ github.event_name }}" = "pull_request" ]; then
BASE_SHA=$(git merge-base "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}")
HEAD_SHA="${{ github.event.pull_request.head.sha }}"
else
# On push or force push to the feature branch
BASE_SHA=$(git merge-base "${{ github.event.before }}" "${{ github.sha }}")
HEAD_SHA="${{ github.sha }}"
fi

echo "Diffing commits: $BASE_SHA..$HEAD_SHA"

# Get changed files and save to disk (use absolute path for reliability)
FILE_LIST="${GITHUB_WORKSPACE}/changed_files.txt"
git diff --name-only "$BASE_SHA" "$HEAD_SHA" > "$FILE_LIST"
echo "Changed files saved to $FILE_LIST"
echo "file_list=$FILE_LIST" >> $GITHUB_OUTPUT
echo "List of files changed in the PR"
cat $FILE_LIST
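
For reference, the file this action writes is just the newline-separated output of git diff --name-only. The short Python sketch below is not part of this PR and the helper name is made up; it only illustrates how a consumer could read that list back:

    # Sketch only (not part of this PR): read the newline-separated path
    # list written by the get-changed-files action.
    from pathlib import Path
    from typing import List

    def read_changed_files(file_list: str) -> List[str]:
        """Return the non-empty paths recorded in the changed-files file."""
        lines = Path(file_list).read_text().splitlines()
        return [line.strip() for line in lines if line.strip()]

    # Example: the workflow passes this path via the action's file_list output.
    print(read_changed_files("changed_files.txt"))
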
26 changes: 5 additions & 21 deletions .github/workflows/test.yaml
@@ -94,27 +94,8 @@ jobs:
with:
python-version: "3.11"
- uses: ./.github/actions/setup-env
- name: Get changed files and save to disk
- uses: ./.github/actions/get-changed-files
id: get-changed-files
run: |
if [ "${{ github.event_name }}" = "pull_request" ]; then
BASE_SHA=$(git merge-base "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}")
HEAD_SHA="${{ github.event.pull_request.head.sha }}"
else
# On push or force push to the feature branch
BASE_SHA=$(git merge-base "${{ github.event.before }}" "${{ github.sha }}")
HEAD_SHA="${{ github.sha }}"
fi

echo "Diffing commits: $BASE_SHA..$HEAD_SHA"

# Get changed files and save to disk
FILE_LIST="changed_files.txt"
git diff --name-only "$BASE_SHA" "$HEAD_SHA" > "$FILE_LIST"
echo "Changed files saved to $FILE_LIST"
echo "file_list=$FILE_LIST" >> $GITHUB_OUTPUT
echo "List of files changed in the PR"
cat $FILE_LIST
- name: Run json infra tests
run: tox -e json_infra -- --file-list="${{ steps.get-changed-files.outputs.file_list }}"
- name: Upload coverage reports to Codecov
@@ -131,13 +112,16 @@ jobs:
- uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955
with:
submodules: recursive
fetch-depth: 0 # Fetch full history for commit comparison
- name: Setup Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065
with:
python-version: "3.11"
- uses: ./.github/actions/setup-env
- uses: ./.github/actions/get-changed-files
id: get-changed-files
- name: Run optimized tests
run: tox -e optimized
run: tox -e optimized -- --file-list="${{ steps.get-changed-files.outputs.file_list }}"

tests_pytest_py3:
runs-on: [self-hosted-ghr, size-xl-x64]
5 changes: 4 additions & 1 deletion tests/json_infra/conftest.py
Expand Up @@ -126,6 +126,7 @@ def pytest_configure(config: Config) -> None:
ethereum.trace.set_evm_trace(Eip3155Tracer())

# Process fork range options
optimized = config.getoption("optimized")
desired_fork = config.getoption("single_fork", "")
forks_from = config.getoption("forks_from", "")
forks_until = config.getoption("forks_until", "")
@@ -162,7 +163,9 @@ def pytest_configure(config: Config) -> None:
# Extract the fork range
desired_forks = all_forks[start_idx:end_idx]
elif file_list:
desired_forks = extract_affected_forks(config.rootpath, file_list)
desired_forks = extract_affected_forks(
config.rootpath, file_list, optimized
)
else:
desired_forks = all_forks

2 changes: 2 additions & 0 deletions tests/json_infra/helpers/__init__.py
@@ -3,8 +3,10 @@
from .fixtures import ALL_FIXTURE_TYPES, Fixture, FixturesFile, FixtureTestItem
from .load_blockchain_tests import BlockchainTestFixture
from .load_state_tests import StateTestFixture
from .load_vm_tests import VmTestFixture

ALL_FIXTURE_TYPES.append(BlockchainTestFixture)
ALL_FIXTURE_TYPES.append(StateTestFixture)
ALL_FIXTURE_TYPES.append(VmTestFixture)

__all__ = ["ALL_FIXTURE_TYPES", "Fixture", "FixturesFile", "FixtureTestItem"]
21 changes: 21 additions & 0 deletions tests/json_infra/helpers/exceptional_test_patterns.py
@@ -140,3 +140,24 @@ def exceptional_state_test_patterns(
expected_fail=tuple(),
big_memory=tuple(),
)


def exceptional_vm_test_patterns(
json_fork: str, eels_fork: str
) -> TestPatterns:
"""
Returns patterns for slow, ignored, and big-memory VM tests for a
given json_fork and eels_fork.
"""
del json_fork, eels_fork # Not used for VM tests currently

slow_tests = (
# vmPerformance tests are slow
"vmPerformance/",
)

return TestPatterns(
slow=tuple(re.compile(p) for p in slow_tests),
expected_fail=tuple(),
big_memory=tuple(),
)
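
These patterns are matched against pytest node IDs when the VmTest items are created (see the VmTest constructor later in this diff). A tiny illustration, with a made-up node ID:

    # Illustration only: the node ID below is made up.
    import re

    slow = (re.compile("vmPerformance/"),)
    nodeid = "LegacyTests/vmPerformance/loopMul.json::constantinople"
    print(any(p.search(nodeid) for p in slow))  # True -> marked "slow"
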
184 changes: 164 additions & 20 deletions tests/json_infra/helpers/load_vm_tests.py
@@ -1,12 +1,15 @@
"""Helper class to load and run VM tests."""

import json
import os
from importlib import import_module
from typing import Any, List
from pathlib import Path
from typing import Any, Dict, Generator, List, Tuple

import pytest
from _pytest.config import Config
from _pytest.nodes import Item
from ethereum_rlp import rlp
from ethereum_types.numeric import U64, U256, Uint
from pytest import Collector

from ethereum.crypto.hash import keccak256
from ethereum.utils.hexadecimal import (
@@ -16,6 +19,27 @@
hex_to_uint,
)

from ..hardfork import TestHardfork
from ..stash_keys import desired_forks_key
from .exceptional_test_patterns import exceptional_vm_test_patterns
from .fixtures import Fixture, FixturesFile, FixtureTestItem


def _get_vm_forks() -> List[TestHardfork]:
"""
Get the list of forks for which VM tests should run.

VM tests are only run for legacy forks up to Constantinople.
"""
all_forks = list(TestHardfork.discover())
constantinople = next(
f for f in all_forks if f.short_name == "constantinople"
)
return [f for f in all_forks if f.criteria <= constantinople.criteria]


VM_FORKS: List[TestHardfork] = _get_vm_forks()


class VmTestLoader:
"""
Expand Down Expand Up @@ -61,13 +85,11 @@ def __init__(self, network: str, fork_name: str):
def _module(self, name: str) -> Any:
return import_module(f"ethereum.forks.{self.fork_name}.{name}")

def run_test(
self, test_dir: str, test_file: str, check_gas_left: bool = True
) -> None:
def run_test_from_dict(self, json_data: Dict[str, Any]) -> None:
"""
Execute a test case and check its post state.
Execute a test case from parsed JSON data and check its post state.
"""
test_data = self.load_test(test_dir, test_file)
test_data = self.prepare_test_data(json_data)
block_env = test_data["block_env"]
tx_env = test_data["tx_env"]
tx = test_data["tx"]
@@ -81,8 +103,6 @@ def run_test(
output = self.process_message_call(message)

if test_data["has_post_state"]:
if check_gas_left:
assert output.gas_left == test_data["expected_gas_left"]
assert (
keccak256(rlp.encode(output.logs))
== test_data["expected_logs_hash"]
@@ -101,15 +121,10 @@ def run_test(
self.close_state(block_env.state)
self.close_state(test_data["expected_post_state"])

def load_test(self, test_dir: str, test_file: str) -> Any:
def prepare_test_data(self, json_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Read tests from a file.
Prepare test data from parsed JSON.
"""
test_name = os.path.splitext(test_file)[0]
path = os.path.join(test_dir, test_file)
with open(path, "r") as fp:
json_data = json.load(fp)[test_name]

block_env = self.json_to_block_env(json_data)

tx = self.Transaction(
@@ -156,9 +171,8 @@ def json_to_block_env(self, json_data: Any) -> Any:
# Hence creating a dummy caller state.
if caller_hex_address not in json_data["pre"]:
value = json_data["exec"]["value"]
json_data["pre"][caller_hex_address] = (
self.get_dummy_account_state(value)
)
dummy_state = self.get_dummy_account_state(value)
json_data["pre"][caller_hex_address] = dummy_state

current_state = self.json_to_state(json_data["pre"])

@@ -226,3 +240,133 @@ def get_dummy_account_state(self, min_balance: str) -> Any:
"nonce": "0x00",
"storage": {},
}


class VmTest(FixtureTestItem):
"""Single VM test case item for a specific fork."""

fork_name: str
eels_fork: str

def __init__(
self,
*args: Any,
fork_name: str,
eels_fork: str,
**kwargs: Any,
) -> None:
"""Initialize a single VM test case item."""
super().__init__(*args, **kwargs)
self.fork_name = fork_name
self.eels_fork = eels_fork
self.add_marker(pytest.mark.fork(self.fork_name))
self.add_marker("vm_test")

# Mark tests with exceptional markers
test_patterns = exceptional_vm_test_patterns(fork_name, eels_fork)
if any(x.search(self.nodeid) for x in test_patterns.slow):
self.add_marker("slow")

@property
def vm_test_fixture(self) -> "VmTestFixture":
"""Return the VM test fixture this test belongs to."""
parent = self.parent
assert parent is not None
assert isinstance(parent, VmTestFixture)
return parent

@property
def test_key(self) -> str:
"""Return the key of the VM test fixture in the fixture file."""
return self.vm_test_fixture.test_key

@property
def fixtures_file(self) -> FixturesFile:
"""Fixtures file from which the test fixture was collected."""
return self.vm_test_fixture.fixtures_file

@property
def test_dict(self) -> Dict[str, Any]:
"""Load test from disk."""
loaded_file = self.fixtures_file.data
return loaded_file[self.test_key]

def runtest(self) -> None:
"""Run a VM test from JSON test case data."""
loader = VmTestLoader(self.fork_name, self.eels_fork)
loader.run_test_from_dict(self.test_dict)

def reportinfo(self) -> Tuple[Path, int, str]:
"""Return information for test reporting."""
return self.path, 1, self.name


class VmTestFixture(Fixture, Collector):
"""
VM test fixture from a JSON file that yields test items for each
supported fork.
"""

@classmethod
def is_format(cls, test_dict: Dict[str, Any]) -> bool:
"""Return true if the object can be parsed as a VM test fixture."""
# VM tests have exec, env, and pre keys
if "exec" not in test_dict:
return False
if "env" not in test_dict:
return False
if "pre" not in test_dict:
return False
if "logs" not in test_dict:
return False
# Make sure it's not a state test (which has "transaction" and "post")
if "transaction" in test_dict:
return False
return True

@property
def fixtures_file(self) -> FixturesFile:
"""Fixtures file from which the test fixture was collected."""
parent = self.parent
assert parent is not None
assert isinstance(parent, FixturesFile)
return parent

@property
def test_dict(self) -> Dict[str, Any]:
"""Load test from disk."""
loaded_file = self.fixtures_file.data
return loaded_file[self.test_key]

def collect(self) -> Generator[Item | Collector, None, None]:
"""Collect VM test cases for each supported fork."""
desired_forks: List[str] = self.config.stash.get(desired_forks_key, [])

for fork in VM_FORKS:
if fork.json_test_name not in desired_forks:
continue
yield VmTest.from_parent(
parent=self,
name=fork.json_test_name,
fork_name=fork.json_test_name,
eels_fork=fork.short_name,
)

@classmethod
def has_desired_fork(
cls,
test_dict: Dict[str, Any], # noqa: ARG003
config: Config,
) -> bool:
"""
Check if any of the VM test forks are in the desired forks list.
"""
desired_forks = config.stash.get(desired_forks_key, None)
if desired_forks is None:
return True

# Check if any VM fork is in the desired forks
for fork in VM_FORKS:
if fork.json_test_name in desired_forks:
return True
return False
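
To make the format detection concrete, here is a small usage sketch. The dictionaries are minimal made-up shapes rather than real fixtures, and the import assumes the module path introduced in this diff plus an installed test environment for the repository:

    # Usage sketch: the dicts are minimal made-up shapes, not real fixtures.
    from tests.json_infra.helpers.load_vm_tests import VmTestFixture

    vm_case = {"exec": {}, "env": {}, "pre": {}, "logs": "0x00"}
    state_case = {"env": {}, "pre": {}, "transaction": {}, "post": {}}

    print(VmTestFixture.is_format(vm_case))     # True: exec/env/pre/logs present
    print(VmTestFixture.is_format(state_case))  # False: no "exec" key
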