diff --git a/LICENSE b/LICENSE
index e402eaa8bf..210c8d5969 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright 2017 Piper Merriam
+Copyright 2017-2018 Ethereum Foundation
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/docs/api/evm/api.db.rst b/docs/api/evm/api.db.rst
new file mode 100644
index 0000000000..b2dd21d667
--- /dev/null
+++ b/docs/api/evm/api.db.rst
@@ -0,0 +1,13 @@
+Database
+========
+
+
+.. toctree::
+   :maxdepth: 4
+   :name: toc-evm-api-db
+   :caption: Database
+
+   db/api.db.backends
+   db/api.db.account
+   db/api.db.journal
+   db/api.db.chain
\ No newline at end of file
diff --git a/docs/api/evm/db/api.db.account.rst b/docs/api/evm/db/api.db.account.rst
new file mode 100644
index 0000000000..f3199b3fce
--- /dev/null
+++ b/docs/api/evm/db/api.db.account.rst
@@ -0,0 +1,14 @@
+Account
+=======
+
+BaseAccountDB
+-------------
+
+.. autoclass:: evm.db.account.BaseAccountDB
+    :members:
+
+AccountDB
+---------
+
+.. autoclass:: evm.db.account.AccountDB
+    :members:
diff --git a/docs/api/evm/db/api.db.backends.rst b/docs/api/evm/db/api.db.backends.rst
new file mode 100644
index 0000000000..c65fc4bbdc
--- /dev/null
+++ b/docs/api/evm/db/api.db.backends.rst
@@ -0,0 +1,20 @@
+Backends
+========
+
+BaseDB
+------
+
+.. autoclass:: evm.db.backends.base.BaseDB
+    :members:
+
+LevelDB
+-------
+
+.. autoclass:: evm.db.backends.level.LevelDB
+    :members:
+
+MemoryDB
+--------
+
+.. autoclass:: evm.db.backends.memory.MemoryDB
+    :members:
\ No newline at end of file
diff --git a/docs/api/evm/db/api.db.chain.rst b/docs/api/evm/db/api.db.chain.rst
new file mode 100644
index 0000000000..96a2f71786
--- /dev/null
+++ b/docs/api/evm/db/api.db.chain.rst
@@ -0,0 +1,20 @@
+Chain
+=====
+
+BaseChainDB
+-----------
+
+.. autoclass:: evm.db.chain.BaseChainDB
+    :members:
+
+ChainDB
+-------
+
+.. autoclass:: evm.db.chain.ChainDB
+    :members:
+
+AsyncChainDB
+------------
+
+.. autoclass:: evm.db.chain.AsyncChainDB
+    :members:
\ No newline at end of file
diff --git a/docs/api/evm/db/api.db.journal.rst b/docs/api/evm/db/api.db.journal.rst
new file mode 100644
index 0000000000..3004cddcae
--- /dev/null
+++ b/docs/api/evm/db/api.db.journal.rst
@@ -0,0 +1,8 @@
+Journal
+=======
+
+JournalDB
+---------
+
+.. autoclass:: evm.db.journal.JournalDB
+    :members:
\ No newline at end of file
diff --git a/docs/api/evm/index.rst b/docs/api/evm/index.rst
index 455dbe0f0e..aee7d17371 100644
--- a/docs/api/evm/index.rst
+++ b/docs/api/evm/index.rst
@@ -10,5 +10,6 @@ This section aims to provide a detailed description of all APIs. If you are look
 
    api.chain
    api.computation
+   api.db
    api.state
    api.vm
diff --git a/docs/conf.py b/docs/conf.py
index 038e713901..fb90c0bbd3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,7 +59,7 @@
 
 # General information about the project.
 project = about['__name__']
-copyright = '2017, Piper Merriam, Jason Carver'
+copyright = '2017-2018 Ethereum Foundation'
 author = about['__author__']
 
 # The version info for the project you're documenting, acts as replacement for
diff --git a/docs/guides/trinity/quickstart.rst b/docs/guides/trinity/quickstart.rst
index 6d52660a62..155252dce4 100644
--- a/docs/guides/trinity/quickstart.rst
+++ b/docs/guides/trinity/quickstart.rst
@@ -199,43 +199,3 @@ For a list of JSON-RPC endpoints which are expected to work, see this issue: htt
 
 - Only a subset of JSON-RPC API calls are currently supported
 
-
-Release Notes
-~~~~~~~~~~~~~
-
-
-0.1.0-alpha.11
---------------
-
-- Bugfix for ``PreferredNodePeerPool`` to respect ``max_peers``
-
-
-0.1.0-alpha.10
---------------
-
-- More bugfixes to enforce ``--max-peers`` in ``PeerPool._connect_to_nodes``
-
-
-0.1.0-alpha.9
--------------
-
-- Bugfix to enforce ``--max-peers`` for incoming connections.
-
-
-0.1.0-alpha.7
--------------
-
-- Remove ``min_peers`` concept from ``PeerPool``
-- Add ``--max-peers`` and enforcement of maximum peer connections maintained by
-  the ``PeerPool``.
-
-
-0.1.0-alpha.6
--------------
-
-- Respond to ``GetBlockHeaders`` message during fast sync to prevent being disconnected as a *useless peer*.
-- Add ``--profile`` CLI flag to Trinity to enable profiling via ``cProfile``
-- Better error messaging with Trinity cannot determine the appropriate location for the data directory.
-- Handle ``ListDeserializationError`` during handshake.
-- Add ``net_version`` JSON-RPC endpoint.
-- Add ``web3_clientVersion`` JSON-RPC endpoint.
-- Handle ``rlp.DecodingError`` during handshake.
diff --git a/docs/index.rst b/docs/index.rst
index d0f28dd9d6..bb9a182469 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -10,6 +10,7 @@ Table of contents
 
    introduction
    quickstart
+   release_notes/index
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/release_notes/index.rst b/docs/release_notes/index.rst
new file mode 100644
index 0000000000..21be80f2b8
--- /dev/null
+++ b/docs/release_notes/index.rst
@@ -0,0 +1,10 @@
+Release notes
+=============
+
+Trinity and Py-EVM are moving fast. Learn about the latest improvements in the release notes.
+
+.. toctree::
+   :maxdepth: 2
+   :name: toc-release-notes
+
+   trinity.rst
diff --git a/docs/release_notes/trinity.rst b/docs/release_notes/trinity.rst
new file mode 100644
index 0000000000..351216d243
--- /dev/null
+++ b/docs/release_notes/trinity.rst
@@ -0,0 +1,40 @@
+Trinity
+=======
+
+
+0.1.0-alpha.11
+--------------
+
+- Bugfix for ``PreferredNodePeerPool`` to respect ``max_peers``
+
+
+0.1.0-alpha.10
+--------------
+
+- More bugfixes to enforce ``--max-peers`` in ``PeerPool._connect_to_nodes``
+
+
+0.1.0-alpha.9
+-------------
+
+- Bugfix to enforce ``--max-peers`` for incoming connections.
+
+
+0.1.0-alpha.7
+-------------
+
+- Remove ``min_peers`` concept from ``PeerPool``
+- Add ``--max-peers`` and enforcement of maximum peer connections maintained by
+  the ``PeerPool``.
+
+
+0.1.0-alpha.6
+-------------
+
+- Respond to ``GetBlockHeaders`` message during fast sync to prevent being disconnected as a *useless peer*.
+- Add ``--profile`` CLI flag to Trinity to enable profiling via ``cProfile``
+- Better error messaging when Trinity cannot determine the appropriate location for the data directory.
+- Handle ``ListDeserializationError`` during handshake.
+- Add ``net_version`` JSON-RPC endpoint.
+- Add ``web3_clientVersion`` JSON-RPC endpoint.
+- Handle ``rlp.DecodingError`` during handshake.
diff --git a/evm/utils/blobs.py b/evm/utils/blobs.py
index f1470c785f..98d8dbd5f1 100644
--- a/evm/utils/blobs.py
+++ b/evm/utils/blobs.py
@@ -1,5 +1,3 @@
-import itertools
-import math
 from io import (
     BytesIO,
 )
@@ -14,11 +12,12 @@
     int_to_big_endian,
 )
 
-from eth_hash.auto import keccak
-
 from evm.utils.padding import (
     zpad_right,
 )
+from evm.utils.merkle import (
+    calc_merkle_root,
+)
 
 from evm.constants import (
     CHUNK_SIZE,
@@ -31,11 +30,6 @@
     ValidationError,
 )
 
-from cytoolz import (
-    partition,
-    pipe,
-)
-
 from typing import (
     cast,
 )
@@ -50,31 +44,9 @@ def iterate_chunks(collation_body: bytes) -> Iterator[Hash32]:
         yield cast(Hash32, collation_body[chunk_start:chunk_start + CHUNK_SIZE])
 
 
-def hash_layer(layer: Iterable[Hash32]) -> Iterator[Hash32]:
-    for left, right in partition(2, layer):
-        yield keccak(left + right)
-
-
-def calc_merkle_root(leaves: Iterable[Hash32]) -> Hash32:
-    leaves = list(leaves)  # convert potential iterator to list
-    if len(leaves) == 0:
-        raise ValidationError("No leaves given")
-
-    n_layers = math.log2(len(leaves))
-    if not n_layers.is_integer():
-        raise ValidationError("Leave number is not a power of two")
-    n_layers = int(n_layers)
-
-    first_layer = (keccak(leaf) for leaf in leaves)
-
-    root, *extras = pipe(first_layer, *itertools.repeat(hash_layer, n_layers))
-    assert not extras, "Invariant: should only be a single value"
-    return root
-
-
 def calc_chunk_root(collation_body: bytes) -> Hash32:
     check_body_size(collation_body)
-    chunks = iterate_chunks(collation_body)
+    chunks = list(iterate_chunks(collation_body))
    return calc_merkle_root(chunks)
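Note: with this change calc_chunk_root delegates to the shared calc_merkle_root from
evm.utils.merkle (added below). A minimal usage sketch, assuming a body of exactly
COLLATION_SIZE bytes from evm.constants, since check_body_size() raises a
ValidationError for any other length:

    from evm.constants import COLLATION_SIZE
    from evm.utils.blobs import calc_chunk_root

    body = b"\x00" * COLLATION_SIZE       # collation bodies must be exactly this size
    chunk_root = calc_chunk_root(body)    # keccak Merkle root over the body's chunks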
+""" + +import math +from typing import ( + cast, + Hashable, + NewType, + Sequence, +) + +from cytoolz import ( + identity, + iterate, + partition, + reduce, + take, +) +from eth_hash.auto import ( + keccak, +) +from eth_typing import ( + Hash32, +) + +from evm.exceptions import ( + ValidationError, +) + + +MerkleTree = NewType("MerkleTree", Sequence[Sequence[Hash32]]) +MerkleProof = NewType("MerkleProof", Sequence[Hash32]) + + +def get_root(tree: MerkleTree) -> Hash32: + """Get the root hash of a Merkle tree.""" + return tree[0][0] + + +def get_branch_indices(node_index: int, depth: int) -> Sequence[int]: + """Get the indices of all ancestors up until the root for a node with a given depth.""" + return tuple(take(depth, iterate(lambda index: index // 2, node_index))) + + +def get_merkle_proof(tree: MerkleTree, item_index: int) -> Sequence[Hash32]: + """Read off the Merkle proof for an item from a Merkle tree.""" + if item_index < 0 or item_index >= len(tree[-1]): + raise ValidationError("Item index out of range") + + # special case of tree consisting of only root + if len(tree) == 1: + return () + + branch_indices = get_branch_indices(item_index, len(tree)) + proof_indices = [i ^ 1 for i in branch_indices][:-1] # get sibling by flipping rightmost bit + return tuple( + layer[proof_index] + for layer, proof_index + in zip(reversed(tree), proof_indices) + ) + + +def _calc_parent_hash(left_node: Hash32, right_node: Hash32) -> Hash32: + """Calculate the parent hash of a node and its sibling.""" + return keccak(left_node + right_node) + + +def verify_merkle_proof(root: Hash32, + item: Hashable, + item_index: int, + proof: MerkleProof) -> bool: + """Verify a Merkle proof against a root hash.""" + leaf = keccak(item) + branch_indices = get_branch_indices(item_index, len(proof)) + node_orderers = [ + identity if branch_index % 2 == 0 else reversed + for branch_index in branch_indices + ] + proof_root = reduce( + lambda n1, n2_and_order: _calc_parent_hash(*n2_and_order[1]([n1, n2_and_order[0]])), + zip(proof, node_orderers), + leaf, + ) + return proof_root == root + + +def _hash_layer(layer: Sequence[Hash32]) -> Sequence[Hash32]: + """Calculate the layer on top of another one.""" + return tuple(_calc_parent_hash(left, right) for left, right in partition(2, layer)) + + +def calc_merkle_tree(items: Sequence[Hashable]) -> MerkleTree: + """Calculate the Merkle tree corresponding to a list of items.""" + if len(items) == 0: + raise ValidationError("No items given") + n_layers = math.log2(len(items)) + 1 + if not n_layers.is_integer(): + raise ValidationError("Item number is not a power of two") + n_layers = int(n_layers) + + leaves = tuple(keccak(item) for item in items) + tree = cast(MerkleTree, tuple(take(n_layers, iterate(_hash_layer, leaves)))[::-1]) + if len(tree[0]) != 1: + raise Exception("Invariant: There must only be one root") + + return tree + + +def calc_merkle_root(items: Sequence[Hashable]) -> Hash32: + """Calculate the Merkle root corresponding to a list of items.""" + return get_root(calc_merkle_tree(items)) diff --git a/p2p/exceptions.py b/p2p/exceptions.py index 505172f93b..0a91c35082 100644 --- a/p2p/exceptions.py +++ b/p2p/exceptions.py @@ -135,3 +135,12 @@ class BadLESResponse(BaseP2PError): Raised when the response to a LES request doesn't contain the data we asked for. """ pass + + +class NoInternalAddressMatchesDevice(BaseP2PError): + """ + Raised when no internal IP address matches the UPnP device that is being configured. 
+ """ + def __init__(self, *args, device_hostname=None, **kwargs): + super.__init__(*args, **kwargs) + self.device_hostname = device_hostname diff --git a/p2p/nat.py b/p2p/nat.py new file mode 100644 index 0000000000..b25c7b42fe --- /dev/null +++ b/p2p/nat.py @@ -0,0 +1,174 @@ +import asyncio +from concurrent.futures import ( + ThreadPoolExecutor, +) +import ipaddress +from typing import ( + AsyncGenerator, + NamedTuple, +) +from urllib.parse import urlparse + +from p2p.cancel_token import ( + CancelToken, +) +from p2p.exceptions import ( + NoInternalAddressMatchesDevice, + OperationCancelled, +) +import netifaces +from p2p.service import BaseService +import upnpclient + + +# UPnP discovery can take a long time, so use a loooong timeout here. +UPNP_DISCOVER_TIMEOUT_SECONDS = 30 + + +class PortMapping(NamedTuple): + internal: str # of the form "192.2.3.4:56" + external: str # of the form "192.2.3.4:56" + + +def find_internal_ip_on_device_network(upnp_dev: upnpclient.upnp.Device) -> str: + """ + For a given UPnP device, return the internal IP address of this host machine that can + be used for a NAT mapping. + """ + parsed_url = urlparse(upnp_dev.location) + # Get an ipaddress.IPv4Network instance for the upnp device's network. + upnp_dev_net = ipaddress.ip_network(parsed_url.hostname + '/24', strict=False) + for iface in netifaces.interfaces(): + for family, addresses in netifaces.ifaddresses(iface).items(): + # TODO: Support IPv6 addresses as well. + if family != netifaces.AF_INET: + continue + for item in addresses: + if ipaddress.ip_address(item['addr']) in upnp_dev_net: + return item['addr'] + raise NoInternalAddressMatchesDevice(device_hostname=parsed_url.hostname) + + +class UPnPService(BaseService): + """ + Generate a mapping of external network IP address/port to internal IP address/port, + using the Universal Plug 'n' Play standard. + """ + + _nat_portmap_lifetime = 30 * 60 + + def __init__(self, port: int, token: CancelToken = None) -> None: + """ + :param port: The port that a server wants to bind to on this machine, and + make publicly accessible. + """ + super().__init__(token) + self.port = port + self._mapping: PortMapping = None # when called externally, this never returns None + + async def _run(self): + """Run an infinite loop refreshing our NAT port mapping. + + On every iteration we configure the port mapping with a lifetime of 30 minutes and then + sleep for that long as well. + """ + while self.is_running: + try: + # Wait for the port mapping lifetime, and then try registering it again + await self.wait(asyncio.sleep(self._nat_portmap_lifetime)) + await self.add_nat_portmap() + except OperationCancelled: + break + except Exception: + self.logger.exception("Failed to setup NAT portmap") + + async def _cleanup(self): + pass + + async def add_nat_portmap(self): + """ + Set up the port mapping + + :return: the IP address of the new mapping (or None if failed) + """ + self.logger.info("Setting up NAT portmap...") + try: + async for upnp_dev in self._discover_upnp_devices(): + try: + external_ip = await self._add_nat_portmap(upnp_dev) + except NoInternalAddressMatchesDevice as exc: + self.logger.info( + "No internal addresses were managed by the UPnP device at %s", + exc.device_hostname, + ) + continue + else: + return external_ip + except upnpclient.soap.SOAPError as e: + if e.args == (718, 'ConflictInMappingEntry'): + # An entry already exists with the parameters we specified. 
diff --git a/p2p/nat.py b/p2p/nat.py
new file mode 100644
index 0000000000..b25c7b42fe
--- /dev/null
+++ b/p2p/nat.py
@@ -0,0 +1,174 @@
+import asyncio
+from concurrent.futures import (
+    ThreadPoolExecutor,
+)
+import ipaddress
+from typing import (
+    AsyncGenerator,
+    NamedTuple,
+)
+from urllib.parse import urlparse
+
+import netifaces
+import upnpclient
+from p2p.cancel_token import (
+    CancelToken,
+)
+from p2p.exceptions import (
+    NoInternalAddressMatchesDevice,
+    OperationCancelled,
+)
+from p2p.service import BaseService
+
+
+# UPnP discovery can take a long time, so use a loooong timeout here.
+UPNP_DISCOVER_TIMEOUT_SECONDS = 30
+
+
+class PortMapping(NamedTuple):
+    internal: str  # of the form "192.2.3.4:56"
+    external: str  # of the form "192.2.3.4:56"
+
+
+def find_internal_ip_on_device_network(upnp_dev: upnpclient.upnp.Device) -> str:
+    """
+    For a given UPnP device, return the internal IP address of this host machine that can
+    be used for a NAT mapping.
+    """
+    parsed_url = urlparse(upnp_dev.location)
+    # Get an ipaddress.IPv4Network instance for the upnp device's network.
+    upnp_dev_net = ipaddress.ip_network(parsed_url.hostname + '/24', strict=False)
+    for iface in netifaces.interfaces():
+        for family, addresses in netifaces.ifaddresses(iface).items():
+            # TODO: Support IPv6 addresses as well.
+            if family != netifaces.AF_INET:
+                continue
+            for item in addresses:
+                if ipaddress.ip_address(item['addr']) in upnp_dev_net:
+                    return item['addr']
+    raise NoInternalAddressMatchesDevice(device_hostname=parsed_url.hostname)
+
+
+class UPnPService(BaseService):
+    """
+    Generate a mapping of external network IP address/port to internal IP address/port,
+    using the Universal Plug 'n' Play standard.
+    """
+
+    _nat_portmap_lifetime = 30 * 60
+
+    def __init__(self, port: int, token: CancelToken = None) -> None:
+        """
+        :param port: The port that a server wants to bind to on this machine, and
+            make publicly accessible.
+        """
+        super().__init__(token)
+        self.port = port
+        self._mapping: PortMapping = None  # set by add_nat_portmap(); None until a mapping succeeds
+
+    async def _run(self):
+        """Run an infinite loop refreshing our NAT port mapping.
+
+        On every iteration we sleep for the port mapping lifetime (30 minutes) and then
+        re-register the mapping; the initial mapping is made by the caller.
+        """
+        while self.is_running:
+            try:
+                # Wait for the port mapping lifetime, and then try registering it again
+                await self.wait(asyncio.sleep(self._nat_portmap_lifetime))
+                await self.add_nat_portmap()
+            except OperationCancelled:
+                break
+            except Exception:
+                self.logger.exception("Failed to setup NAT portmap")
+
+    async def _cleanup(self):
+        pass
+
+    async def add_nat_portmap(self):
+        """
+        Set up the port mapping
+
+        :return: the external IP address of the new mapping (or None if it failed)
+        """
+        self.logger.info("Setting up NAT portmap...")
+        try:
+            async for upnp_dev in self._discover_upnp_devices():
+                try:
+                    external_ip = await self._add_nat_portmap(upnp_dev)
+                except NoInternalAddressMatchesDevice as exc:
+                    self.logger.info(
+                        "No internal addresses were managed by the UPnP device at %s",
+                        exc.device_hostname,
+                    )
+                    continue
+                else:
+                    return external_ip
+        except upnpclient.soap.SOAPError as e:
+            if e.args == (718, 'ConflictInMappingEntry'):
+                # An entry already exists with the parameters we specified. Maybe the router
+                # didn't clean it up after it expired, or it was configured by another piece
+                # of software; either way we should not override it.
+                # https://tools.ietf.org/id/draft-ietf-pcp-upnp-igd-interworking-07.html#errors
+                self.logger.info("NAT port mapping already configured, not overriding it")
+            else:
+                self.logger.exception("Failed to setup NAT portmap")
+
+        self._mapping = None
+
+    def current_mapping(self) -> PortMapping:
+        if self._mapping is None:
+            unbound = ':%d' % self.port
+            return PortMapping(unbound, unbound)
+        else:
+            return self._mapping
+
+    async def _add_nat_portmap(self, upnp_dev: upnpclient.upnp.Device) -> str:
+        # Detect our internal IP address (which raises if there are no matches)
+        internal_ip = find_internal_ip_on_device_network(upnp_dev)
+
+        external_ip = upnp_dev.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
+        for protocol, description in [('TCP', 'ethereum p2p'), ('UDP', 'ethereum discovery')]:
+            upnp_dev.WANIPConn1.AddPortMapping(
+                NewRemoteHost=external_ip,
+                NewExternalPort=self.port,
+                NewProtocol=protocol,
+                NewInternalPort=self.port,
+                NewInternalClient=internal_ip,
+                NewEnabled='1',
+                NewPortMappingDescription=description,
+                NewLeaseDuration=self._nat_portmap_lifetime,
+            )
+        self._mapping = PortMapping(
+            '%s:%d' % (internal_ip, self.port),
+            '%s:%d' % (external_ip, self.port),
+        )
+        self.logger.info("NAT port forwarding successfully set up: %r", self._mapping)
+        return external_ip
+
+    async def _discover_upnp_devices(self) -> AsyncGenerator[upnpclient.upnp.Device, None]:
+        loop = asyncio.get_event_loop()
+        # Use loop.run_in_executor() because upnpclient.discover() is blocking and may take a
+        # while to complete. We must use a ThreadPoolExecutor() because the
+        # response from upnpclient.discover() can't be pickled.
+        try:
+            devices = await self.wait(
+                loop.run_in_executor(ThreadPoolExecutor(max_workers=1), upnpclient.discover),
+                timeout=UPNP_DISCOVER_TIMEOUT_SECONDS,
+            )
+        except TimeoutError:
+            self.logger.info("Timeout waiting for UPNP-enabled devices")
+            return
+
+        # If there are no UPNP devices we can exit early
+        if not devices:
+            self.logger.info("No UPNP-enabled devices found")
+            return
+
+        # Now we loop over all of the devices until we find one that we can use.
+        for device in devices:
+            try:
+                device.WANIPConn1
+            except AttributeError:
+                continue
+            yield device
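Note: a minimal sketch of driving UPnPService on its own; in this changeset it is Server
(below) that instantiates it and makes the initial mapping before starting the refresh
loop. The port number is hypothetical, and a real run needs a UPnP-enabled gateway on
the local network:

    import asyncio

    from p2p.nat import UPnPService

    async def map_port():
        service = UPnPService(30303)                     # port to expose publicly
        external_ip = await service.add_nat_portmap()    # one-shot mapping, may return None
        asyncio.ensure_future(service.run())             # re-registers every 30 minutes
        return external_ip

    asyncio.get_event_loop().run_until_complete(map_port())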
diff --git a/p2p/server.py b/p2p/server.py
index f6f0edd24a..1ca1753c34 100644
--- a/p2p/server.py
+++ b/p2p/server.py
@@ -1,5 +1,4 @@
 import asyncio
-import ipaddress
 import logging
 import secrets
 import socket
@@ -9,10 +8,6 @@
     Type,
     TYPE_CHECKING,
 )
-from urllib.parse import urlparse
-
-import netifaces
-import upnpclient
 
 from eth_keys import datatypes
 
@@ -46,6 +41,7 @@
     Address,
     Node,
 )
+from p2p.nat import UPnPService
 from p2p.p2p_proto import (
     DisconnectReason,
 )
@@ -67,7 +63,6 @@ class Server(BaseService):
     logger = logging.getLogger("p2p.server.Server")
     _tcp_listener = None
     _udp_listener = None
-    _nat_portmap_lifetime = 30 * 60
 
     peer_pool: PeerPool = None
 
@@ -97,110 +92,11 @@ def __init__(self,
         self.peer_pool_class = peer_pool_class
         self.max_peers = max_peers
         self.bootstrap_nodes = bootstrap_nodes
+        self.upnp_service = UPnPService(port, token=self.cancel_token)
 
         if not bootstrap_nodes:
             self.logger.warn("Running with no bootstrap nodes")
 
-    async def refresh_nat_portmap(self) -> None:
-        """Run an infinite loop refreshing our NAT port mapping.
-
-        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
-        sleep for that long as well.
-        """
-        while self.is_running:
-            try:
-                # We start with a sleep because our _run() method will setup the initial portmap.
-                await self.wait(asyncio.sleep(self._nat_portmap_lifetime))
-                await self.add_nat_portmap()
-            except OperationCancelled:
-                break
-
-    async def add_nat_portmap(self) -> None:
-        self.logger.info("Setting up NAT portmap...")
-        # This is experimental and it's OK if it fails, hence the bare except.
-        try:
-            upnp_dev = await self._discover_upnp_device()
-            if upnp_dev is None:
-                return
-            await self._add_nat_portmap(upnp_dev)
-        except upnpclient.soap.SOAPError as e:
-            if e.args == (718, 'ConflictInMappingEntry'):
-                # An entry already exists with the parameters we specified. Maybe the router
-                # didn't clean it up after it expired or it has been configured by other piece
-                # of software, either way we should not override it.
-                # https://tools.ietf.org/id/draft-ietf-pcp-upnp-igd-interworking-07.html#errors
-                self.logger.info("NAT port mapping already configured, not overriding it")
-            else:
-                self.logger.exception("Failed to setup NAT portmap")
-        except Exception:
-            self.logger.exception("Failed to setup NAT portmap")
-
-    def _find_internal_ip_on_device_network(self, upnp_dev: upnpclient.upnp.Device) -> str:
-        parsed_url = urlparse(upnp_dev.location)
-        # Get an ipaddress.IPv4Network instance for the upnp device's network.
-        upnp_dev_net = ipaddress.ip_network(parsed_url.hostname + '/24', strict=False)
-        for iface in netifaces.interfaces():
-            for family, addresses in netifaces.ifaddresses(iface).items():
-                # TODO: Support IPv6 addresses as well.
-                if family != netifaces.AF_INET:
-                    continue
-                for item in addresses:
-                    if ipaddress.ip_address(item['addr']) in upnp_dev_net:
-                        return item['addr']
-        return None
-
-    async def _add_nat_portmap(self, upnp_dev: upnpclient.upnp.Device) -> None:
-        # Detect our internal IP address (or abort if we can't determine
-        # the internal IP address
-        internal_ip = self._find_internal_ip_on_device_network(upnp_dev)
-        if internal_ip is None:
-            self.logger.warn(
-                "Unable to detect internal IP address in order to setup NAT portmap"
-            )
-            return
-
-        external_ip = upnp_dev.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
-        for protocol, description in [('TCP', 'ethereum p2p'), ('UDP', 'ethereum discovery')]:
-            upnp_dev.WANIPConn1.AddPortMapping(
-                NewRemoteHost=external_ip,
-                NewExternalPort=self.port,
-                NewProtocol=protocol,
-                NewInternalPort=self.port,
-                NewInternalClient=internal_ip,
-                NewEnabled='1',
-                NewPortMappingDescription=description,
-                NewLeaseDuration=self._nat_portmap_lifetime,
-            )
-        self.logger.info("NAT port forwarding successfully setup")
-
-    async def _discover_upnp_device(self) -> upnpclient.upnp.Device:
-        loop = asyncio.get_event_loop()
-        # UPnP discovery can take a long time, so use a loooong timeout here.
-        discover_timeout = 10 * REPLY_TIMEOUT
-        # Use loop.run_in_executor() because upnpclient.discover() is blocking and may take a
-        # while to complete.
-        try:
-            devices = await self.wait(
-                loop.run_in_executor(None, upnpclient.discover),
-                timeout=discover_timeout)
-        except TimeoutError:
-            self.logger.info("Timeout waiting for UPNP-enabled devices")
-            return None
-
-        # If there are no UPNP devices we can exit early
-        if not devices:
-            self.logger.info("No UPNP-enabled devices found")
-            return None
-
-        # Now we loop over all of the devices until we find one that we can use.
-        for device in devices:
-            try:
-                device.WANIPConn1
-            except AttributeError:
-                continue
-            return device
-        return None
-
     async def _start_tcp_listener(self) -> None:
         # TODO: Support IPv6 addresses as well.
         self._tcp_listener = await asyncio.start_server(
@@ -246,11 +142,11 @@ def _make_peer_pool(self, discovery: DiscoveryProtocol) -> PeerPool:
 
     async def _run(self) -> None:
         self.logger.info("Running server...")
-        upnp_dev = await self._discover_upnp_device()
-        external_ip = '0.0.0.0'
-        if upnp_dev is not None:
-            external_ip = upnp_dev.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
-            await self._add_nat_portmap(upnp_dev)
+        mapped_external_ip = await self.upnp_service.add_nat_portmap()
+        if mapped_external_ip is None:
+            external_ip = '0.0.0.0'
+        else:
+            external_ip = mapped_external_ip
         await self._start_tcp_listener()
         self.logger.info(
             "enode://%s@%s:%s",
@@ -264,15 +160,18 @@ async def _run(self) -> None:
         self.discovery = DiscoveryProtocol(self.privkey, addr, bootstrap_nodes=self.bootstrap_nodes)
         await self._start_udp_listener(self.discovery)
         self.peer_pool = self._make_peer_pool(self.discovery)
-        asyncio.ensure_future(self.refresh_nat_portmap())
         asyncio.ensure_future(self.discovery.bootstrap())
         asyncio.ensure_future(self.peer_pool.run())
+        asyncio.ensure_future(self.upnp_service.run())
         self.syncer = self._make_syncer(self.peer_pool)
         await self.syncer.run()
 
     async def _cleanup(self) -> None:
         self.logger.info("Closing server...")
-        await asyncio.gather(self.peer_pool.cancel(), self.discovery.stop())
+        await asyncio.gather(
+            self.peer_pool.cancel(),
+            self.discovery.stop(),
+        )
         await self._close()
 
     async def receive_handshake(
diff --git a/p2p/service.py b/p2p/service.py
index 04fed7d2c4..822255f478 100644
--- a/p2p/service.py
+++ b/p2p/service.py
@@ -89,10 +89,10 @@ async def cleanup(self) -> None:
 
     async def cancel(self):
         """Trigger the CancelToken and wait for the cleaned_up event to be set."""
-        if not self.is_running:
-            raise RuntimeError("Cannot cancel a service that has not been started")
-        elif self.cancel_token.triggered:
+        if self.cancel_token.triggered:
             self.logger.warning("Tried to cancel %s, but it was already cancelled", self)
+        elif not self.is_running:
+            raise RuntimeError("Cannot cancel a service that has not been started")
 
         self.logger.debug("Cancelling %s", self)
         self.cancel_token.trigger()
diff --git a/setup.py b/setup.py
index 27168a2c66..75a954fc11 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,8 @@
     'evm-extra': [
         "coincurve>=7.0.0,<8.0.0",
         "plyvel==1.0.4",
-        "eth-hash[pycryptodome]",
+        "eth-hash[pysha3];implementation_name=='cpython'",
+        "eth-hash[pycryptodome];implementation_name=='pypy'",
     ],
     'p2p': [
         "aiohttp>=2.3.1,<3.0.0",
@@ -74,6 +75,7 @@
     ],
 }
 
+
 deps['dev'] = (
     deps['dev'] +
     deps['evm'] +
@@ -96,7 +98,7 @@
     version='0.2.0-alpha.26',
     description='Python implementation of the Ethereum Virtual Machine',
     long_description_markdown_filename='README.md',
-    author='Piper Merriam',
+    author='Ethereum Foundation',
     author_email='piper@pipermerriam.com',
     url='https://github.com/ethereum/py-evm',
     include_package_data=True,
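Note: the p2p/service.py reordering above changes cancel() semantics. Cancelling a
service whose token is already triggered now logs a warning instead of hitting the
not-yet-started RuntimeError, since the triggered check runs first. A minimal sketch,
assuming any concrete BaseService subclass such as the UPnPService above:

    async def shutdown(service):
        await service.cancel()   # triggers the CancelToken and waits for cleanup
        await service.cancel()   # now warns "already cancelled" and returns quickly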
diff --git a/tests/core/merkle-utils/test_merkle_trees.py b/tests/core/merkle-utils/test_merkle_trees.py
new file mode 100644
index 0000000000..fc40856945
--- /dev/null
+++ b/tests/core/merkle-utils/test_merkle_trees.py
@@ -0,0 +1,144 @@
+import pytest
+
+from eth_hash.auto import (
+    keccak,
+)
+
+from evm.utils.merkle import (
+    calc_merkle_root,
+    calc_merkle_tree,
+    get_root,
+    get_merkle_proof,
+    verify_merkle_proof,
+)
+
+from evm.exceptions import (
+    ValidationError,
+)
+
+
+@pytest.mark.parametrize("leaves,tree", [
+    (
+        (b"single leaf",),
+        (
+            (keccak(b"single leaf"),),
+        ),
+    ),
+    (
+        (b"left", b"right"),
+        (
+            (keccak(keccak(b"left") + keccak(b"right")),),
+            (keccak(b"left"), keccak(b"right")),
+        ),
+    ),
+    (
+        (b"1", b"2", b"3", b"4"),
+        (
+            (
+                keccak(
+                    keccak(
+                        keccak(b"1") + keccak(b"2")
+                    ) + keccak(
+                        keccak(b"3") + keccak(b"4")
+                    )
+                ),
+            ),
+            (
+                keccak(
+                    keccak(b"1") + keccak(b"2")
+                ),
+                keccak(
+                    keccak(b"3") + keccak(b"4")
+                ),
+            ),
+            (
+                keccak(b"1"),
+                keccak(b"2"),
+                keccak(b"3"),
+                keccak(b"4"),
+            ),
+        ),
+    ),
+])
+def test_merkle_tree_calculation(leaves, tree):
+    calculated_tree = calc_merkle_tree(leaves)
+    assert calculated_tree == tree
+    assert get_root(tree) == tree[0][0]
+    assert calc_merkle_root(leaves) == get_root(tree)
+
+
+@pytest.mark.parametrize("leave_number", [0, 3, 5, 6, 7, 9])
+def test_invalid_merkle_root_calculation(leave_number):
+    with pytest.raises(ValidationError):
+        calc_merkle_root((b"",) * leave_number)
+
+
+@pytest.mark.parametrize("leaves,index,proof", [
+    (
+        (b"1", b"2"),
+        0,
+        (keccak(b"2"),),
+    ),
+    (
+        (b"1", b"2"),
+        1,
+        (keccak(b"1"),),
+    ),
+    (
+        (b"1", b"2", b"3", b"4"),
+        0,
+        (keccak(b"2"), keccak(keccak(b"3") + keccak(b"4"))),
+    ),
+    (
+        (b"1", b"2", b"3", b"4"),
+        1,
+        (keccak(b"1"), keccak(keccak(b"3") + keccak(b"4"))),
+    ),
+    (
+        (b"1", b"2", b"3", b"4"),
+        2,
+        (keccak(b"4"), keccak(keccak(b"1") + keccak(b"2"))),
+    ),
+    (
+        (b"1", b"2", b"3", b"4"),
+        3,
+        (keccak(b"3"), keccak(keccak(b"1") + keccak(b"2"))),
+    ),
+])
+def test_merkle_proofs(leaves, index, proof):
+    tree = calc_merkle_tree(leaves)
+    root = get_root(tree)
+    item = leaves[index]
+    calculated_proof = get_merkle_proof(tree, index)
+    assert calculated_proof == proof
+    assert verify_merkle_proof(root, item, index, calculated_proof)
+
+    assert not verify_merkle_proof(b"\x00" * 32, item, index, proof)
+    assert not verify_merkle_proof(root, b"\x00" * 32, index, proof)
+    assert not verify_merkle_proof(root, item, (index + 1) % len(leaves), proof)
+    for replaced_index in range(len(proof)):
+        altered_proof = proof[:replaced_index] + (b"\x00" * 32,) + proof[replaced_index + 1:]
+        assert not verify_merkle_proof(root, item, index, altered_proof)
+
+
+def test_single_element_merkle_proof():
+    leaves = (b"1",)
+    tree = calc_merkle_tree(leaves)
+    root = get_root(tree)
+    assert get_merkle_proof(tree, 0) == ()
+    assert verify_merkle_proof(root, b"1", 0, ())
+    assert not verify_merkle_proof(b"\x00" * 32, b"1", 0, ())
+    assert not verify_merkle_proof(root, b"2", 0, ())
+    assert not verify_merkle_proof(root, b"1", 0, (b"\x00" * 32,))
+
+
+@pytest.mark.parametrize("leaves", [
+    (b"1",),
+    (b"1", b"2"),
+    (b"1", b"2", b"3", b"4"),
+])
+def test_proof_generation_index_validation(leaves):
+    tree = calc_merkle_tree(leaves)
+    for invalid_index in [-1, len(leaves)]:
+        with pytest.raises(ValidationError):
+            get_merkle_proof(tree, invalid_index)
keccak(b"single leaf")), - ([b"left", b"right"], keccak(keccak(b"left") + keccak(b"right"))), - ( - [b"1", b"2", b"3", b"4"], - keccak( - keccak( - keccak(b"1") + keccak(b"2") - ) + keccak( - keccak(b"3") + keccak(b"4") - ) - ) - ) -]) -def test_merkle_root_calculation(leaves, root): - assert calc_merkle_root(leaves) == root - - -@pytest.mark.parametrize("leave_number", [0, 3, 5, 6, 7, 9]) -def test_invalid_merkle_root_calculation(leave_number): - with pytest.raises(ValidationError): - calc_merkle_root([b""] * leave_number) - - def test_chunk_root_calculation(): with pytest.raises(ValidationError): calc_chunk_root(b"\x00" * (COLLATION_SIZE - 1)) diff --git a/tests/trinity/json-fixtures-over-rpc/test_rpc_fixtures.py b/tests/trinity/json-fixtures-over-rpc/test_rpc_fixtures.py index 8938a1d84d..e6e1df43f3 100644 --- a/tests/trinity/json-fixtures-over-rpc/test_rpc_fixtures.py +++ b/tests/trinity/json-fixtures-over-rpc/test_rpc_fixtures.py @@ -4,10 +4,12 @@ from cytoolz import ( dissoc, + identity, get_in, ) from eth_utils import ( + add_0x_prefix, is_hex, is_integer, is_string, @@ -22,11 +24,11 @@ from trinity.rpc import RPCServer from trinity.rpc.format import ( - fixture_block_in_rpc_format, - fixture_state_in_rpc_format, - fixture_transaction_in_rpc_format, + empty_to_0x, + remove_leading_zeros, ) + ROOT_PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) @@ -86,6 +88,68 @@ 'DelegateCallSpam_EIP150', ) +RPC_STATE_NORMALIZERS = { + 'balance': remove_leading_zeros, + 'code': empty_to_0x, + 'nonce': remove_leading_zeros, +} + +RPC_BLOCK_REMAPPERS = { + 'bloom': 'logsBloom', + 'coinbase': 'miner', + 'transactionsTrie': 'transactionsRoot', + 'uncleHash': 'sha3Uncles', + 'receiptTrie': 'receiptsRoot', +} + +RPC_BLOCK_NORMALIZERS = { + 'difficulty': remove_leading_zeros, + 'extraData': empty_to_0x, + 'gasLimit': remove_leading_zeros, + 'gasUsed': remove_leading_zeros, + 'number': remove_leading_zeros, + 'timestamp': remove_leading_zeros, +} + +RPC_TRANSACTION_REMAPPERS = { + 'data': 'input', +} + +RPC_TRANSACTION_NORMALIZERS = { + 'nonce': remove_leading_zeros, + 'gasLimit': remove_leading_zeros, + 'gasPrice': remove_leading_zeros, + 'value': remove_leading_zeros, + 'data': empty_to_0x, + 'to': add_0x_prefix, + 'r': remove_leading_zeros, + 's': remove_leading_zeros, + 'v': remove_leading_zeros, +} + + +def fixture_block_in_rpc_format(state): + return { + RPC_BLOCK_REMAPPERS.get(key, key): + RPC_BLOCK_NORMALIZERS.get(key, identity)(value) + for key, value in state.items() + } + + +def fixture_state_in_rpc_format(state): + return { + key: RPC_STATE_NORMALIZERS.get(key, identity)(value) + for key, value in state.items() + } + + +def fixture_transaction_in_rpc_format(state): + return { + RPC_TRANSACTION_REMAPPERS.get(key, key): + RPC_TRANSACTION_NORMALIZERS.get(key, identity)(value) + for key, value in state.items() + } + def blockchain_fixture_mark_fn(fixture_path, fixture_name): for slow_test in SLOW_TESTS: diff --git a/trinity/rpc/format.py b/trinity/rpc/format.py index 4dc5c209c0..05146d59cf 100644 --- a/trinity/rpc/format.py +++ b/trinity/rpc/format.py @@ -2,11 +2,9 @@ from cytoolz import ( compose, - identity, ) from eth_utils import ( - add_0x_prefix, encode_hex, int_to_big_endian, ) @@ -99,67 +97,3 @@ def empty_to_0x(val): remove_leading_zeros = compose(hex, functools.partial(int, base=16)) - -RPC_STATE_NORMALIZERS = { - 'balance': remove_leading_zeros, - 'code': empty_to_0x, - 'nonce': remove_leading_zeros, -} - - -def fixture_state_in_rpc_format(state): 
diff --git a/trinity/rpc/format.py b/trinity/rpc/format.py
index 4dc5c209c0..05146d59cf 100644
--- a/trinity/rpc/format.py
+++ b/trinity/rpc/format.py
@@ -2,11 +2,9 @@
 
 from cytoolz import (
     compose,
-    identity,
 )
 
 from eth_utils import (
-    add_0x_prefix,
     encode_hex,
     int_to_big_endian,
 )
@@ -99,67 +97,3 @@ def empty_to_0x(val):
 
 
 remove_leading_zeros = compose(hex, functools.partial(int, base=16))
-
-
-RPC_STATE_NORMALIZERS = {
-    'balance': remove_leading_zeros,
-    'code': empty_to_0x,
-    'nonce': remove_leading_zeros,
-}
-
-
-def fixture_state_in_rpc_format(state):
-    return {
-        key: RPC_STATE_NORMALIZERS.get(key, identity)(value)
-        for key, value in state.items()
-    }
-
-
-RPC_BLOCK_REMAPPERS = {
-    'bloom': 'logsBloom',
-    'coinbase': 'miner',
-    'transactionsTrie': 'transactionsRoot',
-    'uncleHash': 'sha3Uncles',
-    'receiptTrie': 'receiptsRoot',
-}
-
-RPC_BLOCK_NORMALIZERS = {
-    'difficulty': remove_leading_zeros,
-    'extraData': empty_to_0x,
-    'gasLimit': remove_leading_zeros,
-    'gasUsed': remove_leading_zeros,
-    'number': remove_leading_zeros,
-    'timestamp': remove_leading_zeros,
-}
-
-
-def fixture_block_in_rpc_format(state):
-    return {
-        RPC_BLOCK_REMAPPERS.get(key, key):
-        RPC_BLOCK_NORMALIZERS.get(key, identity)(value)
-        for key, value in state.items()
-    }
-
-
-RPC_TRANSACTION_REMAPPERS = {
-    'data': 'input',
-}
-
-RPC_TRANSACTION_NORMALIZERS = {
-    'nonce': remove_leading_zeros,
-    'gasLimit': remove_leading_zeros,
-    'gasPrice': remove_leading_zeros,
-    'value': remove_leading_zeros,
-    'data': empty_to_0x,
-    'to': add_0x_prefix,
-    'r': remove_leading_zeros,
-    's': remove_leading_zeros,
-    'v': remove_leading_zeros,
-}
-
-
-def fixture_transaction_in_rpc_format(state):
-    return {
-        RPC_TRANSACTION_REMAPPERS.get(key, key):
-        RPC_TRANSACTION_NORMALIZERS.get(key, identity)(value)
-        for key, value in state.items()
-    }
diff --git a/trinity/utils/ipc.py b/trinity/utils/ipc.py
index 397d14f80a..6a842ba8ca 100644
--- a/trinity/utils/ipc.py
+++ b/trinity/utils/ipc.py
@@ -1,14 +1,15 @@
 import os
+import pathlib
 import signal
 import time
 
 from multiprocessing import Process
 
 from logging import Logger
 
 
-def wait_for_ipc(ipc_path: str, timeout: int=1) -> None:
+def wait_for_ipc(ipc_path: pathlib.Path, timeout: int=1) -> None:
     start_at = time.time()
     while time.time() - start_at < timeout:
-        if os.path.exists(ipc_path):
+        if ipc_path.exists():
             break
         time.sleep(0.05)
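Note: wait_for_ipc() now expects a pathlib.Path rather than a str. A minimal usage
sketch with a hypothetical socket location:

    import pathlib

    from trinity.utils.ipc import wait_for_ipc

    ipc_path = pathlib.Path('/tmp/trinity.ipc')   # hypothetical location
    wait_for_ipc(ipc_path, timeout=2)             # polls every 50 ms until the file exists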