Revert "Some work on speculative execution (#1552)"
This reverts commit ddbdf34.
AdamSpitz committed Apr 24, 2023
1 parent ddbdf34 commit 8eb5b7d
Showing 32 changed files with 374 additions and 942 deletions.
4 changes: 0 additions & 4 deletions Makefile
@@ -138,10 +138,6 @@ ifneq ($(ENABLE_EVMC), 0)
T8N_PARAMS := -d:chronicles_enabled=off
endif

ifneq ($(ENABLE_SPECULATIVE_EXECUTION), 0)
NIM_PARAMS += -d:evm_speculative_execution
endif

# disabled by default, enable with ENABLE_VMLOWMEM=1
ifneq ($(if $(ENABLE_VMLOWMEM),$(ENABLE_VMLOWMEM),0),0)
NIM_PARAMS += -d:lowmem:1
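Note on the flag removed above: -d:evm_speculative_execution is a Nim compile-time define passed via NIM_PARAMS, so the reverted feature was toggled at build time rather than at runtime. The snippet below is only an illustrative sketch of how such a define is typically consumed in Nim code; the constant and the echo are hypothetical and do not appear anywhere in this commit.

    # Illustrative sketch only -- not code from this commit.
    # A -d:evm_speculative_execution define set in the Makefile becomes a
    # compile-time symbol that Nim sources can branch on:
    when defined(evm_speculative_execution):
      const evmExecutionMode* = "speculative"   # hypothetical constant
    else:
      const evmExecutionMode* = "synchronous"

    when isMainModule:
      echo "EVM execution mode: ", evmExecutionMode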
2 changes: 1 addition & 1 deletion nimbus/core/executor/process_block.nim
@@ -117,7 +117,7 @@ proc procBlkPreamble(vmState: BaseVMState;

proc procBlkEpilogue(vmState: BaseVMState;
header: BlockHeader; body: BlockBody): bool
{.gcsafe, raises: [RlpError, CatchableError].} =
{.gcsafe, raises: [RlpError].} =
# Reward beneficiary
vmState.mutateStateDB:
if vmState.generateWitness:
27 changes: 3 additions & 24 deletions nimbus/core/executor/process_transaction.nim
@@ -59,29 +59,6 @@ proc commitOrRollbackDependingOnGasUsed(vmState: BaseVMState, accTx: SavePoint,
vmState.gasPool += tx.gasLimit - gasBurned
return ok(gasBurned)

# For stateless mode with on-demand fetching, we need to do
# this, because it's possible for deletion to result in a
# branch node with only one child, which then needs to be
# transformed into an extension node or leaf node (or
# grafted onto one), but we don't actually have that node
# yet so we have to fetch it and then retry.
proc repeatedlyTryToPersist(vmState: BaseVMState, fork: EVMFork): Future[void] {.async.} =
#info("repeatedlyTryToPersist about to get started")
var i = 0
while i < 100:
#info("repeatedlyTryToPersist making an attempt to persist", i)
try:
await vmState.stateDB.asyncPersist(
clearEmptyAccount = fork >= FkSpurious,
clearCache = false)
return
except MissingNodesError as e:
#warn("repeatedlyTryToPersist found missing paths", missingPaths=e.paths, missingNodeHashes=e.nodeHashes)
await fetchAndPopulateNodes(vmState, e.paths, e.nodeHashes)
i += 1
error("repeatedlyTryToPersist failed after 100 tries")
raise newException(CatchableError, "repeatedlyTryToPersist failed after 100 tries")

proc asyncProcessTransactionImpl(
vmState: BaseVMState; ## Parent accounts environment for transaction
tx: Transaction; ## Transaction to validate
@@ -135,7 +112,9 @@ proc asyncProcessTransactionImpl(

if vmState.generateWitness:
vmState.stateDB.collectWitnessData()
await repeatedlyTryToPersist(vmState, fork)
vmState.stateDB.persist(
clearEmptyAccount = fork >= FkSpurious,
clearCache = false)

return res

2 changes: 1 addition & 1 deletion nimbus/core/tx_pool/tx_tasks/tx_packer.nim
@@ -68,7 +68,7 @@ template safeExecutor(info: string; code: untyped) =
raise newException(TxPackerError, info & "(): " & $e.name & " -- " & e.msg)

proc persist(pst: TxPackerStateRef)
{.gcsafe,raises: [RlpError, CatchableError].} =
{.gcsafe,raises: [RlpError].} =
## Smart wrapper
if not pst.cleanState:
let fork = pst.xp.chain.nextFork
120 changes: 20 additions & 100 deletions nimbus/db/accounts_cache.nim
@@ -1,11 +1,8 @@
import
std/[tables, hashes, sets],
chronos,
eth/[common, rlp], eth/trie/[hexary, db, trie_defs, nibbles],
../utils/functors/possible_futures,
eth/[common, rlp], eth/trie/[hexary, db, trie_defs],
../constants, ../utils/utils, storage_types,
../../stateless/multi_keys,
../evm/async/speculex,
./distinct_tries,
./access_list as ac_access_list

@@ -24,14 +21,12 @@ type

AccountFlags = set[AccountFlag]

StorageCell* = SpeculativeExecutionCell[UInt256]

RefAccount = ref object
account: Account
flags: AccountFlags
code: seq[byte]
originalStorage: TableRef[UInt256, UInt256]
overlayStorage: Table[UInt256, StorageCell]
overlayStorage: Table[UInt256, UInt256]

WitnessData* = object
storageKeys*: HashSet[UInt256]
@@ -266,15 +261,11 @@ proc originalStorageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef):

acc.originalStorage[slot] = result

proc storageCell(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): StorageCell =
acc.overlayStorage.withValue(slot, cell) do:
return cell[]
do:
return pureCell(acc.originalStorageValue(slot, db))

# FIXME-removeSynchronousInterface: we use this down below, but I don't think that's okay anymore.
proc storageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): UInt256 =
waitForValueOf(storageCell(acc, slot, db))
acc.overlayStorage.withValue(slot, val) do:
return val[]
do:
result = acc.originalStorageValue(slot, db)

proc kill(acc: RefAccount) =
acc.flags.excl Alive
Expand Down Expand Up @@ -305,7 +296,7 @@ proc persistCode(acc: RefAccount, db: TrieDatabaseRef) =
else:
db.put(contractHashKey(acc.account.codeHash).toOpenArray, acc.code)

proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool): Future[void] {.async.} =
proc persistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool) =
if acc.overlayStorage.len == 0:
# TODO: remove the storage too if we figure out
# how to create 'virtual' storage room for each account
@@ -316,10 +307,9 @@ proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool)

var storageTrie = getStorageTrie(db, acc)

for slot, valueCell in acc.overlayStorage:
for slot, value in acc.overlayStorage:
let slotAsKey = createTrieKeyFromSlot slot

let value = await valueCell.toFuture
if value > 0:
let encodedValue = rlp.encode(value)
storageTrie.putSlotBytes(slotAsKey, encodedValue)
@@ -337,8 +327,7 @@ proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool)
if not clearCache:
# if we preserve cache, move the overlayStorage
# to originalStorage, related to EIP2200, EIP1283
for slot, valueCell in acc.overlayStorage:
let value = unsafeGetAlreadyAvailableValue(valueCell)
for slot, value in acc.overlayStorage:
if value > 0:
acc.originalStorage[slot] = value
else:
@@ -401,15 +390,11 @@ proc getCommittedStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256)
return
acc.originalStorageValue(slot, ac.db)

proc getStorageCell*(ac: AccountsCache, address: EthAddress, slot: UInt256): StorageCell =
proc getStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 {.inline.} =
let acc = ac.getAccount(address, false)
if acc.isNil:
return pureCell(UInt256.zero)
return acc.storageCell(slot, ac.db)

# FIXME-removeSynchronousInterface
proc getStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 =
waitForValueOf(getStorageCell(ac, address, slot))
return
acc.storageValue(slot, ac.db)

proc hasCodeOrNonce*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
let acc = ac.getAccount(address, false)
@@ -475,21 +460,15 @@ proc setCode*(ac: AccountsCache, address: EthAddress, code: seq[byte]) =
acc.code = code
acc.flags.incl CodeChanged

proc setStorageCell*(ac: AccountsCache, address: EthAddress, slot: UInt256, cell: StorageCell) =
proc setStorage*(ac: AccountsCache, address: EthAddress, slot, value: UInt256) =
let acc = ac.getAccount(address)
acc.flags.incl {Alive}
# FIXME-removeSynchronousInterface: ugh, this seems like a problem (that we need the values to be able to check whether they're equal)
let oldValue = acc.storageValue(slot, ac.db)
let value = waitForValueOf(cell)
if oldValue != value:
var acc = ac.makeDirty(address)
acc.overlayStorage[slot] = cell
acc.overlayStorage[slot] = value
acc.flags.incl StorageChanged

# FIXME-removeSynchronousInterface
proc setStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256, value: UInt256) =
setStorageCell(ac, address, slot, pureCell(value))

proc clearStorage*(ac: AccountsCache, address: EthAddress) =
let acc = ac.getAccount(address)
acc.flags.incl {Alive}
@@ -549,52 +528,9 @@ proc clearEmptyAccounts(ac: AccountsCache) =
ac.deleteEmptyAccount(ripemdAddr)
ac.ripemdSpecial = false


type MissingNodesError* = ref object of Defect
paths*: seq[seq[seq[byte]]]
nodeHashes*: seq[Hash256]

# FIXME-Adam: Move this elsewhere.
# Also, I imagine there's a more efficient way to do this.
proc padRight[V](s: seq[V], n: int, v: V): seq[V] =
for sv in s:
result.add(sv)
while result.len < n:
result.add(v)

proc padRightWithZeroes(s: NibblesSeq, n: int): NibblesSeq =
initNibbleRange(padRight(s.getBytes, (n + 1) div 2, byte(0)))

# FIXME-Adam: Why can I never find the conversion function I need?
func toHash*(value: seq[byte]): Hash256 =
doAssert(value.len == 32)
var byteArray: array[32, byte]
for i, b in value:
byteArray[i] = b
result.data = byteArray

func encodePath(path: NibblesSeq): seq[byte] =
if path.len == 64:
path.getBytes
else:
hexPrefixEncode(path)

proc createMissingNodesErrorForAccount(missingAccountPath: NibblesSeq, nodeHash: Hash256): MissingNodesError =
MissingNodesError(
paths: @[@[encodePath(padRightWithZeroes(missingAccountPath, 64))]],
nodeHashes: @[nodeHash]
)

proc createMissingNodesErrorForSlot(address: EthAddress, missingSlotPath: NibblesSeq, nodeHash: Hash256): MissingNodesError =
MissingNodesError(
paths: @[@[@(address.keccakHash.data), encodePath(padRightWithZeroes(missingSlotPath, 64))]],
nodeHashes: @[nodeHash]
)


proc asyncPersist*(ac: AccountsCache,
clearEmptyAccount: bool = false,
clearCache: bool = true): Future[void] {.async.} =
proc persist*(ac: AccountsCache,
clearEmptyAccount: bool = false,
clearCache: bool = true) =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
var cleanAccounts = initHashSet[EthAddress]()
@@ -613,19 +549,10 @@ proc asyncPersist*(ac: AccountsCache,
if StorageChanged in acc.flags:
# storageRoot must be updated first
# before persisting account into merkle trie
#
# Also, see the comment on repeatedlyTryToPersist in
# process_transaction.nim.
try:
await acc.asyncPersistStorage(ac.db, clearCache)
except MissingNodeError as e:
raise createMissingNodesErrorForSlot(address, e.path, toHash(e.nodeHashBytes))
acc.persistStorage(ac.db, clearCache)
ac.trie.putAccountBytes address, rlp.encode(acc.account)
of Remove:
try:
ac.trie.delAccountBytes address
except MissingNodeError as e:
raise createMissingNodesErrorForAccount(e.path, toHash(e.nodeHashBytes))
ac.trie.delAccountBytes address
if not clearCache:
cleanAccounts.incl address
of DoNothing:
@@ -649,12 +576,6 @@ proc asyncPersist*(ac: AccountsCache,

ac.isDirty = false

# FIXME-removeSynchronousInterface
proc persist*(ac: AccountsCache,
clearEmptyAccount: bool = false,
clearCache: bool = true) =
waitFor(asyncPersist(ac, clearEmptyAccount, clearCache))

iterator addresses*(ac: AccountsCache): EthAddress =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
@@ -709,8 +630,7 @@ func update(wd: var WitnessData, acc: RefAccount) =
if v.isZero: continue
wd.storageKeys.incl k

for k, cell in acc.overlayStorage:
let v = unsafeGetAlreadyAvailableValue(cell) # FIXME-Adam: should be resolved by now, I think? wait, maybe not?
for k, v in acc.overlayStorage:
if v.isZero and k notin wd.storageKeys:
continue
if v.isZero and k in wd.storageKeys:
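The reverted accounts_cache.nim code keeps overlay storage as StorageCell values (SpeculativeExecutionCell[UInt256] from ../evm/async/speculex) rather than plain UInt256. The speculex module itself is not part of this diff, so the following is only a hedged sketch, inferred from the call sites visible above (pureCell, toFuture, waitForValueOf, unsafeGetAlreadyAvailableValue), of what such a cell abstraction could look like; the actual implementation may differ.

    # Hedged sketch -- NOT the actual speculex module from the reverted commit.
    # A cell wraps a chronos Future, so a storage slot can hold either a value
    # that is already known or one still being produced speculatively.
    import chronos

    type
      SpeculativeExecutionCell*[T] = object
        fut: Future[T]

    proc pureCell*[T](v: T): SpeculativeExecutionCell[T] =
      # Cell whose value is available immediately.
      let f = newFuture[T]("pureCell")
      f.complete(v)
      SpeculativeExecutionCell[T](fut: f)

    proc toFuture*[T](c: SpeculativeExecutionCell[T]): Future[T] =
      c.fut

    proc waitForValueOf*[T](c: SpeculativeExecutionCell[T]): T =
      # Synchronous bridge used by the legacy (non-async) call sites.
      waitFor c.fut

    proc unsafeGetAlreadyAvailableValue*[T](c: SpeculativeExecutionCell[T]): T =
      # Caller guarantees the value has already been resolved.
      doAssert c.fut.finished
      c.fut.read

Under this reading, the reverted getStorage* is a thin synchronous wrapper over the cell-based API (waitForValueOf(getStorageCell(...))), which matches the FIXME-removeSynchronousInterface comments in the code being removed above.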
4 changes: 2 additions & 2 deletions nimbus/db/incomplete_db.nim
@@ -30,13 +30,13 @@ proc ifNodesExistGetStorageBytesWithinAccount*(storageTrie: StorageTrie, slotAsK


proc populateDbWithNodes*(db: TrieDatabaseRef, nodes: seq[seq[byte]]) =
error("AARDVARK: populateDbWithNodes received nodes, about to populate", nodes) # AARDVARK not an error, I just want it to stand out
error("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG AARDVARK: populateDbWithNodes received nodes, about to populate", nodes) # AARDVARK not an error, I just want it to stand out
for nodeBytes in nodes:
let nodeHash = keccakHash(nodeBytes)
info("AARDVARK: populateDbWithNodes about to add node", nodeHash, nodeBytes)
db.put(nodeHash.data, nodeBytes)

# FIXME-Adam: just make the callers call populateDbWithNodes directly?
# AARDVARK: just make the callers call populateDbWithNodes directly?
proc populateDbWithBranch*(db: TrieDatabaseRef, branch: seq[seq[byte]]) =
for nodeBytes in branch:
let nodeHash = keccakHash(nodeBytes)
4 changes: 2 additions & 2 deletions nimbus/evm/async/data_sources.nim
@@ -4,7 +4,6 @@ import
stint,
eth/common,
eth/trie/db,
../../sync/protocol,
../../db/db_chain

type
@@ -13,7 +12,8 @@ type
ifNecessaryGetCode*: proc(db: TrieDatabaseRef, blockNumber: BlockNumber, stateRoot: Hash256, address: EthAddress, newStateRootForSanityChecking: Hash256): Future[void] {.gcsafe.}
ifNecessaryGetAccount*: proc(db: TrieDatabaseRef, blockNumber: BlockNumber, stateRoot: Hash256, address: EthAddress, newStateRootForSanityChecking: Hash256): Future[void] {.gcsafe.}
ifNecessaryGetBlockHeaderByNumber*: proc(chainDB: ChainDBRef, blockNumber: BlockNumber): Future[void] {.gcsafe.}
fetchNodes*: proc(stateRoot: Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.gcsafe.}
# FIXME-Adam: Later.
#fetchNodes*: proc(stateRoot: Hash256, paths: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.gcsafe.}
fetchBlockHeaderWithHash*: proc(h: Hash256): Future[BlockHeader] {.gcsafe.}
fetchBlockHeaderWithNumber*: proc(n: BlockNumber): Future[BlockHeader] {.gcsafe.}
fetchBlockHeaderAndBodyWithHash*: proc(h: Hash256): Future[(BlockHeader, BlockBody)] {.gcsafe.}
7 changes: 4 additions & 3 deletions nimbus/evm/async/data_sources/json_rpc_data_source.nim
@@ -501,9 +501,10 @@ proc realAsyncDataSource*(peerPool: PeerPool, client: RpcClient, justChecking: b
await ifNecessaryGetBlockHeaderByNumber(client, chainDB, blockNumber, justChecking)
),

fetchNodes: (proc(stateRoot: Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.async.} =
return await fetchNodes(peerPool, stateRoot, paths, nodeHashes)
),
# FIXME-Adam: This will be needed later, but for now let's just get the basic methods in place.
#fetchNodes: (proc(stateRoot: Hash256, paths: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.async.} =
# return await fetchNodes(peerPool, stateRoot, paths, nodeHashes)
#),

fetchBlockHeaderWithHash: (proc(h: Hash256): Future[BlockHeader] {.async.} =
return await fetchBlockHeaderWithHash(client, h)
16 changes: 5 additions & 11 deletions nimbus/evm/async/operations.nim
@@ -1,14 +1,12 @@
import
chronicles,
chronos,
sequtils,
stint,
eth/common/eth_types,
../../common,
../../db/distinct_tries,
../../db/accounts_cache,
../../db/incomplete_db,
../../sync/protocol,
#../../db/incomplete_db,
../types,
./data_sources

@@ -29,19 +27,15 @@ proc ifNecessaryGetSlot*(vmState: BaseVMState, address: EthAddress, slot: UInt25
proc ifNecessaryGetBlockHeaderByNumber*(vmState: BaseVMState, blockNumber: BlockNumber): Future[void] {.async.} =
await vmState.asyncFactory.ifNecessaryGetBlockHeaderByNumber(vmState.com.db, blockNumber)

proc snapTriePathFromByteSeqs(byteSeqs: seq[seq[byte]]): SnapTriePaths =
SnapTriePaths(
accPath: byteSeqs[0],
slotPaths: byteSeqs[1 ..< byteSeqs.len]
)

proc fetchAndPopulateNodes*(vmState: BaseVMState, pathByteSeqs: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[void] {.async.} =
#[
FIXME-Adam: This is for later.
proc fetchAndPopulateNodes*(vmState: BaseVMState, paths: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[void] {.async.} =
if vmState.asyncFactory.maybeDataSource.isSome:
# let stateRoot = vmState.stateDB.rawTrie.rootHash # FIXME-Adam: this might not be right, huh? the peer might expect the parent block's final stateRoot, not this weirdo intermediate one
let stateRoot = vmState.parent.stateRoot
let paths = pathByteSeqs.map(snapTriePathFromByteSeqs)
let nodes = await vmState.asyncFactory.maybeDataSource.get.fetchNodes(stateRoot, paths, nodeHashes)
populateDbWithNodes(vmState.stateDB.rawDb, nodes)
]#


# Sometimes it's convenient to be able to do multiple at once.
