This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit c2966fe
Revert "Use prefixed keys for trie node. (#2130)"
This reverts commit fd15825.
andresilva committed Apr 2, 2019
1 parent 12a33e6 commit c2966fe
Showing 27 changed files with 435 additions and 464 deletions.
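
For context: PR #2130 keyed trie nodes in the state database by a position prefix combined with the node hash, which is why the diff below swaps `PrefixedMemoryDB` back to plain `MemoryDB`, drops the `prefix: &[u8]` argument from the storage `get` methods, and narrows the state-db key type from `Vec<u8>` back to `H256`. Below is a minimal sketch of the two keying schemes, assuming the prefixed key is simply the prefix bytes followed by the hash bytes; the function names are illustrative stand-ins, not the `substrate-trie` API.

```rust
// Sketch only: contrasts the two database-key schemes this revert toggles between.
// `prefixed_db_key` is a hypothetical stand-in for the reverted `prefixed_key` helper;
// the real byte layout in `substrate-trie` may differ.

type H256 = [u8; 32]; // stand-in for primitives::H256

/// Plain scheme (restored by this revert): the database key is the node hash itself.
fn plain_db_key(hash: &H256) -> Vec<u8> {
    hash.to_vec()
}

/// Prefixed scheme (introduced in #2130, reverted here): the database key combines the
/// node's trie-position prefix with its hash, so the key type becomes an open-ended Vec<u8>.
fn prefixed_db_key(hash: &H256, prefix: &[u8]) -> Vec<u8> {
    let mut key = Vec::with_capacity(prefix.len() + hash.len());
    key.extend_from_slice(prefix);
    key.extend_from_slice(hash);
    key
}

fn main() {
    let hash = [0xab_u8; 32];
    assert_eq!(plain_db_key(&hash).len(), 32);
    assert_eq!(prefixed_db_key(&hash, &[0x01, 0x02]).len(), 34);
}
```

The visible effect in the diff is mechanical: every `get`, `insert`, and `remove` loses its prefix argument, and the state-db key type shrinks back to a fixed-size hash.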
226 changes: 113 additions & 113 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion core/client/Cargo.toml
@@ -18,7 +18,7 @@ state-machine = { package = "substrate-state-machine", path = "../state-machine"
keyring = { package = "substrate-keyring", path = "../keyring", optional = true }
trie = { package = "substrate-trie", path = "../trie", optional = true }
substrate-telemetry = { path = "../telemetry", optional = true }
hash-db = { version = "0.12", optional = true }
hash-db = { version = "0.11", optional = true }
kvdb = { git = "https://github.com/paritytech/parity-common", optional = true, rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d" }
parity-codec = { version = "3.3", default-features = false, features = ["derive"] }
primitives = { package = "substrate-primitives", path = "../primitives", default-features = false }
2 changes: 1 addition & 1 deletion core/client/db/Cargo.toml
@@ -12,7 +12,7 @@ kvdb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c
kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d" }
kvdb-memorydb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d", optional = true }
lru-cache = "0.1.1"
hash-db = { version = "0.12" }
hash-db = { version = "0.11" }
primitives = { package = "substrate-primitives", path = "../../primitives" }
runtime_primitives = { package = "sr-primitives", path = "../../sr-primitives" }
client = { package = "substrate-client", path = "../../client" }
85 changes: 42 additions & 43 deletions core/client/db/src/lib.rs
@@ -41,7 +41,7 @@ use client::ExecutionStrategies;
use parity_codec::{Decode, Encode};
use hash_db::Hasher;
use kvdb::{KeyValueDB, DBTransaction};
-use trie::{MemoryDB, PrefixedMemoryDB, prefixed_key};
+use trie::MemoryDB;
use parking_lot::RwLock;
use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash};
use primitives::storage::well_known_keys;
@@ -267,7 +267,7 @@ impl<Block: BlockT> client::blockchain::ProvideCache<Block> for BlockchainDb<Blo
/// Database transaction
pub struct BlockImportOperation<Block: BlockT, H: Hasher> {
old_state: CachingState<Blake2Hasher, DbState, Block>,
-db_updates: PrefixedMemoryDB<H>,
+db_updates: MemoryDB<H>,
storage_updates: Vec<(Vec<u8>, Option<Vec<u8>>)>,
changes_trie_updates: MemoryDB<H>,
pending_block: Option<PendingBlock<Block>>,
@@ -318,7 +318,7 @@ where Block: BlockT<Hash=H256>,
// Currently cache isn't implemented on full nodes.
}

-fn update_db_storage(&mut self, update: PrefixedMemoryDB<Blake2Hasher>) -> Result<(), client::error::Error> {
+fn update_db_storage(&mut self, update: MemoryDB<Blake2Hasher>) -> Result<(), client::error::Error> {
self.db_updates = update;
Ok(())
}
@@ -329,7 +329,7 @@ where Block: BlockT<Hash=H256>,
return Err(client::error::ErrorKind::GenesisInvalid.into());
}

-let mut transaction: PrefixedMemoryDB<Blake2Hasher> = Default::default();
+let mut transaction: MemoryDB<Blake2Hasher> = Default::default();

for (child_key, child_map) in children {
if !well_known_keys::is_child_storage_key(&child_key) {
@@ -382,23 +382,22 @@ where Block: BlockT<Hash=H256>,

struct StorageDb<Block: BlockT> {
pub db: Arc<KeyValueDB>,
-pub state_db: StateDb<Block::Hash, Vec<u8>>,
+pub state_db: StateDb<Block::Hash, H256>,
}

impl<Block: BlockT> state_machine::Storage<Blake2Hasher> for StorageDb<Block> {
-fn get(&self, key: &H256, prefix: &[u8]) -> Result<Option<DBValue>, String> {
-let key = prefixed_key::<Blake2Hasher>(key, prefix);
-self.state_db.get(&key, self).map(|r| r.map(|v| DBValue::from_slice(&v)))
+fn get(&self, key: &H256) -> Result<Option<DBValue>, String> {
+self.state_db.get(key, self).map(|r| r.map(|v| DBValue::from_slice(&v)))
.map_err(|e| format!("Database backend error: {:?}", e))
}
}

-impl<Block: BlockT> state_db::NodeDb for StorageDb<Block> {
+impl<Block: BlockT> state_db::HashDb for StorageDb<Block> {
type Error = io::Error;
-type Key = [u8];
+type Hash = H256;

-fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
-self.db.get(columns::STATE, key).map(|r| r.map(|v| v.to_vec()))
+fn get(&self, key: &H256) -> Result<Option<Vec<u8>>, Self::Error> {
+self.db.get(columns::STATE, key.as_bytes()).map(|r| r.map(|v| v.to_vec()))
}
}

@@ -414,7 +413,7 @@ impl DbGenesisStorage {
}

impl state_machine::Storage<Blake2Hasher> for DbGenesisStorage {
-fn get(&self, _key: &H256, _prefix: &[u8]) -> Result<Option<DBValue>, String> {
+fn get(&self, _key: &H256) -> Result<Option<DBValue>, String> {
Ok(None)
}
}
@@ -524,7 +523,7 @@ impl<Block: BlockT> state_machine::ChangesTrieRootsStorage<Blake2Hasher> for DbC
}

impl<Block: BlockT> state_machine::ChangesTrieStorage<Blake2Hasher> for DbChangesTrieStorage<Block> {
-fn get(&self, key: &H256, _prefix: &[u8]) -> Result<Option<DBValue>, String> {
+fn get(&self, key: &H256) -> Result<Option<DBValue>, String> {
self.db.get(columns::CHANGES_TRIE, &key[..])
.map_err(|err| format!("{}", err))
}
@@ -568,7 +567,7 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
let blockchain = BlockchainDb::new(db.clone())?;
let meta = blockchain.meta.clone();
let map_e = |e: state_db::Error<io::Error>| ::client::error::Error::from(format!("State database error: {:?}", e));
-let state_db: StateDb<_, _> = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?;
+let state_db: StateDb<Block::Hash, H256> = StateDb::new(pruning, &StateMetaDb(&*db)).map_err(map_e)?;
let storage_db = StorageDb {
db: db.clone(),
state_db,
@@ -841,7 +840,7 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
}

-let mut changeset: state_db::ChangeSet<Vec<u8>> = state_db::ChangeSet::default();
+let mut changeset: state_db::ChangeSet<H256> = state_db::ChangeSet::default();
for (key, (val, rc)) in operation.db_updates.drain() {
if rc > 0 {
changeset.inserted.push((key, val.to_vec()));
@@ -990,7 +989,7 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
}
}

-fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet<Vec<u8>>) {
+fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet<H256>) {
for (key, val) in commit.data.inserted.into_iter() {
transaction.put(columns::STATE, &key[..], &val);
}
@@ -1040,7 +1039,7 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
Ok(BlockImportOperation {
pending_block: None,
old_state,
-db_updates: PrefixedMemoryDB::default(),
+db_updates: MemoryDB::default(),
storage_updates: Default::default(),
changes_trie_updates: MemoryDB::default(),
aux_ops: Vec::new(),
@@ -1423,7 +1422,7 @@ mod tests {

op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap();

-key = op.db_updates.insert(&[], b"hello");
+key = op.db_updates.insert(b"hello");
op.set_block_data(
header,
Some(vec![]),
@@ -1457,8 +1456,8 @@ mod tests {
).0.into();
let hash = header.hash();

-op.db_updates.insert(&[], b"hello");
-op.db_updates.remove(&key, &[]);
+op.db_updates.insert(b"hello");
+op.db_updates.remove(&key);
op.set_block_data(
header,
Some(vec![]),
@@ -1492,7 +1491,7 @@ mod tests {
).0.into();
let hash = header.hash();

-op.db_updates.remove(&key, &[]);
+op.db_updates.remove(&key);
op.set_block_data(
header,
Some(vec![]),
@@ -1558,7 +1557,7 @@ mod tests {
assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root)));

for (key, (val, _)) in changes_trie_update.drain() {
-assert_eq!(backend.changes_trie_storage().unwrap().get(&key, &[]), Ok(Some(val)));
+assert_eq!(backend.changes_trie_storage().unwrap().get(&key), Ok(Some(val)));
}
};

@@ -1684,34 +1683,34 @@ mod tests {
let mut tx = DBTransaction::new();
backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, Default::default(), 12);
backend.storage.db.write(tx).unwrap();
-assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root4, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_some());
-assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_some());
-assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_some());
-assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root1).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root2).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root3).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root4).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root5).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root6).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root7).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root8).unwrap().is_some());

// now simulate finalization of block#16, causing prune of tries at #5..#8
let mut tx = DBTransaction::new();
backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, Default::default(), 16);
backend.storage.db.write(tx).unwrap();
-assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root5).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root6).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root7).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root8).unwrap().is_none());

// now "change" pruning mode to archive && simulate finalization of block#20
// => no changes tries are pruned, because we never prune in archive mode
backend.changes_tries_storage.min_blocks_to_keep = None;
let mut tx = DBTransaction::new();
backend.changes_tries_storage.prune(Some(config), &mut tx, Default::default(), 20);
backend.storage.db.write(tx).unwrap();
-assert!(backend.changes_tries_storage.get(&root9, &[]).unwrap().is_some());
-assert!(backend.changes_tries_storage.get(&root10, &[]).unwrap().is_some());
-assert!(backend.changes_tries_storage.get(&root11, &[]).unwrap().is_some());
-assert!(backend.changes_tries_storage.get(&root12, &[]).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root9).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root10).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root11).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root12).unwrap().is_some());
}

#[test]
@@ -1750,15 +1749,15 @@ mod tests {
let mut tx = DBTransaction::new();
backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, block5, 5);
backend.storage.db.write(tx).unwrap();
-assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root1).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root2).unwrap().is_some());

// now simulate finalization of block#6, causing prune of tries at #2
let mut tx = DBTransaction::new();
backend.changes_tries_storage.prune(Some(config.clone()), &mut tx, block6, 6);
backend.storage.db.write(tx).unwrap();
-assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none());
-assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some());
+assert!(backend.changes_tries_storage.get(&root2).unwrap().is_none());
+assert!(backend.changes_tries_storage.get(&root3).unwrap().is_some());
}

#[test]
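The test changes in the section above show the same shift at the API surface: with unprefixed keys, `insert` takes only the value and derives the key from it, and `remove` takes only that key. Below is a toy sketch of that contract, assuming a reference-counted map keyed by a hash of the value; `ToyMemoryDb` and std's `DefaultHasher` are illustrative substitutes, not the `hash-db` `MemoryDB` implementation.

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

/// Toy stand-in for an unprefixed MemoryDB: entries are keyed by a hash of the value alone.
struct ToyMemoryDb {
    data: HashMap<u64, (Vec<u8>, i32)>, // hash -> (value, reference count)
}

impl ToyMemoryDb {
    fn new() -> Self {
        ToyMemoryDb { data: HashMap::new() }
    }

    /// Insert a value; the returned key is derived from the value, with no position prefix.
    fn insert(&mut self, value: &[u8]) -> u64 {
        let mut hasher = DefaultHasher::new();
        value.hash(&mut hasher);
        let key = hasher.finish();
        let entry = self.data.entry(key).or_insert_with(|| (value.to_vec(), 0));
        entry.1 += 1;
        key
    }

    /// Remove by key only; the prefixed variant reverted here also required the prefix.
    fn remove(&mut self, key: &u64) {
        if let Some(entry) = self.data.get_mut(key) {
            entry.1 -= 1;
        }
    }
}

fn main() {
    let mut db = ToyMemoryDb::new();
    let key = db.insert(b"hello"); // mirrors `op.db_updates.insert(b"hello")` in the tests
    db.remove(&key);               // mirrors `op.db_updates.remove(&key)`
    assert_eq!(db.data[&key].1, 0);
}
```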
4 changes: 2 additions & 2 deletions core/client/src/client.rs
@@ -499,8 +499,8 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
}

impl<'a, Block: BlockT> ChangesTrieStorage<Blake2Hasher> for AccessedRootsRecorder<'a, Block> {
-fn get(&self, key: &H256, prefix: &[u8]) -> Result<Option<DBValue>, String> {
-self.storage.get(key, prefix)
+fn get(&self, key: &H256) -> Result<Option<DBValue>, String> {
+self.storage.get(key)
}
}

4 changes: 2 additions & 2 deletions core/client/src/in_mem.rs
@@ -709,8 +709,8 @@ impl<H: Hasher> state_machine::ChangesTrieRootsStorage<H> for ChangesTrieStorage
}

impl<H: Hasher> state_machine::ChangesTrieStorage<H> for ChangesTrieStorage<H> where H::Out: HeapSizeOf {
-fn get(&self, key: &H::Out, prefix: &[u8]) -> Result<Option<state_machine::DBValue>, String> {
-self.0.get(key, prefix)
+fn get(&self, key: &H::Out) -> Result<Option<state_machine::DBValue>, String> {
+self.0.get(key)
}
}

2 changes: 1 addition & 1 deletion core/client/src/light/fetcher.rs
@@ -282,7 +282,7 @@ impl<E, H, B: BlockT, S: BlockchainStorage<B>, F> LightDataChecker<E, H, B, S, F
// we share the storage for multiple checks, do it here
let mut cht_root = H::Out::default();
cht_root.as_mut().copy_from_slice(local_cht_root.as_ref());
-if !storage.contains(&cht_root, &[]) {
+if !storage.contains(&cht_root) {
return Err(ClientErrorKind::InvalidCHTProof.into());
}

32 changes: 16 additions & 16 deletions core/executor/wasm/Cargo.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions core/primitives/Cargo.toml
@@ -15,8 +15,8 @@ byteorder = { version = "1.1", default-features = false }
primitive-types = { version = "0.2", default-features = false, features = ["codec"] }
impl-serde = { version = "0.1", optional = true }
wasmi = { version = "0.4.3", optional = true }
hash-db = { version = "0.12", default-features = false }
hash256-std-hasher = { version = "0.12", default-features = false }
hash-db = { version = "0.11", default-features = false }
hash256-std-hasher = { version = "0.11", default-features = false }
ring = { version = "0.14", optional = true }
untrusted = { version = "0.6", optional = true }
hex-literal = { version = "0.1", optional = true }

0 comments on commit c2966fe
