
Add childstate_getStorageEntries RPC (#9459)
* Add storage query functions for multiple keys

fixes #9203

* Query all keys in one request and add more tests

* Make it compatible with stable release channel

* Update to new futures

* Update client/rpc/src/state/state_full.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Update client/rpc/src/state/state_full.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Update client/rpc/src/state/state_full.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Update client/rpc/src/state/state_full.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Update client/rpc/src/state/state_full.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Update client/rpc/src/state/state_light.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Update client/rpc/src/state/state_light.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Satisfy borrowck

* Remove non-RPC `storage_entries` functions.

* Revert "Remove non-RPC `storage_entries` functions."

This reverts commit d840015.

* Revert "Revert "Remove non-RPC `storage_entries` functions.""

This reverts commit 5813b43.

* Finally some formatting

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
hirschenberger and bkchr committed Sep 13, 2021
1 parent b13319a commit c022f88
Showing 5 changed files with 210 additions and 1 deletion.
9 changes: 9 additions & 0 deletions client/rpc-api/src/child_state/mod.rs
@@ -66,6 +66,15 @@ pub trait ChildStateApi<Hash> {
hash: Option<Hash>,
) -> FutureResult<Option<StorageData>>;

/// Returns child storage entries for multiple keys at a specific block's state.
#[rpc(name = "childstate_getStorageEntries")]
fn storage_entries(
&self,
child_storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
hash: Option<Hash>,
) -> FutureResult<Vec<Option<StorageData>>>;

/// Returns the hash of a child storage entry at a block's state.
#[rpc(name = "childstate_getStorageHash")]
fn storage_hash(
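
For reference, a minimal sketch of the JSON-RPC request a client might send for the new `childstate_getStorageEntries` method, built with `serde_json`; the child storage key and the two keys below are placeholder hex values (the prefix is the hex encoding of `:child_storage:default:`), not taken from this commit.

use serde_json::json;

/// Hypothetical request body for `childstate_getStorageEntries` (sketch only).
fn example_request() -> serde_json::Value {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "childstate_getStorageEntries",
        "params": [
            // PrefixedStorageKey: hex of ":child_storage:default:" followed by the child trie id ("unique" here).
            "0x3a6368696c645f73746f726167653a64656661756c743a756e69717565",
            // The storage keys to read in a single round trip.
            ["0x6b657931", "0x6b657932"],
            // Optional block hash; `null` means the best block.
            null
        ]
    })
    // The response is an array with one entry per key: a hex-encoded value or `null`.
}
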
17 changes: 17 additions & 0 deletions client/rpc/src/state/mod.rs
@@ -465,6 +465,14 @@ where
key: StorageKey,
) -> FutureResult<Option<StorageData>>;

/// Returns child storage entries at a specific block's state.
fn storage_entries(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>>;

/// Returns the hash of a child storage entry at a block's state.
fn storage_hash(
&self,
@@ -516,6 +524,15 @@ where
self.backend.storage(block, storage_key, key)
}

fn storage_entries(
&self,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
block: Option<Block::Hash>,
) -> FutureResult<Vec<Option<StorageData>>> {
self.backend.storage_entries(block, storage_key, keys)
}

fn storage_keys(
&self,
storage_key: PrefixedStorageKey,
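
On the full-node backend the answers come back in the same order as the requested keys (`try_join_all` preserves input order), so a caller can pair them up again with a plain zip. A small illustrative helper, not part of this commit, assuming the `sp_core` storage types:

use sp_core::storage::{StorageData, StorageKey};

/// Illustrative only: pair each queried key with its (optional) value.
fn pair_entries(
    keys: Vec<StorageKey>,
    values: Vec<Option<StorageData>>,
) -> Vec<(StorageKey, Option<StorageData>)> {
    // One entry per requested key; the full-node backend answers in request order.
    keys.into_iter().zip(values).collect()
}
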
33 changes: 32 additions & 1 deletion client/rpc/src/state/state_full.rs
@@ -18,7 +18,11 @@

//! State API backend for full nodes.

use futures::{future, stream, FutureExt, SinkExt, StreamExt};
use futures::{
future,
future::{err, try_join_all},
stream, FutureExt, SinkExt, StreamExt,
};
use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId};
use log::warn;
use rpc::Result as RpcResult;
@@ -715,6 +719,33 @@ where
async move { r }.boxed()
}

fn storage_entries(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
let child_info = match ChildType::from_prefixed_key(&storage_key) {
Some((ChildType::ParentKeyId, storage_key)) =>
Arc::new(ChildInfo::new_default(storage_key)),
None => return err(client_err(sp_blockchain::Error::InvalidChildStorageKey)).boxed(),
};
let block = match self.block_or_best(block) {
Ok(b) => b,
Err(e) => return err(client_err(e)).boxed(),
};
let client = self.client.clone();
try_join_all(keys.into_iter().map(move |key| {
let res = client
.clone()
.child_storage(&BlockId::Hash(block), &child_info, &key)
.map_err(client_err);

async move { res }
}))
.boxed()
}

fn storage_hash(
&self,
block: Option<Block::Hash>,
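
The full-node path above relies on `futures::future::try_join_all`: results are collected in input order and a single failing read turns the whole request into an error (a missing key is `Ok(None)`, not an error). A standalone sketch of that behaviour, not part of this commit:

use futures::executor::block_on;
use futures::future::{ready, try_join_all};

fn main() {
    // Successful results are collected in the same order as the input futures.
    let ok = block_on(try_join_all(vec![ready(Ok::<u8, &str>(1)), ready(Ok(2))]));
    assert_eq!(ok, Ok(vec![1, 2]));

    // One error short-circuits the whole join into an `Err`.
    let err = block_on(try_join_all(vec![ready(Ok::<u8, &str>(1)), ready(Err("read failed"))]));
    assert_eq!(err, Err("read failed"));
}
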
44 changes: 44 additions & 0 deletions client/rpc/src/state/state_light.rs
@@ -531,6 +531,50 @@ where
child_storage.boxed()
}

fn storage_entries(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
let block = self.block_or_best(block);
let fetcher = self.fetcher.clone();
let keys = keys.iter().map(|k| k.0.clone()).collect::<Vec<_>>();
let child_storage =
resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| {
match result {
Ok(header) => Either::Left(
fetcher
.remote_read_child(RemoteReadChildRequest {
block,
header,
storage_key,
keys: keys.clone(),
retry_count: Default::default(),
})
.then(move |result| {
ready(
result
.map(|data| {
data.iter()
.filter_map(|(k, d)| {
keys.contains(k).then(|| {
d.as_ref().map(|v| StorageData(v.to_vec()))
})
})
.collect::<Vec<_>>()
})
.map_err(client_err),
)
}),
),
Err(error) => Either::Right(ready(Err(error))),
}
});

child_storage.boxed()
}

fn storage_hash(
&self,
block: Option<Block::Hash>,
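
On the light client all keys are fetched with a single `RemoteReadChildRequest` and the returned map is then narrowed back down to the requested keys. A standalone sketch of that filtering step, illustrative only and using plain `Vec<u8>` keys instead of the RPC types:

use std::collections::HashMap;

/// Keep only the entries whose key was requested, cloning any present value.
fn filter_requested(
    data: &HashMap<Vec<u8>, Option<Vec<u8>>>,
    requested: &[Vec<u8>],
) -> Vec<Option<Vec<u8>>> {
    data.iter()
        .filter_map(|(k, v)| requested.contains(k).then(|| v.clone()))
        .collect()
}
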
108 changes: 108 additions & 0 deletions client/rpc/src/state/tests.rs
@@ -93,6 +93,54 @@ fn should_return_storage() {
);
}

#[test]
fn should_return_storage_entries() {
const KEY1: &[u8] = b":mock";
const KEY2: &[u8] = b":turtle";
const VALUE: &[u8] = b"hello world";
const CHILD_VALUE1: &[u8] = b"hello world !";
const CHILD_VALUE2: &[u8] = b"hello world !";

let child_info = ChildInfo::new_default(STORAGE_KEY);
let client = TestClientBuilder::new()
.add_extra_storage(KEY1.to_vec(), VALUE.to_vec())
.add_extra_child_storage(&child_info, KEY1.to_vec(), CHILD_VALUE1.to_vec())
.add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec())
.build();
let genesis_hash = client.genesis_hash();
let (_client, child) = new_full(
Arc::new(client),
SubscriptionManager::new(Arc::new(TaskExecutor)),
DenyUnsafe::No,
None,
);

let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())];
assert_eq!(
executor::block_on(child.storage_entries(
prefixed_storage_key(),
keys.to_vec(),
Some(genesis_hash).into()
))
.map(|x| x.into_iter().map(|x| x.map(|x| x.0.len()).unwrap()).sum::<usize>())
.unwrap(),
CHILD_VALUE1.len() + CHILD_VALUE2.len()
);

// a key that does not exist yields `None` instead of failing the whole call.
let mut failing_keys = vec![StorageKey(b":soup".to_vec())];
failing_keys.extend_from_slice(keys);
assert_matches!(
executor::block_on(child.storage_entries(
prefixed_storage_key(),
failing_keys,
Some(genesis_hash).into()
))
.map(|x| x.iter().all(|x| x.is_some())),
Ok(false)
);
}

#[test]
fn should_return_child_storage() {
let child_info = ChildInfo::new_default(STORAGE_KEY);
@@ -115,6 +163,19 @@ fn should_return_child_storage() {
)),
Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1
);

// a key that does not exist yields `None`.
let failing_key = StorageKey(b":soup".to_vec());
assert_matches!(
executor::block_on(child.storage(
prefixed_storage_key(),
failing_key,
Some(genesis_hash).into()
))
.map(|x| x.is_some()),
Ok(false)
);

assert_matches!(
executor::block_on(child.storage_hash(
child_key.clone(),
Expand All @@ -130,6 +191,53 @@ fn should_return_child_storage() {
);
}

#[test]
fn should_return_child_storage_entries() {
let child_info = ChildInfo::new_default(STORAGE_KEY);
let client = Arc::new(
substrate_test_runtime_client::TestClientBuilder::new()
.add_child_storage(&child_info, "key1", vec![42_u8])
.add_child_storage(&child_info, "key2", vec![43_u8, 44])
.build(),
);
let genesis_hash = client.genesis_hash();
let (_client, child) =
new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None);
let child_key = prefixed_storage_key();
let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())];

let res = executor::block_on(child.storage_entries(
child_key.clone(),
keys.clone(),
Some(genesis_hash).into(),
))
.unwrap();

assert_matches!(
res[0],
Some(StorageData(ref d))
if d[0] == 42 && d.len() == 1
);
assert_matches!(
res[1],
Some(StorageData(ref d))
if d[0] == 43 && d[1] == 44 && d.len() == 2
);
assert_matches!(
executor::block_on(child.storage_hash(
child_key.clone(),
keys[0].clone(),
Some(genesis_hash).into()
))
.map(|x| x.is_some()),
Ok(true)
);
assert_matches!(
executor::block_on(child.storage_size(child_key.clone(), keys[0].clone(), None)),
Ok(Some(1))
);
}

#[test]
fn should_call_contract() {
let client = Arc::new(substrate_test_runtime_client::new());
