This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Add childstate_getStorageEntries RPC #9459

Merged · 19 commits · Sep 13, 2021
Changes from 1 commit

Commits (19):
d2ad280
Add storage query functions for multiple keys
hirschenberger Jul 29, 2021
993a877
Merge branch 'master' of https://github.com/paritytech/substrate into…
hirschenberger Aug 2, 2021
ab725c8
Query all keys in one request and add more tests
hirschenberger Aug 3, 2021
3a3c1ae
Merge branch 'master' of https://github.com/paritytech/substrate into…
hirschenberger Aug 3, 2021
a23ed69
Make it compatible with stable release channel
hirschenberger Aug 3, 2021
fa146dc
Merge branch 'master' of https://github.com/paritytech/substrate into…
hirschenberger Aug 30, 2021
d73a745
Update to new futures
hirschenberger Aug 31, 2021
0cbe5eb
Update client/rpc/src/state/state_full.rs
hirschenberger Sep 9, 2021
cd4c158
Update client/rpc/src/state/state_full.rs
hirschenberger Sep 9, 2021
3199a10
Update client/rpc/src/state/state_full.rs
hirschenberger Sep 9, 2021
a1b6eed
Update client/rpc/src/state/state_full.rs
hirschenberger Sep 9, 2021
83251ad
Update client/rpc/src/state/state_full.rs
hirschenberger Sep 9, 2021
58fe359
Update client/rpc/src/state/state_light.rs
hirschenberger Sep 9, 2021
f3e3ce3
Update client/rpc/src/state/state_light.rs
hirschenberger Sep 9, 2021
d618704
Satisfy borrowck
hirschenberger Sep 9, 2021
d840015
Remove non-RPC `storage_entries` functions.
hirschenberger Sep 9, 2021
5813b43
Revert "Remove non-RPC `storage_entries` functions."
hirschenberger Sep 9, 2021
a001771
Revert "Revert "Remove non-RPC `storage_entries` functions.""
hirschenberger Sep 9, 2021
19f5d24
Finally some formatting
hirschenberger Sep 9, 2021
9 changes: 9 additions & 0 deletions client/rpc-api/src/child_state/mod.rs
@@ -66,6 +66,15 @@ pub trait ChildStateApi<Hash> {
hash: Option<Hash>,
) -> FutureResult<Option<StorageData>>;

/// Returns child storage entries for multiple keys at a specific block's state.
#[rpc(name = "childstate_getStorages")]
fn storages(
Member: storage_entries maybe? storages sounds weird.

Contributor Author: Yes, that's better. Another question is whether we should deprecate storage because of the overlapping functionality.

&self,
child_storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
hash: Option<Hash>,
) -> FutureResult<Vec<Option<StorageData>>>;

/// Returns the hash of a child storage entry at a block's state.
#[rpc(name = "childstate_getStorageHash")]
fn storage_hash(
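
For orientation, a call to the new endpoint would look roughly like this on the wire. This is only a sketch: the method name and parameter order follow the trait definition in this commit (the review above suggests renaming the method), and the hex-encoded prefixed child storage key, the two storage keys, and the null block hash are made-up placeholder values.

{
	"jsonrpc": "2.0",
	"id": 1,
	"method": "childstate_getStorages",
	"params": [
		"0x3a6368696c645f73746f726167653a64656661756c743a6d795f6368696c64",
		["0x4b657931", "0x4b657932"],
		null
	]
}

The response should be an array of the same length as the keys parameter, with null for keys that have no value, mirroring the Vec<Option<StorageData>> return type.
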
24 changes: 24 additions & 0 deletions client/rpc/src/state/mod.rs
@@ -96,6 +96,13 @@ where
key: StorageKey,
) -> FutureResult<Option<StorageData>>;

/// Returns storage entries for multiple keys at a specific block's state.
fn storages(
&self,
block: Option<Block::Hash>,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>>;

/// Returns the hash of a storage entry at a block's state.
fn storage_hash(
&self,
@@ -462,6 +469,14 @@ where
key: StorageKey,
) -> FutureResult<Option<StorageData>>;

/// Returns child storage entries at a specific block's state.
fn storages(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>>;

/// Returns the hash of a child storage entry at a block's state.
fn storage_hash(
&self,
@@ -511,6 +526,15 @@ where
self.backend.storage(block, storage_key, key)
}

fn storages(
&self,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
block: Option<Block::Hash>,
) -> FutureResult<Vec<Option<StorageData>>> {
self.backend.storages(block, storage_key, keys)
}

fn storage_keys(
&self,
storage_key: PrefixedStorageKey,
45 changes: 44 additions & 1 deletion client/rpc/src/state/state_full.rs
@@ -22,7 +22,10 @@ use futures::{future, StreamExt as _, TryStreamExt as _};
use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId};
use log::warn;
use rpc::{
futures::{future::result, stream, Future, Sink, Stream},
futures::{
future::{join_all, result},
stream, Future, Sink, Stream,
},
Result as RpcResult,
};
use std::{
@@ -358,6 +361,22 @@ where
))
}

fn storages(
&self,
block: Option<Block::Hash>,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
let block = match self.block_or_best(block) {
Ok(b) => b,
Err(e) => return Box::new(result(Err(client_err(e)))),
};
let client = self.client.clone();
Box::new(join_all(
keys.into_iter()
.map(move |key| client.storage(&BlockId::Hash(block), &key).map_err(client_err)),
))
(bkchr marked this conversation as resolved.)
}

fn storage_size(
&self,
block: Option<Block::Hash>,
@@ -722,6 +741,30 @@ where
))
}

fn storages(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
let block = match self.block_or_best(block) {
Ok(b) => b,
Err(e) => return Box::new(result(Err(client_err(e)))),
};
let client = self.client.clone();
Box::new(
join_all(keys.into_iter().map(move |key| {
Member: Same here.

let child_info = match ChildType::from_prefixed_key(&storage_key) {
Some((ChildType::ParentKeyId, storage_key)) =>
ChildInfo::new_default(storage_key),
None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
};
client.child_storage(&BlockId::Hash(block), &child_info, &key)
}))
.map_err(client_err),
)
}

fn storage_hash(
&self,
block: Option<Block::Hash>,
68 changes: 67 additions & 1 deletion client/rpc/src/state/state_light.rs
@@ -30,7 +30,7 @@ use log::warn;
use parking_lot::Mutex;
use rpc::{
futures::{
future::{result, Future},
future::{join_all, result, Future},
stream::Stream,
Sink,
},
@@ -245,6 +245,26 @@ where
)
}

fn storages(
&self,
block: Option<Block::Hash>,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
let remote_blockchain = self.remote_blockchain.clone();
let fetcher = self.fetcher.clone();
let block = self.block_or_best(block);
Box::new(join_all(keys.into_iter().map(move |key| {
storage(&*remote_blockchain, fetcher.clone(), block, vec![key.0.clone()])
Member: You should request all keys in one request and not one request per key.

.boxed()
.compat()
.map(move |mut values| {
values
.remove(&key)
.expect("successful request has entries for all requested keys; qed")
Member: Are you sure?

Contributor Author: At least that's the claim made in the storage function, but I'll add a test to check it.

})
})))
}
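
Following the review comment above, here is a hedged sketch of how this light-client implementation could batch every key into a single call to the existing storage helper and then restore the requested key order. This is an assumption about the direction of the later "Query all keys in one request and add more tests" commit, not the code that was merged:

fn storages(
	&self,
	block: Option<Block::Hash>,
	keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
	let remote_blockchain = self.remote_blockchain.clone();
	let fetcher = self.fetcher.clone();
	let block = self.block_or_best(block);
	// Issue one remote read covering every requested key instead of one request per key.
	Box::new(
		storage(
			&*remote_blockchain,
			fetcher,
			block,
			keys.iter().map(|key| key.0.clone()).collect(),
		)
		.boxed()
		.compat()
		.map(move |mut values| {
			// Re-emit the values in the order the keys were requested in.
			keys.iter()
				.map(|key| {
					values
						.remove(key)
						.expect("successful request has entries for all requested keys; qed")
				})
				.collect::<Vec<_>>()
		}),
	)
}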

fn storage_hash(
&self,
block: Option<Block::Hash>,
@@ -563,6 +583,52 @@ where
Box::new(child_storage.boxed().compat())
}

fn storages(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<Option<StorageData>>> {
let block = self.block_or_best(block);
let fetcher = self.fetcher.clone();
let remote_blockchain = self.remote_blockchain.clone();
Box::new(join_all(keys.into_iter().map(move |key| {
let storage_key = storage_key.clone();
let fetcher2 = fetcher.clone();
let child_storage =
resolve_header(&*remote_blockchain, &*fetcher, block).then(move |result| {
match result {
Ok(header) => Either::Left(
fetcher2
.remote_read_child(RemoteReadChildRequest {
block,
header,
storage_key,
keys: vec![key.0.clone()],
Member: Again you are only requesting one key after another.

retry_count: Default::default(),
})
.then(move |result| {
ready(
result
.map(|mut data| {
data.remove(&key.0)
.expect(
"successful result has entry for all keys; qed",
)
.map(StorageData)
})
.map_err(client_err),
)
}),
),
Err(error) => Either::Right(ready(Err(error))),
}
});

Box::new(child_storage.boxed().compat())
})))
}
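
The same reviewer note applies here: all keys can go into a single RemoteReadChildRequest instead of one request per key. A hedged sketch of just the request/response part, assuming the surrounding per-key join_all is dropped in favour of a single future and that keys is captured by the closures:

// One RemoteReadChildRequest covering all keys instead of one request per key.
fetcher2
	.remote_read_child(RemoteReadChildRequest {
		block,
		header,
		storage_key,
		keys: keys.iter().map(|key| key.0.clone()).collect(),
		retry_count: Default::default(),
	})
	.then(move |result| {
		ready(
			result
				.map(|mut data| {
					// Preserve the requested key order in the returned Vec.
					keys.iter()
						.map(|key| {
							data.remove(&key.0)
								.expect("successful result has entry for all keys; qed")
								.map(StorageData)
						})
						.collect::<Vec<_>>()
				})
				.map_err(client_err),
		)
	})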

fn storage_hash(
&self,
block: Option<Block::Hash>,
78 changes: 78 additions & 0 deletions client/rpc/src/state/tests.rs
@@ -98,6 +98,41 @@ fn should_return_storage() {
);
}

#[test]
fn should_return_storages() {
const KEY1: &[u8] = b":mock";
const KEY2: &[u8] = b":turtle";
const VALUE: &[u8] = b"hello world";
const CHILD_VALUE1: &[u8] = b"hello world !";
const CHILD_VALUE2: &[u8] = b"hello world !";

let child_info = ChildInfo::new_default(STORAGE_KEY);
let client = TestClientBuilder::new()
.add_extra_storage(KEY1.to_vec(), VALUE.to_vec())
.add_extra_child_storage(&child_info, KEY1.to_vec(), CHILD_VALUE1.to_vec())
.add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec())
.build();
let genesis_hash = client.genesis_hash();
let (_client, child) = new_full(
Arc::new(client),
SubscriptionManager::new(Arc::new(TaskExecutor)),
DenyUnsafe::No,
None,
);
let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())];

assert_eq!(
executor::block_on(
child
.storages(prefixed_storage_key(), keys.to_vec(), Some(genesis_hash).into())
.map(|x| x.into_iter().map(|x| x.map(|y| y.0.len()).unwrap()).sum::<usize>())
.compat(),
)
.unwrap(),
CHILD_VALUE1.len() + CHILD_VALUE2.len(),
);
}

#[test]
fn should_return_child_storage() {
let child_info = ChildInfo::new_default(STORAGE_KEY);
@@ -130,6 +165,49 @@ fn should_return_child_storage() {
assert_matches!(child.storage_size(child_key.clone(), key.clone(), None).wait(), Ok(Some(1)));
}

#[test]
fn should_return_child_storages() {
let child_info = ChildInfo::new_default(STORAGE_KEY);
let client = Arc::new(
substrate_test_runtime_client::TestClientBuilder::new()
.add_child_storage(&child_info, "key1", vec![42_u8])
.add_child_storage(&child_info, "key2", vec![43_u8, 44])
.build(),
);
let genesis_hash = client.genesis_hash();
let (_client, child) =
new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None);
let child_key = prefixed_storage_key();
let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())];

let res = child
.storages(child_key.clone(), keys.clone(), Some(genesis_hash).into())
.wait()
.unwrap();

assert_matches!(
res[0],
Some(StorageData(ref d))
if d[0] == 42 && d.len() == 1
);
assert_matches!(
res[1],
Some(StorageData(ref d))
if d[0] == 43 && d[1] == 44 && d.len() == 2
);
assert_matches!(
child
.storage_hash(child_key.clone(), keys[0].clone(), Some(genesis_hash).into(),)
.wait()
.map(|x| x.is_some()),
Ok(true)
);
assert_matches!(
child.storage_size(child_key.clone(), keys[0].clone(), None).wait(),
Ok(Some(1))
);
}

#[test]
fn should_call_contract() {
let client = Arc::new(substrate_test_runtime_client::new());