This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Safe and sane multi-item storage removal #11490

Merged: 40 commits, May 29, 2022

Commits (40):
0e48dc9  Fix overlay prefix removal result (gavofyork, May 18, 2022)
ae9b285  Second part of the overlay prefix removal fix. (gavofyork, May 18, 2022)
7f80f12  Report only items deleted from storage in clear_prefix (gavofyork, May 20, 2022)
3e580e5  Fix kill_prefix (gavofyork, May 20, 2022)
4a5ff62  Formatting (gavofyork, May 20, 2022)
8d56c13  Remove unused code (gavofyork, May 20, 2022)
91362cd  Fixes (gavofyork, May 20, 2022)
4767538  Fixes (gavofyork, May 20, 2022)
08b898c  Introduce clear_prefix host function v3 (gavofyork, May 20, 2022)
aee7993  Formatting (gavofyork, May 20, 2022)
a1982d1  Use v2 for now (gavofyork, May 20, 2022)
ed26985  Fixes (gavofyork, May 20, 2022)
72f8fdd  Formatting (gavofyork, May 20, 2022)
d97aa89  Docs (gavofyork, May 20, 2022)
f4bbf8d  Child prefix removal should also hide v3 for now (gavofyork, May 20, 2022)
e82b1ca  Fixes (gavofyork, May 20, 2022)
73294b0  Fixes (gavofyork, May 20, 2022)
8a8efb1  Formatting (gavofyork, May 20, 2022)
a626d54  Fixes (gavofyork, May 21, 2022)
6867534  apply_to_keys_whle takes start_at (gavofyork, May 21, 2022)
94e885e  apply_to_keys_whle takes start_at (gavofyork, May 21, 2022)
2566a4d  apply_to_keys_whle takes start_at (gavofyork, May 23, 2022)
a71f9a0  Cursor API; force limits (gavofyork, May 23, 2022)
f435107  Use unsafe deprecated functions (gavofyork, May 23, 2022)
bc181a0  Formatting (gavofyork, May 23, 2022)
9317f6a  Merge remote-tracking branch 'origin/master' into gav-clear-prefix-v2 (gavofyork, May 23, 2022)
0e51dbe  Fixes (gavofyork, May 23, 2022)
ee53604  Grumbles (gavofyork, May 25, 2022)
1fb2ef5  Fixes (gavofyork, May 25, 2022)
0338fce  Docs (gavofyork, May 25, 2022)
73d1fc6  Some nitpicks :see_no_evil: (bkchr, May 25, 2022)
ef7f475  Update primitives/externalities/src/lib.rs (gavofyork, May 25, 2022)
7db73b1  Formatting (gavofyork, May 25, 2022)
6aeb3a3  Fixes (KiChjang, May 25, 2022)
4692d06  cargo fmt (KiChjang, May 25, 2022)
7e8a8c2  Fixes (KiChjang, May 25, 2022)
c22b924  Fixes (gavofyork, May 25, 2022)
edf7ce0  Update primitives/io/src/lib.rs (gavofyork, May 25, 2022)
33224f2  Formatting (gavofyork, May 25, 2022)
a5f349b  Fixes (gavofyork, May 26, 2022)
1 change: 1 addition & 0 deletions Cargo.lock

(Generated file; the Cargo.lock diff is not rendered.)
3 changes: 2 additions & 1 deletion client/db/src/bench.rs

@@ -393,10 +393,11 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
 		&self,
 		child_info: Option<&ChildInfo>,
 		prefix: Option<&[u8]>,
+		start_at: Option<&[u8]>,
 		f: F,
 	) {
 		if let Some(ref state) = *self.state.borrow() {
-			state.apply_to_keys_while(child_info, prefix, f)
+			state.apply_to_keys_while(child_info, prefix, start_at, f)
 		}
 	}
3 changes: 2 additions & 1 deletion client/db/src/lib.rs

@@ -226,9 +226,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
 		&self,
 		child_info: Option<&ChildInfo>,
 		prefix: Option<&[u8]>,
+		start_at: Option<&[u8]>,
 		f: F,
 	) {
-		self.state.apply_to_keys_while(child_info, prefix, f)
+		self.state.apply_to_keys_while(child_info, prefix, start_at, f)
 	}

 	fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
6 changes: 4 additions & 2 deletions client/db/src/storage_cache.rs

@@ -639,9 +639,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
 		&self,
 		child_info: Option<&ChildInfo>,
 		prefix: Option<&[u8]>,
+		start_at: Option<&[u8]>,
 		f: F,
 	) {
-		self.state.apply_to_keys_while(child_info, prefix, f)
+		self.state.apply_to_keys_while(child_info, prefix, start_at, f)
 	}

 	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {

@@ -839,9 +840,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>>
 		&self,
 		child_info: Option<&ChildInfo>,
 		prefix: Option<&[u8]>,
+		start_at: Option<&[u8]>,
 		f: F,
 	) {
-		self.caching_state().apply_to_keys_while(child_info, prefix, f)
+		self.caching_state().apply_to_keys_while(child_info, prefix, start_at, f)
 	}

 	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
2 changes: 2 additions & 0 deletions frame/bags-list/src/list/mod.rs

@@ -94,7 +94,9 @@ impl<T: Config<I>, I: 'static> List<T, I> {
 	/// this function should generally not be used in production as it could lead to a very large
 	/// number of storage accesses.
 	pub(crate) fn unsafe_clear() {
+		#[allow(deprecated)]
 		crate::ListBags::<T, I>::remove_all(None);
+		#[allow(deprecated)]
 		crate::ListNodes::<T, I>::remove_all();
 	}
1 change: 1 addition & 0 deletions frame/contracts/src/migration.rs

@@ -56,6 +56,7 @@ mod v4 {
 	use super::*;

 	pub fn migrate<T: Config>() -> Weight {
+		#[allow(deprecated)]
 		migration::remove_storage_prefix(<Pallet<T>>::name().as_bytes(), b"CurrentSchedule", b"");
 		T::DbWeight::get().writes(1)
 	}
14 changes: 7 additions & 7 deletions frame/contracts/src/storage.rs

@@ -27,12 +27,12 @@ use crate::{
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
 	dispatch::{DispatchError, DispatchResult},
-	storage::child::{self, ChildInfo, KillStorageResult},
+	storage::child::{self, ChildInfo},
 	weights::Weight,
 };
 use scale_info::TypeInfo;
 use sp_core::crypto::UncheckedFrom;
-use sp_io::hashing::blake2_256;
+use sp_io::{hashing::blake2_256, KillStorageResult};
 use sp_runtime::{
 	traits::{Hash, Zero},
 	RuntimeDebug,

@@ -266,16 +266,16 @@ where
 		while !queue.is_empty() && remaining_key_budget > 0 {
 			// Cannot panic due to loop condition
 			let trie = &mut queue[0];
-			let outcome =
-				child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget));
+			#[allow(deprecated)]
+			let outcome = child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget));
 			let keys_removed = match outcome {
 				// This happens when our budget wasn't large enough to remove all keys.
-				KillStorageResult::SomeRemaining(count) => count,
-				KillStorageResult::AllRemoved(count) => {
+				KillStorageResult::SomeRemaining(c) => c,
+				KillStorageResult::AllRemoved(c) => {
 					// We do not care to preserve order. The contract is deleted already and
 					// no one waits for the trie to be deleted.
 					queue.swap_remove(0);
-					count
+					c
 				},
 			};
 			remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed);
1 change: 1 addition & 0 deletions frame/elections-phragmen/src/benchmarking.rs

@@ -148,6 +148,7 @@ fn clean<T: Config>() {
 	<Members<T>>::kill();
 	<Candidates<T>>::kill();
 	<RunnersUp<T>>::kill();
+	#[allow(deprecated)]
 	<Voting<T>>::remove_all(None);
 }
2 changes: 2 additions & 0 deletions frame/im-online/src/lib.rs

@@ -900,7 +900,9 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
 		// Remove all received heartbeats and number of authored blocks from the
 		// current session, they have already been processed and won't be needed
 		// anymore.
+		#[allow(deprecated)]
 		ReceivedHeartbeats::<T>::remove_prefix(&T::ValidatorSet::session_index(), None);
+		#[allow(deprecated)]
 		AuthoredBlocks::<T>::remove_prefix(&T::ValidatorSet::session_index(), None);

 		if offenders.is_empty() {
2 changes: 2 additions & 0 deletions frame/scheduler/src/lib.rs

@@ -565,6 +565,7 @@ impl<T: Config> Pallet<T> {
 			},
 		);

+		#[allow(deprecated)]
 		frame_support::storage::migration::remove_storage_prefix(
 			Self::name().as_bytes(),
 			b"StorageVersion",

@@ -601,6 +602,7 @@ impl<T: Config> Pallet<T> {
 			)
 		});

+		#[allow(deprecated)]
 		frame_support::storage::migration::remove_storage_prefix(
 			Self::name().as_bytes(),
 			b"StorageVersion",
3 changes: 3 additions & 0 deletions frame/society/src/lib.rs

@@ -1067,6 +1067,7 @@ pub mod pallet {
 			Founder::<T, I>::kill();
 			Rules::<T, I>::kill();
 			Candidates::<T, I>::kill();
+			#[allow(deprecated)]
 			SuspendedCandidates::<T, I>::remove_all(None);
 			Self::deposit_event(Event::<T, I>::Unfounded { founder });
 			Ok(())

@@ -1511,6 +1512,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 			.collect::<Vec<_>>();

 		// Clean up all votes.
+		#[allow(deprecated)]
 		<Votes<T, I>>::remove_all(None);

 		// Reward one of the voters who voted the right way.

@@ -1695,6 +1697,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		}

 		// Clean up all votes.
+		#[allow(deprecated)]
 		<DefenderVotes<T, I>>::remove_all(None);
 	}
9 changes: 9 additions & 0 deletions frame/staking/src/pallet/impls.rs

@@ -585,8 +585,11 @@ impl<T: Config> Pallet<T> {

 	/// Clear all era information for given era.
 	pub(crate) fn clear_era_information(era_index: EraIndex) {
+		#[allow(deprecated)]
 		<ErasStakers<T>>::remove_prefix(era_index, None);
+		#[allow(deprecated)]
 		<ErasStakersClipped<T>>::remove_prefix(era_index, None);
+		#[allow(deprecated)]
 		<ErasValidatorPrefs<T>>::remove_prefix(era_index, None);
 		<ErasValidatorReward<T>>::remove(era_index);
 		<ErasRewardPoints<T>>::remove(era_index);

@@ -984,9 +987,13 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {

 	#[cfg(feature = "runtime-benchmarks")]
 	fn clear() {
+		#[allow(deprecated)]
 		<Bonded<T>>::remove_all(None);
+		#[allow(deprecated)]
 		<Ledger<T>>::remove_all(None);
+		#[allow(deprecated)]
 		<Validators<T>>::remove_all();
+		#[allow(deprecated)]
 		<Nominators<T>>::remove_all();

 		T::VoterList::unsafe_clear();

@@ -1368,7 +1375,9 @@ impl<T: Config> SortedListProvider<T::AccountId> for UseNominatorsAndValidatorsM
 	fn unsafe_clear() {
 		// NOTE: Caller must ensure this doesn't lead to too many storage accesses. This is a
 		// condition of SortedListProvider::unsafe_clear.
+		#[allow(deprecated)]
 		Nominators::<T>::remove_all();
+		#[allow(deprecated)]
 		Validators::<T>::remove_all();
 	}
 }
2 changes: 2 additions & 0 deletions frame/staking/src/slashing.rs

@@ -557,7 +557,9 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> {

 /// Clear slashing metadata for an obsolete era.
 pub(crate) fn clear_era_metadata<T: Config>(obsolete_era: EraIndex) {
+	#[allow(deprecated)]
 	<Pallet<T> as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era, None);
+	#[allow(deprecated)]
 	<Pallet<T> as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era, None);
 }
2 changes: 2 additions & 0 deletions frame/staking/src/testing_utils.rs

@@ -36,9 +36,11 @@ const SEED: u32 = 0;

 /// This function removes all validators and nominators from storage.
 pub fn clear_validators_and_nominators<T: Config>() {
+	#[allow(deprecated)]
 	Validators::<T>::remove_all();

 	// whenever we touch nominators counter we should update `T::VoterList` as well.
+	#[allow(deprecated)]
 	Nominators::<T>::remove_all();

 	// NOTE: safe to call outside block production
8 changes: 6 additions & 2 deletions frame/support/src/lib.rs

@@ -1097,8 +1097,12 @@ pub mod tests {
 			DoubleMap::insert(&(key1 + 1), &key2, &4u64);
 			DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64);
 			assert!(matches!(
-				DoubleMap::remove_prefix(&key1, None),
-				sp_io::KillStorageResult::AllRemoved(0), // all in overlay
+				DoubleMap::clear_prefix(&key1, u32::max_value(), None),
+				// Note this is the incorrect answer (for now), since we are using v2 of
+				// `clear_prefix`.
+				// When we switch to v3, then this will become:
+				// sp_io::MultiRemovalResults::NoneLeft { db: 0, total: 2 },
+				sp_io::MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 },
 			));
 			assert_eq!(DoubleMap::get(&key1, &key2), 0u64);
 			assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64);
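For contrast, a version-agnostic form of this assertion is sketched below. It is an editorial illustration rather than part of the diff, and it checks only what both the v2 shim and an eventual v3 host function should agree on: that nothing remains under the prefix afterwards.

	// Sketch: assert completion without pinning the counter values, which
	// differ between the v2 and v3 `clear_prefix` host functions.
	let res = DoubleMap::clear_prefix(&key1, u32::max_value(), None);
	assert!(res.maybe_cursor.is_none()); // no cursor left => nothing remains
	assert_eq!(DoubleMap::get(&key1, &key2), 0u64); // entries under `key1` are gone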
55 changes: 54 additions & 1 deletion frame/support/src/storage/child.rs

@@ -21,7 +21,7 @@
 // NOTE: could replace unhashed by having only one kind of storage (top trie being the child info
 // of null length parent storage key).

-pub use crate::sp_io::KillStorageResult;
+pub use crate::sp_io::{KillStorageResult, MultiRemovalResults};
 use crate::sp_std::prelude::*;
 use codec::{Codec, Decode, Encode};
 pub use sp_core::storage::{ChildInfo, ChildType, StateVersion};

@@ -136,13 +136,66 @@ pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool {
 /// not make much sense because it is not cumulative when called inside the same block.
 /// Use this function to distribute the deletion of a single child trie across multiple
 /// blocks.
+#[deprecated = "Use `clear_storage` instead"]
 pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageResult {
 	match child_info.child_type() {
 		ChildType::ParentKeyId =>
 			sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit),
 	}
 }
+
+/// Partially clear the child storage of each key-value pair.
+///
+/// # Limit
+///
+/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
+/// maximum number of backend iterations which may be done by this operation and as such
+/// represents the maximum number of backend deletions which may happen. A *limit* of zero
+/// implies that no keys will be deleted, though there may be a single iteration done.
+///
+/// The limit can be used to partially delete storage items in case it is too large or costly
+/// to delete all in a single operation.
+///
+/// # Cursor
+///
+/// A *cursor* may be passed into this operation with `maybe_cursor`. `None` should only be
+/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent
+/// calls operating on the same prefix should pass `Some`, and this value should be equal to
+/// the previous call result's `maybe_cursor` field. The only exception to this is when you can
+/// guarantee that the subsequent call is in a new block; in this case the previous call's
+/// result cursor need not be passed in, and a `None` may be passed instead. This exception may
+/// be useful when making this call solely from a block-hook such as `on_initialize`.
+///
+/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result.
+/// Once the resultant `maybe_cursor` field is `None`, then no further items remain to be
+/// deleted.
+///
+/// NOTE: After the initial call for any given child storage, it is important that no further
+/// keys are inserted. If so, then they may or may not be deleted by subsequent calls.
+///
+/// # Note
+///
+/// Please note that keys which are residing in the overlay for the child are deleted without
+/// counting towards the `limit`.
+pub fn clear_storage(
+	child_info: &ChildInfo,
+	maybe_limit: Option<u32>,
+	_maybe_cursor: Option<&[u8]>,
+) -> MultiRemovalResults {
+	// TODO: Once the network has upgraded to include the new host functions, this code can be
+	// enabled.
+	// sp_io::default_child_storage::storage_kill(prefix, maybe_limit, maybe_cursor)
+	let r = match child_info.child_type() {
+		ChildType::ParentKeyId =>
+			sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit),
+	};
+	use sp_io::KillStorageResult::*;
+	let (maybe_cursor, backend) = match r {
+		AllRemoved(db) => (None, db),
+		SomeRemaining(db) => (Some(child_info.storage_key().to_vec()), db),
+	};
+	MultiRemovalResults { maybe_cursor, backend, unique: backend, loops: backend }
+}

 /// Ensure `key` has no explicit entry in storage.
 pub fn kill(child_info: &ChildInfo, key: &[u8]) {
 	match child_info.child_type() {
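To make the limit and cursor rules above concrete, here is a minimal sketch (editorial, not part of the diff) of the multi-block pattern the documentation describes, assuming a caller that holds a `ChildInfo` and runs once per block, e.g. from `on_initialize`; `KEYS_PER_BLOCK` is an illustrative budget:

	use frame_support::storage::child;
	use sp_core::storage::ChildInfo;

	/// One bounded clearing step per block. Since each call lands in a new
	/// block, the documented exception applies: `None` may be passed for the
	/// cursor instead of threading through the previous result.
	fn clear_step(child_info: &ChildInfo) -> bool {
		const KEYS_PER_BLOCK: u32 = 512; // illustrative budget
		let res = child::clear_storage(child_info, Some(KEYS_PER_BLOCK), None);
		// A `None` cursor in the result means the child trie is now empty.
		res.maybe_cursor.is_none()
	}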
Review comments on `clear_storage`:

[Contributor] Are there any use cases where it makes sense to pass None to maybe_limit? If not, it sounds like we should just not make it an Option.

[gavofyork] Might be worth keeping the host functions general for now. Can't predict future uses quite yet.

[Contributor] WTF? Now our code using the new ::clear API is in an infinite loop, thanks a lot.

[Contributor] If you're writing this comment just to blow off some steam, this is not the right place. What exactly is the code in your repo that is undergoing an infinite loop after this PR?

[Contributor] Code like this:

	let mut res = <MyMap<T>>::clear(16, None);
	while let Some(cursor) = res.maybe_cursor {
		res = <MyMap<T>>::clear(16, Some(&cursor));
	}

where MyMap is:

	#[pallet::storage]
	pub type MyMap<T: Config> =
		StorageMap<_, Twox64Concat, T::AccountId, MyValue, OptionQuery>;

It is quite a serious issue with how one would use a cursor. Here the issue is obvious: the cursor is simply ignored, leading to a never-ending loop, since the code goes over the same data over and over again. The workaround is to use the deprecated remove_all call. However, you'd think this new API would work.

[Member] Yeah, that is right! Sorry, that was an oversight by us. This currently happens because we don't have the new host function enabled. With it, we wouldn't get this infinite loop.

[Contributor] That's my understanding as well, and it is also understandable. But it probably shouldn't have been integrated at this stage, and it doesn't help that the only working option at this time is remove_all. We're guilty of this too, but somehow this issue passed through all the QA layers here and at our end, and we ended up with a bug. I propose that, as a hotfix measure, and so that people don't start jumping to the new (and so far broken) API, we undeprecate remove_all and hide the new clear implementations behind a feature flag (unstable?).

[gavofyork, Jul 16, 2022] clear does not give the semantics needed for the code example above to work. This is why the documentation reads: "*Attempt* to remove all items from the map." Emphasis added on purpose. clear attempts to make progress with each call, and once per block it certainly will with any sensible implementation. But it will not necessarily make progress on every call; indeed, with current host functions only the first call per transaction may make any progress. The API avoided giving guarantees so that downstream projects can write code which works regardless of the status of host functions. A simpler (and non-broken) version of the code above is just <MyMap<T>>::clear(u32::max_value(), None);. This is not especially sensible, since it essentially leaves the amount of work unlimited and might cause your block to be overweight and unvalidatable. In general, if you see an API designed to force a limit on the number of operations, it might not be such a great idea to work around the API designer's intention by placing it within an unbounded loop.

[Contributor] First of all, thanks for the <MyMap<T>>::clear(u32::max_value(), None) workaround; nothing in the code or docs of this fn hints at trying that, simply because in Rust one doesn't usually have to resort to it (people tend to use Option to express an absent limit). It would be nice if there were still a call that took no limit argument, or if this call took the limit as an Option; at the very least, the "pass u32::max_value() as the limit" trick should be added to the docs. Secondly, this is going even more sideways. When I use the code the way the cursor is documented to behave, I expect it to do what is documented. I am not trying to work around anything; I am using the API as documented, and it is not my fault that it is broken. I have tried to make sense of that phrase in the general case, and I don't think it holds up; cursor APIs are precisely designed to be used in a loop of some shape or form. How else would you use them? What I'd like to see improved here is the documentation on correctness. The documentation does not mention that this function can only make progress once per block, but it should have. In fact, I don't think it was intended to have this limitation: with a proper overlay API it could just work, but that's where the new host functions are needed, right? And that's what I expected from this API, because why would it be worth building otherwise? We already have remove_prefix, which takes a limit, only works over multiple blocks, and has no cursor. So, one question about intent: will the cursor work if we pass it from one block to another? Obviously it doesn't matter in the current implementation, but what is the intent? It would be nice if that were explained more thoroughly in the documentation. This is pretty serious, and it seems the issue could be shoved under the rug without a proper resolution, and without any real reason. Currently the documentation says nothing about the requirement to run the code over multiple blocks (which would work properly, since the removals would span many instances of the storage overlay). And since the cursor is unused in this implementation, the documentation on it will still not be true. Really, I don't see how to make this API right without proper support from the host end, so I'd stick with the suggestion from my previous message.

[Contributor] I don't see how this is broken. If I were to write the code that you provided above, I would have checked whether the returned cursor is at the same position as it was before the clear operation. When I see the word "attempt", that is immediately what I think of, because it signals that the cursor may not advance after every operation. Regardless of whether or not we actually consume the cursor provided, checking that the cursor has in fact advanced is a sensible operation to perform after every clear call.
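Following the guidance in the thread above, a corrected form of the looping snippet would bound the work per block and persist the cursor between blocks instead of spinning inside one transaction. A hedged sketch, reusing the reporter's hypothetical MyMap and adding a hypothetical ClearCursor storage value for illustration:

	#[pallet::storage]
	pub type ClearCursor<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>; // hypothetical

	/// Run once per block (e.g. from `on_initialize`): perform one bounded step.
	fn clear_my_map_step<T: Config>() {
		let prev = ClearCursor::<T>::take();
		let res = MyMap::<T>::clear(16, prev.as_deref());
		if let Some(cursor) = res.maybe_cursor {
			// Not finished yet: remember where to resume in the next block.
			ClearCursor::<T>::put(cursor);
		}
	}

Under the v2 host function the cursor argument is ignored, but because each step runs in a fresh block (and thus a fresh overlay), progress is still made; under v3 the cursor would resume deletion where the previous step stopped.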
21 changes: 19 additions & 2 deletions frame/support/src/storage/generator/double_map.rs

@@ -202,11 +202,28 @@
 		unhashed::kill(&Self::storage_double_map_final_key(k1, k2))
 	}

-	fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
+	fn remove_prefix<KArg1>(k1: KArg1, maybe_limit: Option<u32>) -> sp_io::KillStorageResult
 	where
 		KArg1: EncodeLike<K1>,
 	{
-		unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit)
+		unhashed::clear_prefix(Self::storage_double_map_final_key1(k1).as_ref(), maybe_limit, None)
+			.into()
 	}

+	fn clear_prefix<KArg1>(
+		k1: KArg1,
+		limit: u32,
+		maybe_cursor: Option<&[u8]>,
+	) -> sp_io::MultiRemovalResults
+	where
+		KArg1: EncodeLike<K1>,
+	{
+		unhashed::clear_prefix(
+			Self::storage_double_map_final_key1(k1).as_ref(),
+			Some(limit),
+			maybe_cursor,
+		)
+		.into()
+	}

 	fn iter_prefix_values<KArg1>(k1: KArg1) -> storage::PrefixIterator<V>
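The `.into()` calls above imply a conversion from the new `MultiRemovalResults` back to the legacy `KillStorageResult` used by the deprecated `remove_prefix`. A plausible shape for that mapping, inferred from the reverse mapping in `child::clear_storage` earlier in this diff (a sketch, not the actual sp_io implementation):

	use sp_io::{KillStorageResult, MultiRemovalResults};

	// Presumed mapping: a surviving cursor signals `SomeRemaining`; in both
	// cases the backend-deletion count is carried across.
	fn to_kill_storage_result(r: MultiRemovalResults) -> KillStorageResult {
		match r.maybe_cursor {
			None => KillStorageResult::AllRemoved(r.backend),
			Some(_) => KillStorageResult::SomeRemaining(r.backend),
		}
	}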
17 changes: 16 additions & 1 deletion frame/support/src/storage/generator/nmap.rs

@@ -183,7 +183,22 @@
 	where
 		K: HasKeyPrefix<KP>,
 	{
-		unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key), limit)
+		unhashed::clear_prefix(&Self::storage_n_map_partial_key(partial_key), limit, None).into()
 	}

+	fn clear_prefix<KP>(
+		partial_key: KP,
+		limit: u32,
+		maybe_cursor: Option<&[u8]>,
+	) -> sp_io::MultiRemovalResults
+	where
+		K: HasKeyPrefix<KP>,
+	{
+		unhashed::clear_prefix(
+			&Self::storage_n_map_partial_key(partial_key),
+			Some(limit),
+			maybe_cursor,
+		)
+	}

 	fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<V>
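Usage mirrors the double-map case. A hedged sketch against a hypothetical `MyNMap`, clearing at most 100 entries under a partial key covering the first key component only:

	// `MyNMap` is hypothetical; `(42u32,)` is a partial key over the first
	// key component. A `Some` cursor in the result means more entries remain
	// for a later block.
	let res = MyNMap::<T>::clear_prefix((42u32,), 100, None);
	if res.maybe_cursor.is_some() {
		// Schedule another bounded step in a subsequent block.
	}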