From 64d5fd4c1167293455b30d1894e6aa13f9e7e8ad Mon Sep 17 00:00:00 2001
From: GreenBaneling | Supercolony
Date: Sat, 21 May 2022 12:30:23 +0100
Subject: [PATCH] Removed `collection` and `lazy` from storage (#1271)

* Removed collections and lazy

* Fixed comment
---
 .../src/collections/binary_heap/children.rs   |  209 ---
 .../collections/binary_heap/children_vec.rs   |  386 -----
 .../src/collections/binary_heap/impls.rs      |   48 -
 .../src/collections/binary_heap/mod.rs        |  270 ----
 .../src/collections/binary_heap/reverse.rs    |  120 --
 .../src/collections/binary_heap/storage.rs    |  112 --
 .../src/collections/binary_heap/tests.rs      |  403 -----
 .../src/collections/bitstash/counts.rs        |  115 --
 .../src/collections/bitstash/fuzz_tests.rs    |   74 -
 .../storage/src/collections/bitstash/mod.rs   |  187 ---
 .../src/collections/bitstash/storage.rs       |  113 --
 .../storage/src/collections/bitstash/tests.rs |  164 --
 .../storage/src/collections/bitvec/bitref.rs  |  196 ---
 .../storage/src/collections/bitvec/bits256.rs |  373 -----
 .../storage/src/collections/bitvec/bitsref.rs |  206 ---
 .../storage/src/collections/bitvec/impls.rs   |   74 -
 crates/storage/src/collections/bitvec/iter.rs |  310 ----
 crates/storage/src/collections/bitvec/mod.rs  |  307 ----
 .../storage/src/collections/bitvec/storage.rs |  110 --
 .../storage/src/collections/bitvec/tests.rs   |  212 ---
 .../src/collections/hashmap/fuzz_tests.rs     |  264 ----
 .../storage/src/collections/hashmap/impls.rs  |  179 ---
 .../storage/src/collections/hashmap/iter.rs   |  420 -----
 crates/storage/src/collections/hashmap/mod.rs |  565 -------
 .../src/collections/hashmap/storage.rs        |  155 --
 .../storage/src/collections/hashmap/tests.rs  |  383 -----
 crates/storage/src/collections/mod.rs         |   57 -
 .../storage/src/collections/smallvec/impls.rs |  124 --
 .../storage/src/collections/smallvec/iter.rs  |  213 ---
 .../storage/src/collections/smallvec/mod.rs   |  318 ----
 .../src/collections/smallvec/storage.rs       |   87 --
 .../storage/src/collections/smallvec/tests.rs |  456 ------
 crates/storage/src/collections/stash/impls.rs |  154 --
 crates/storage/src/collections/stash/iter.rs  |  321 ----
 crates/storage/src/collections/stash/mod.rs   |  588 -------
 .../storage/src/collections/stash/storage.rs  |  192 ---
 crates/storage/src/collections/stash/tests.rs |  812 ----------
 .../storage/src/collections/vec/fuzz_tests.rs |  191 ---
 crates/storage/src/collections/vec/impls.rs   |  139 --
 crates/storage/src/collections/vec/iter.rs    |  213 ---
 crates/storage/src/collections/vec/mod.rs     |  524 -------
 crates/storage/src/collections/vec/storage.rs |   92 --
 crates/storage/src/collections/vec/tests.rs   |  561 -------
 crates/storage/src/hashmap_entry_api_tests.rs |  446 ------
 crates/storage/src/lazy/cache_cell.rs         |  105 --
 crates/storage/src/lazy/entry.rs              |  337 ----
 crates/storage/src/lazy/lazy_array.rs         |  881 -----------
 crates/storage/src/lazy/lazy_cell.rs          |  813 ----------
 crates/storage/src/lazy/lazy_hmap.rs          | 1360 -----------------
 crates/storage/src/lazy/lazy_imap.rs          |  790 ----------
 crates/storage/src/lazy/mapping.rs            |    4 +-
 crates/storage/src/lazy/mod.rs                |  302 ----
 crates/storage/src/lib.rs                     |   20 +-
 crates/storage/src/test_utils.rs              |  136 --
 crates/storage/src/traits/mod.rs              |    8 +-
 crates/storage/src/traits/optspec.rs          |   98 +-
 56 files changed, 6 insertions(+), 16291 deletions(-)
 delete mode 100644 crates/storage/src/collections/binary_heap/children.rs
 delete mode 100644 crates/storage/src/collections/binary_heap/children_vec.rs
 delete mode 100644 crates/storage/src/collections/binary_heap/impls.rs
 delete mode 100644 crates/storage/src/collections/binary_heap/mod.rs
 delete mode 100644 crates/storage/src/collections/binary_heap/reverse.rs
 delete mode 100644 crates/storage/src/collections/binary_heap/storage.rs
 delete mode 100644 crates/storage/src/collections/binary_heap/tests.rs
 delete mode 100644 crates/storage/src/collections/bitstash/counts.rs
 delete mode 100644 crates/storage/src/collections/bitstash/fuzz_tests.rs
 delete mode 100644 crates/storage/src/collections/bitstash/mod.rs
 delete mode 100644 crates/storage/src/collections/bitstash/storage.rs
 delete mode 100644 crates/storage/src/collections/bitstash/tests.rs
 delete mode 100644 crates/storage/src/collections/bitvec/bitref.rs
 delete mode 100644 crates/storage/src/collections/bitvec/bits256.rs
 delete mode 100644 crates/storage/src/collections/bitvec/bitsref.rs
 delete mode 100644 crates/storage/src/collections/bitvec/impls.rs
 delete mode 100644 crates/storage/src/collections/bitvec/iter.rs
 delete mode 100644 crates/storage/src/collections/bitvec/mod.rs
 delete mode 100644 crates/storage/src/collections/bitvec/storage.rs
 delete mode 100644 crates/storage/src/collections/bitvec/tests.rs
 delete mode 100644 crates/storage/src/collections/hashmap/fuzz_tests.rs
 delete mode 100644 crates/storage/src/collections/hashmap/impls.rs
 delete mode 100644 crates/storage/src/collections/hashmap/iter.rs
 delete mode 100644 crates/storage/src/collections/hashmap/mod.rs
 delete mode 100644 crates/storage/src/collections/hashmap/storage.rs
 delete mode 100644 crates/storage/src/collections/hashmap/tests.rs
 delete mode 100644 crates/storage/src/collections/mod.rs
 delete mode 100644 crates/storage/src/collections/smallvec/impls.rs
 delete mode 100644 crates/storage/src/collections/smallvec/iter.rs
 delete mode 100644 crates/storage/src/collections/smallvec/mod.rs
 delete mode 100644 crates/storage/src/collections/smallvec/storage.rs
 delete mode 100644 crates/storage/src/collections/smallvec/tests.rs
 delete mode 100644 crates/storage/src/collections/stash/impls.rs
 delete mode 100644 crates/storage/src/collections/stash/iter.rs
 delete mode 100644 crates/storage/src/collections/stash/mod.rs
 delete mode 100644 crates/storage/src/collections/stash/storage.rs
 delete mode 100644 crates/storage/src/collections/stash/tests.rs
 delete mode 100644 crates/storage/src/collections/vec/fuzz_tests.rs
 delete mode 100644 crates/storage/src/collections/vec/impls.rs
 delete mode 100644 crates/storage/src/collections/vec/iter.rs
 delete mode 100644 crates/storage/src/collections/vec/mod.rs
 delete mode 100644 crates/storage/src/collections/vec/storage.rs
 delete mode 100644 crates/storage/src/collections/vec/tests.rs
 delete mode 100644 crates/storage/src/hashmap_entry_api_tests.rs
 delete mode 100644 crates/storage/src/lazy/cache_cell.rs
 delete mode 100644 crates/storage/src/lazy/entry.rs
 delete mode 100644 crates/storage/src/lazy/lazy_array.rs
 delete mode 100644 crates/storage/src/lazy/lazy_cell.rs
 delete mode 100644 crates/storage/src/lazy/lazy_hmap.rs
 delete mode 100644 crates/storage/src/lazy/lazy_imap.rs

diff --git a/crates/storage/src/collections/binary_heap/children.rs b/crates/storage/src/collections/binary_heap/children.rs
deleted file mode 100644
index 1a023b099c..0000000000
--- a/crates/storage/src/collections/binary_heap/children.rs
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2018-2022 Parity Technologies (UK) Ltd.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A `Children` object consists of two elements: a left and right child. - -use crate::traits::{ - forward_clear_packed, - forward_pull_packed, - forward_push_packed, - KeyPtr, - PackedLayout, - SpreadLayout, -}; -use ink_primitives::Key; - -#[cfg(feature = "std")] -use scale_info::TypeInfo; - -/// Each `Children` object may contain up to two elements. It is always -/// stored in one single storage cell. This reduces storage access operations -/// of the binary heap algorithm. -#[cfg_attr(feature = "std", derive(TypeInfo))] -#[derive(scale::Encode, scale::Decode, Default, PartialEq, Eq, Debug)] -pub(super) struct Children { - left: Option, - right: Option, -} - -/// The position which a child has in a `Children` object. -#[derive(Copy, Clone, PartialEq, Debug)] -pub(super) enum ChildPosition { - Left, - Right, -} - -/// Number of elements stored in each `Children` object. -/// -/// Note that the first `Children` object (at index `0`) will only ever -/// contain one element (the root element). -pub(super) const CHILDREN_PER_NODE: u32 = 2; - -/// Returns the index of the `Children` object in which the nth element of -/// the heap is stored. -pub(super) fn get_children_storage_index(n: u32) -> u32 { - if n == 0 { - return 0 - } - // The first `Children` object only ever contains the root element: - // `[Some(root), None]`. So when calculating indices we need to account - // for the items which have been left empty in the first `Children` object. - let padding = CHILDREN_PER_NODE - 1; - (n + padding) / CHILDREN_PER_NODE -} - -/// Returns the `ChildPosition` of the nth heap element. -/// -/// For example, the element `3` is found at the child position `0` -/// (within the `Children` object at storage index `2`). -pub(super) fn get_child_pos(n: u32) -> ChildPosition { - let storage_index = get_children_storage_index(n); - match (storage_index, n) { - (0, 0) => ChildPosition::Left, - (0, _) => panic!("first children object contains only the root element"), - (_, _) => { - let child_pos = (n - 1) % CHILDREN_PER_NODE; - match child_pos { - 0 => ChildPosition::Left, - 1 => ChildPosition::Right, - _ => { - unreachable!( - "CHILDREN_PER_NODE is 2, following the modulo op index must be 0 or 1" - ) - } - } - } - } -} - -impl Children -where - T: PackedLayout + Ord, -{ - /// Creates a new `Children` object with a left and right element. - pub fn new(left: Option, right: Option) -> Self { - Self { left, right } - } - - /// Returns the number of existent children in this object. - pub fn count(&self) -> usize { - self.left.is_some() as usize + self.right.is_some() as usize - } - - /// Returns a shared reference to the element at `which`. - pub fn child(&self, which: ChildPosition) -> &Option { - match which { - ChildPosition::Left => &self.left, - ChildPosition::Right => &self.right, - } - } - - /// Returns an exclusive reference to the element at `which`. 
- pub fn child_mut(&mut self, which: ChildPosition) -> &mut Option { - match which { - ChildPosition::Left => &mut self.left, - ChildPosition::Right => &mut self.right, - } - } -} - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::{ - CellLayout, - Layout, - LayoutKey, - }; - - impl StorageLayout for Children - where - T: PackedLayout + Ord + TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Cell(CellLayout::new::>(LayoutKey::from( - key_ptr.advance_by(1), - ))) - } - } -}; - -impl SpreadLayout for Children -where - T: PackedLayout + Ord, -{ - const FOOTPRINT: u64 = 1; - const REQUIRES_DEEP_CLEAN_UP: bool = false; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - forward_pull_packed::(ptr) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - forward_push_packed::(self, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - forward_clear_packed::(self, ptr) - } -} - -impl PackedLayout for Children -where - T: PackedLayout + Ord, -{ - fn push_packed(&self, at: &Key) { - as PackedLayout>::push_packed(&self.left, at); - as PackedLayout>::push_packed(&self.right, at); - } - - fn clear_packed(&self, at: &Key) { - as PackedLayout>::clear_packed(&self.left, at); - as PackedLayout>::clear_packed(&self.right, at); - } - - fn pull_packed(&mut self, at: &Key) { - as PackedLayout>::pull_packed(&mut self.left, at); - as PackedLayout>::pull_packed(&mut self.right, at); - } -} - -#[test] -fn get_children_storage_index_works() { - // root is in cell 0 - assert_eq!(get_children_storage_index(0), 0); - - // element 1 + 2 are childrent of element 0 and - // should be in one cell together - assert_eq!(get_children_storage_index(1), 1); - assert_eq!(get_children_storage_index(2), 1); - - // element 3 and 4 should be in one cell - assert_eq!(get_children_storage_index(3), 2); - assert_eq!(get_children_storage_index(4), 2); -} - -#[test] -fn get_child_pos_works() { - assert_eq!(get_child_pos(0), ChildPosition::Left); - - assert_eq!(get_child_pos(1), ChildPosition::Left); - assert_eq!(get_child_pos(2), ChildPosition::Right); - - assert_eq!(get_child_pos(3), ChildPosition::Left); - assert_eq!(get_child_pos(4), ChildPosition::Right); - - assert_eq!(get_child_pos(5), ChildPosition::Left); - assert_eq!(get_child_pos(6), ChildPosition::Right); -} diff --git a/crates/storage/src/collections/binary_heap/children_vec.rs b/crates/storage/src/collections/binary_heap/children_vec.rs deleted file mode 100644 index fbdcc5700f..0000000000 --- a/crates/storage/src/collections/binary_heap/children_vec.rs +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Provides an interface around the vector used to store elements of the -//! [`BinaryHeap`](`super::BinaryHeap`) in storage. This is necessary since -//! we don't just store each element in its own storage cell, but rather -//! 
optimize storage access by putting children together in one storage cell. - -use super::{ - children, - children::Children, - StorageVec, -}; -use crate::{ - traits::{ - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, - }, - Lazy, -}; - -/// Provides an interface for accessing elements in the `BinaryHeap`. -/// -/// Elements of the heap are stored in a vector of `Children` objects, whereby -/// each `Children` object contains two elements. When operating on indices of -/// the `BinaryHeap`, this interface transposes heap indices to the child inside -/// the `Children` object, in which the element is stored. -#[derive(Default, PartialEq, Eq, Debug)] -pub struct ChildrenVec -where - T: PackedLayout + Ord, -{ - /// The number of elements stored in the heap. - /// We cannot use the length of the storage vector, since each entry (i.e. each - /// `Children` object) in the vector contains two child elements (except the root - /// element which occupies a `Children` object on its own. - len: Lazy, - /// The underlying storage vec containing the `Children`. - children: StorageVec>, -} - -/// Encapsulates information regarding a particular child. -pub(super) struct ChildInfo<'a, T> { - /// A reference to the value in this child, if existent. - pub child: &'a Option, -} - -impl<'a, T> ChildInfo<'a, T> { - /// Creates a new `ChildInfo` object. - fn new(child: &'a Option) -> Self { - Self { child } - } -} - -/// Encapsulates information regarding a particular child. -pub(super) struct ChildInfoMut<'a, T> { - /// A mutable reference to the value in this child, if existent. - pub child: &'a mut Option, - /// The number of children which are set in this `Children` object. - /// - /// This property exists only in `ChildInfoMut`, but not in `ChildInfo`. - /// The reason is that in the case of pop-ping a child from a `Children` - /// object we need to check if the child count of that object is `0` after - /// the pop operation. In that case no children are left in the object, - /// and it can be removed altogether from the heap. - pub child_count: usize, -} - -impl<'a, T> ChildInfoMut<'a, T> { - /// Creates a new `ChildInfoMut` object. - fn new(child: &'a mut Option, child_count: usize) -> Self { - Self { child, child_count } - } -} - -impl ChildrenVec -where - T: PackedLayout + Ord, -{ - /// Creates a new empty storage heap. - #[inline] - pub fn new() -> Self { - Self { - len: Lazy::new(0), - children: StorageVec::new(), - } - } - - /// Returns the number of elements in the heap, also referred to as its length. - #[inline] - pub fn len(&self) -> u32 { - *self.len - } - - /// Returns the amount of `Children` objects stored in the vector. - #[allow(dead_code)] - #[cfg(all(test, feature = "ink-fuzz-tests"))] - pub fn children_count(&self) -> u32 { - self.children.len() - } - - /// Returns `true` if the heap contains no elements. - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a shared reference to the indexed element. - /// - /// Returns `None` if `index` is out of bounds. - pub fn get(&self, index: u32) -> Option<&T> { - self.get_child(index)?.child.as_ref() - } - - /// Returns an exclusive reference to the indexed element. - /// The element in a `Children` is an `Option`. - /// - /// Returns `None` if `index` is out of bounds. - pub fn get_mut(&mut self, index: u32) -> Option<&mut T> { - let child_info = self.get_child_mut(index)?; - child_info.child.as_mut() - } - - /// Swaps the elements at the given indices. 
- /// - /// # Panics - /// - /// If one or both indices are out of bounds. - pub fn swap(&mut self, a: u32, b: u32) { - if a == b { - return - } - assert!(a < self.len(), "a is out of bounds"); - assert!(b < self.len(), "b is out of bounds"); - - let child_info_a = self.get_child_mut(a).expect("index a must exist"); - let a_opt = child_info_a.child.take(); - - let child_info_b = self.get_child_mut(b).expect("index b must exist"); - let b_opt = core::mem::replace(child_info_b.child, a_opt); - - let child_info_a = self.get_child_mut(a).expect("index a must exist"); - *child_info_a.child = b_opt; - } - - /// Removes the element at `index` from the heap and returns it. - /// - /// The last element of the heap is put into the slot at `index`. - /// Returns `None` and does not mutate the heap if it is empty. - pub fn swap_remove(&mut self, index: u32) -> Option { - if self.is_empty() { - return None - } - self.swap(index, self.len() - 1); - self.pop() - } - - /// Returns an iterator yielding shared references to all elements of the heap. - /// - /// # Note - /// - /// Avoid unbounded iteration over big storage heaps. - /// Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter(&self) -> Iter { - Iter::new(self) - } - - /// Returns a shared reference to the first element if any. - pub fn first(&self) -> Option<&T> { - if self.is_empty() { - return None - } - self.get(0) - } - - /// Returns an exclusive reference to the first element if any. - pub fn first_mut(&mut self) -> Option<&mut T> { - if self.is_empty() { - return None - } - self.get_mut(0) - } - - /// Removes all elements from this heap. - /// - /// # Note - /// - /// Use this method to clear the heap instead of e.g. iterative `pop()`. - /// This method performs significantly better and does not actually read - /// any of the elements (whereas `pop()` does). - pub fn clear(&mut self) { - if self.is_empty() { - return - } - self.children.clear(); - self.len = Lazy::new(0); - } - - /// Appends an element to the back of the heap. - pub fn push(&mut self, value: T) { - assert!( - self.len() < core::u32::MAX, - "cannot push more elements into the storage heap" - ); - let last_index = self.len(); - *self.len += 1; - self.insert(last_index, Some(value)); - } - - /// Returns information about the child at the heap index if any. - pub(super) fn get_child(&self, index: u32) -> Option> { - let storage_index = children::get_children_storage_index(index); - let child_pos = children::get_child_pos(index); - let children = self.children.get(storage_index)?; - let child = children.child(child_pos); - Some(ChildInfo::new(child)) - } - - /// Returns information about the child at the heap index if any. - /// - /// The returned `ChildInfoMut` contains a mutable reference to the value `T`. - pub(super) fn get_child_mut(&mut self, index: u32) -> Option> { - let storage_index = children::get_children_storage_index(index); - let child_pos = children::get_child_pos(index); - let children = self.children.get_mut(storage_index)?; - let count = children.count(); - let child = children.child_mut(child_pos); - Some(ChildInfoMut::new(child, count)) - } - - /// Inserts `value` at the heap index `index`. - /// - /// If there is already a child in storage which `index` resolves to - /// then `value` is inserted there. Otherwise a new child is created. 
- fn insert(&mut self, index: u32, value: Option) { - let info = self.get_child_mut(index); - if let Some(info) = info { - *info.child = value; - return - } - - self.children.push(Children::new(value, None)); - debug_assert!( - { - let storage_index = children::get_children_storage_index(index); - self.children.get(storage_index).is_some() - }, - "the new children were not placed at children_index!" - ); - } - - /// Pops the last element from the heap and returns it. - // - /// Returns `None` if the heap is empty. - fn pop(&mut self) -> Option { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - *self.len = last_index; - - let info = self - .get_child_mut(last_index) - .expect("children must exist at last_index"); - let popped_val = info.child.take(); - if info.child_count == 1 { - // if both children are non-existent the entire `Children` object can be removed - self.children.pop(); - } - popped_val - } -} - -impl SpreadLayout for ChildrenVec -where - T: SpreadLayout + Ord + PackedLayout, -{ - const FOOTPRINT: u64 = 1 + > as SpreadLayout>::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - let len = SpreadLayout::pull_spread(ptr); - let children = SpreadLayout::pull_spread(ptr); - Self { len, children } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.len, ptr); - SpreadLayout::push_spread(&self.children, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::clear_spread(&self.len, ptr); - SpreadLayout::clear_spread(&self.children, ptr); - } -} - -impl SpreadAllocate for ChildrenVec -where - T: SpreadLayout + Ord + PackedLayout, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadAllocate::allocate_spread(ptr), - children: SpreadAllocate::allocate_spread(ptr), - } - } -} - -/// An iterator over shared references to the elements of the `BinaryHeap`. -#[derive(Debug, Clone, Copy)] -pub struct Iter<'a, T> -where - T: PackedLayout + Ord, -{ - /// The heap elements to iterate over. - elements: &'a ChildrenVec, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T> Iter<'a, T> -where - T: PackedLayout + Ord, -{ - /// Creates a new iterator for the given heap elements. - pub fn new(elements: &'a ChildrenVec) -> Self { - Self { - elements, - begin: 0, - end: elements.len(), - } - } - - /// Returns the amount of remaining elements to yield by the iterator. - fn remaining(&self) -> u32 { - self.end - self.begin - } -} - -impl<'a, T> Iterator for Iter<'a, T> -where - T: PackedLayout + Ord, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } - - fn nth(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin + n >= self.end { - return None - } - let cur = self.begin + n; - self.begin += 1 + n; - - self.elements.get_child(cur)?.child.as_ref() - } -} diff --git a/crates/storage/src/collections/binary_heap/impls.rs b/crates/storage/src/collections/binary_heap/impls.rs deleted file mode 100644 index 5ad3a9975b..0000000000 --- a/crates/storage/src/collections/binary_heap/impls.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::BinaryHeap; -use crate::traits::PackedLayout; -use core::iter::{ - Extend, - FromIterator, -}; - -impl Extend for BinaryHeap -where - T: PackedLayout + Ord, -{ - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for item in iter { - self.push(item) - } - } -} - -impl FromIterator for BinaryHeap -where - T: PackedLayout + Ord, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut heap = Self::new(); - heap.extend(iter); - heap - } -} diff --git a/crates/storage/src/collections/binary_heap/mod.rs b/crates/storage/src/collections/binary_heap/mod.rs deleted file mode 100644 index e8d6ff04c9..0000000000 --- a/crates/storage/src/collections/binary_heap/mod.rs +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A priority queue implemented with a binary heap. -//! -//! Insertion and popping the largest element have `O(log(n))` complexity. -//! Checking the largest element is `O(1)`. - -mod children; -mod children_vec; -mod impls; -mod reverse; -mod storage; - -#[cfg(test)] -mod tests; - -use self::children_vec::ChildrenVec; -use crate::{ - collections::vec::Vec as StorageVec, - traits::PackedLayout, -}; - -pub use children_vec::Iter; -pub use reverse::Reverse; - -/// A priority queue implemented with a binary heap. -/// -/// # Note -/// -/// The heap is a *max-heap* by default, i.e. the first element is the largest. -/// Either [`Reverse`] or a custom `Ord` implementation can be used to -/// make `BinaryHeap` a *min-heap*. This then makes `heap.pop()` return the -/// smallest value instead of the largest one. -#[derive(Default, PartialEq, Eq, Debug)] -pub struct BinaryHeap -where - T: PackedLayout + Ord, -{ - /// The individual elements of the heap. - elements: ChildrenVec, -} - -impl BinaryHeap -where - T: PackedLayout + Ord, -{ - /// Creates a new empty storage heap. - pub fn new() -> Self { - Self { - elements: ChildrenVec::new(), - } - } - - /// Returns the number of elements in the heap, also referred to as its length. - pub fn len(&self) -> u32 { - self.elements.len() - } - - /// Returns `true` if the heap contains no elements. - pub fn is_empty(&self) -> bool { - self.elements.is_empty() - } -} - -impl BinaryHeap -where - T: PackedLayout + Ord, -{ - /// Returns an iterator yielding shared references to all elements of the heap. 
- /// - /// # Note - /// - /// Avoid unbounded iteration over large heaps. - /// Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter(&self) -> Iter { - self.elements.iter() - } - - /// Returns a shared reference to the greatest element of the heap - /// - /// Returns `None` if the heap is empty - pub fn peek(&self) -> Option<&T> { - self.elements.first() - } - - /// Returns an exclusive reference to the greatest element of the heap - /// - /// Returns `None` if the heap is empty - /// - /// # Note: - /// - /// If the `PeekMut` value is leaked, the heap may be in an inconsistent state. - /// - /// # Example - /// - /// ```ignore - /// # // Tracking issue [#1119]: We currently ignore this test since we stopped exposing - /// # // `collections` publicly. - /// use ink_storage::collections::BinaryHeap; - /// let mut heap = BinaryHeap::new(); - /// assert!(heap.peek_mut().is_none()); - /// - /// heap.push(1); - /// heap.push(5); - /// heap.push(2); - /// { - /// let mut val = heap.peek_mut().unwrap(); - /// *val = 0; - /// } - /// assert_eq!(heap.peek(), Some(&2)); - /// ``` - pub fn peek_mut(&mut self) -> Option> { - if self.is_empty() { - None - } else { - Some(PeekMut { - heap: self, - sift: true, - }) - } - } - - /// Take an element at `pos` and move it down the heap, while its children - /// are smaller. - fn sift_down(&mut self, mut pos: u32) { - let end = self.len(); - let mut child = 2 * pos + 1; - while child < end { - let right = child + 1; - // compare with the greater of the two children - if right < end && self.elements.get(child) <= self.elements.get(right) { - child = right; - } - // if we are already in order, stop. - if self.elements.get(pos) >= self.elements.get(child) { - break - } - self.elements.swap(child, pos); - pos = child; - child = 2 * pos + 1; - } - } - - /// Pops greatest element from the heap and returns it - /// - /// Returns `None` if the heap is empty - pub fn pop(&mut self) -> Option { - // replace the root of the heap with the last element - let elem = self.elements.swap_remove(0); - self.sift_down(0); - elem - } - - /// Removes all elements from this heap. - /// - /// # Note - /// - /// Use this method to clear the heap instead of e.g. iterative `pop()`. - /// This method performs significantly better and does not actually read - /// any of the elements (whereas `pop()` does). - pub fn clear(&mut self) { - self.elements.clear() - } -} - -impl BinaryHeap -where - T: PackedLayout + Ord, -{ - /// Take an element at `pos` and move it up the heap, while its parent is - /// larger. - fn sift_up(&mut self, mut pos: u32) { - while pos > 0 { - let parent = (pos - 1) / 2; - if self.elements.get(pos) <= self.elements.get(parent) { - break - } - self.elements.swap(parent, pos); - pos = parent; - } - } - - /// Pushes the given element to the binary heap. - pub fn push(&mut self, value: T) { - let old_len = self.len(); - self.elements.push(value); - self.sift_up(old_len) - } -} - -/// Structure wrapping a mutable reference to the greatest item on a -/// [`BinaryHeap`]. -/// -/// This `struct` is created by the [`BinaryHeap::peek_mut`] method. -pub struct PeekMut<'a, T> -where - T: 'a + PackedLayout + Ord, -{ - heap: &'a mut BinaryHeap, - /// If `true`, on `drop()` will sift the peeked value down the tree if after mutation it is no - /// longer the largest value, in order to keep the heap in a consistent state. 
- /// - /// If the peeked value is consumed via `PeekMut::pop()` then this is set to false in order - /// to prevent a redundant reorganization which would already have happened via `BinaryHeap::pop()`. - sift: bool, -} - -impl Drop for PeekMut<'_, T> -where - T: PackedLayout + Ord, -{ - fn drop(&mut self) { - if self.sift { - self.heap.sift_down(0); - } - } -} - -impl core::ops::Deref for PeekMut<'_, T> -where - T: PackedLayout + Ord, -{ - type Target = T; - fn deref(&self) -> &T { - self.heap - .elements - .first() - .expect("PeekMut is only instantiated for non-empty heaps") - } -} - -impl core::ops::DerefMut for PeekMut<'_, T> -where - T: PackedLayout + Ord, -{ - fn deref_mut(&mut self) -> &mut T { - self.heap - .elements - .first_mut() - .expect("PeekMut is only instantiated for non-empty heaps") - } -} - -impl<'a, T> PeekMut<'a, T> -where - T: PackedLayout + Ord, -{ - /// Removes the peeked value from the heap and returns it. - pub fn pop(mut this: PeekMut<'a, T>) -> T { - let value = this - .heap - .pop() - .expect("PeekMut is only instantiated for non-empty heaps"); - this.sift = false; - value - } -} diff --git a/crates/storage/src/collections/binary_heap/reverse.rs b/crates/storage/src/collections/binary_heap/reverse.rs deleted file mode 100644 index 603d2f0858..0000000000 --- a/crates/storage/src/collections/binary_heap/reverse.rs +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! [`BinaryHeap`](`super::BinaryHeap`) is a max-heap by default, where the *largest* element will -//! be returned by `heap.pop()`. To use a [`BinaryHeap`](`super::BinaryHeap`) as a min-heap, where -//! the *smallest* element returned by `heap.pop()`, the type `T` of the binary heap can be wrapped -//! in a `Reverse`. -//! -//! [`Reverse`] simply wraps [`core::cmp::Reverse`] and implements all the required traits for use -//! as a storage struct. - -use crate::traits::{ - KeyPtr, - PackedLayout, - SpreadLayout, -}; -use ink_prelude::vec::Vec; -use ink_primitives::Key; - -/// Wrapper for [`core::cmp::Reverse`] for using a [`BinaryHeap`](`super::BinaryHeap`) as a -/// min-heap. -#[derive(PartialEq, Eq, Ord, PartialOrd, Debug, Copy, Clone, Default)] -pub struct Reverse(core::cmp::Reverse); - -impl Reverse -where - T: PackedLayout + Ord, -{ - /// Construct a new [`Reverse`] from the given value. - pub fn new(value: T) -> Self { - Self(core::cmp::Reverse(value)) - } - - /// Return a shared reference to the inner value. 
- pub fn value(&self) -> &T { - &(self.0).0 - } -} - -impl SpreadLayout for Reverse -where - T: PackedLayout + Ord, -{ - const FOOTPRINT: u64 = ::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self::new(SpreadLayout::pull_spread(ptr)) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(self.value(), ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::clear_spread(self.value(), ptr); - } -} - -impl PackedLayout for Reverse -where - T: PackedLayout + Ord, -{ - fn pull_packed(&mut self, at: &Key) { - ::pull_packed(&mut (self.0).0, at) - } - - fn push_packed(&self, at: &Key) { - ::push_packed(&(self.0).0, at) - } - - fn clear_packed(&self, at: &Key) { - ::clear_packed(&(self.0).0, at) - } -} - -impl scale::Encode for Reverse -where - T: PackedLayout + Ord + scale::Encode, -{ - #[inline] - fn size_hint(&self) -> usize { - ::size_hint(self.value()) - } - - #[inline] - fn encode_to(&self, dest: &mut O) { - ::encode_to(self.value(), dest) - } - - #[inline] - fn encode(&self) -> Vec { - ::encode(self.value()) - } - - #[inline] - fn using_encoded R>(&self, f: F) -> R { - ::using_encoded(self.value(), f) - } -} - -impl scale::Decode for Reverse -where - T: PackedLayout + Ord + scale::Decode, -{ - fn decode(value: &mut I) -> Result { - let value = ::decode(value)?; - Ok(Self::new(value)) - } -} diff --git a/crates/storage/src/collections/binary_heap/storage.rs b/crates/storage/src/collections/binary_heap/storage.rs deleted file mode 100644 index d68a7b7476..0000000000 --- a/crates/storage/src/collections/binary_heap/storage.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of ink! storage traits. 
- -use super::{ - BinaryHeap, - ChildrenVec, -}; -use crate::traits::{ - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, -}; - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - use scale_info::TypeInfo; - - impl StorageLayout for BinaryHeap - where - T: PackedLayout + Ord + TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([FieldLayout::new( - "elements", - as StorageLayout>::layout(key_ptr), - )])) - } - } -}; - -#[cfg(feature = "std")] -const _: () = { - use super::children::Children; - use crate::{ - collections::binary_heap::StorageVec, - lazy::Lazy, - traits::StorageLayout, - }; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - use scale_info::TypeInfo; - - impl StorageLayout for ChildrenVec - where - T: PackedLayout + Ord + TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new(vec![ - FieldLayout::new("len", as StorageLayout>::layout(key_ptr)), - FieldLayout::new( - "children", - > as StorageLayout>::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for BinaryHeap -where - T: PackedLayout + Ord, -{ - const FOOTPRINT: u64 = as SpreadLayout>::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - elements: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.elements, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::clear_spread(&self.elements, ptr); - } -} - -impl SpreadAllocate for BinaryHeap -where - T: PackedLayout + Ord, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - elements: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/binary_heap/tests.rs b/crates/storage/src/collections/binary_heap/tests.rs deleted file mode 100644 index c38444a8a7..0000000000 --- a/crates/storage/src/collections/binary_heap/tests.rs +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - BinaryHeap, - PeekMut, - Reverse, -}; -use crate::traits::{ - KeyPtr, - PackedLayout, - SpreadLayout, -}; -use ink_primitives::Key; - -fn heap_from_slice(slice: &[T]) -> BinaryHeap -where - T: Clone + PackedLayout + Ord, -{ - slice.iter().cloned().collect() -} - -/// Creates a heap populated with `n` consecutive values. -fn heap_of_size(n: u32) -> BinaryHeap { - std::iter::repeat(0u32) - .take(n as usize) - .enumerate() - .map(|(i, _)| i as u32 + 1) - .collect() -} - -/// Returns the number of cells a binary tree of `heap_size` occupies -/// in the storage. -/// -/// *Note*: `heap_size` must be even, if it is odd we cannot calculate -/// the number of cells with certainty, since for e.g. 
`heap_size = 5` -/// there might be two leaf cells with one element each or alternatively -/// one leaf with two elements. -fn get_count_cells(heap_size: u32) -> u32 { - fn division_round_up(dividend: u32, divisor: u32) -> u32 { - (dividend + divisor - 1) / divisor - } - assert!(heap_size % 2 == 0, "heap_size must be even"); - let rest = match heap_size { - 0 => 0, - 1 => 0, - _ => division_round_up(heap_size, super::children::CHILDREN_PER_NODE), - }; - rest + 1 -} - -#[test] -fn new_binary_heap_works() { - // `BinaryHeap::new` - let heap = >::new(); - assert!(heap.is_empty()); - assert_eq!(heap.len(), 0); - // `BinaryHeap::default` - let default = as Default>::default(); - assert!(default.is_empty()); - assert_eq!(default.len(), 0); - // `BinaryHeap::new` and `BinaryHeap::default` should be equal. - assert_eq!(heap, default); -} - -#[test] -fn empty_pop_works() { - let mut heap = BinaryHeap::::new(); - assert!(heap.pop().is_none()); -} - -#[test] -fn empty_peek_works() { - let empty = BinaryHeap::::new(); - assert!(empty.peek().is_none()); -} - -#[test] -fn empty_peek_mut_works() { - let mut empty = BinaryHeap::::new(); - assert!(empty.peek_mut().is_none()); -} - -#[test] -fn empty_iter_works() { - let empty = BinaryHeap::::new(); - assert!(empty.iter().next().is_none()); -} - -#[test] -fn from_iterator_works() { - let some_primes = [1, 2, 3, 5, 7, 11, 13]; - assert_eq!(some_primes.iter().copied().collect::>(), { - let mut vec = BinaryHeap::new(); - for prime in &some_primes { - vec.push(*prime) - } - vec - }); -} - -#[test] -fn from_empty_iterator_works() { - assert_eq!( - [].iter().copied().collect::>(), - BinaryHeap::new(), - ); -} - -#[test] -fn push_works() { - let mut heap = heap_from_slice(&[2, 4, 9]); - assert_eq!(heap.len(), 3); - assert_eq!(*heap.peek().unwrap(), 9); - heap.push(11); - assert_eq!(heap.len(), 4); - assert_eq!(*heap.peek().unwrap(), 11); - heap.push(5); - assert_eq!(heap.len(), 5); - assert_eq!(*heap.peek().unwrap(), 11); - heap.push(27); - assert_eq!(heap.len(), 6); - assert_eq!(*heap.peek().unwrap(), 27); - heap.push(3); - assert_eq!(heap.len(), 7); - assert_eq!(*heap.peek().unwrap(), 27); - heap.push(103); - assert_eq!(heap.len(), 8); - assert_eq!(*heap.peek().unwrap(), 103); -} - -#[test] -fn peek_works() { - let mut heap = >::new(); - heap.push(33); - - assert_eq!(heap.peek(), Some(&33)); -} - -#[test] -fn peek_and_pop_works() { - let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]; - let mut sorted = data.clone(); - sorted.sort_unstable(); - let mut heap = heap_from_slice(&data); - while !heap.is_empty() { - assert_eq!(heap.peek().unwrap(), sorted.last().unwrap()); - assert_eq!(heap.pop().unwrap(), sorted.pop().unwrap()); - } -} - -#[test] -fn peek_mut_works() { - let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]; - let mut heap = heap_from_slice(&data); - assert_eq!(heap.peek(), Some(&10)); - { - let mut top = heap.peek_mut().unwrap(); - *top -= 2; - } - assert_eq!(heap.peek(), Some(&9)); -} - -#[test] -fn peek_mut_pop_works() { - let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]; - let mut heap = heap_from_slice(&data); - assert_eq!(heap.peek(), Some(&10)); - { - let mut top = heap.peek_mut().unwrap(); - *top -= 2; - assert_eq!(PeekMut::pop(top), 8); - } - assert_eq!(heap.peek(), Some(&9)); -} - -#[test] -fn min_heap_works() { - let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1] - .iter() - .map(|x| Reverse::new(*x)) - .collect::>(); - let mut sorted = data.clone(); - sorted.sort_unstable(); - let mut heap = heap_from_slice(&data); - 
while !heap.is_empty() { - assert_eq!(heap.peek().unwrap(), sorted.last().unwrap()); - assert_eq!(heap.pop().unwrap(), sorted.pop().unwrap()); - } -} - -#[test] -fn spread_layout_push_pull_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let heap1 = heap_from_slice(&[b'a', b'b', b'c', b'd']); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&heap1, &mut KeyPtr::from(root_key)); - // Load the pushed binary heap into another instance and check that - // both instances are equal: - let heap2 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(heap1, heap2); - Ok(()) - }) -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let heap1 = heap_from_slice(&[b'a', b'b', b'c', b'd']); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&heap1, &mut KeyPtr::from(root_key)); - // It has already been asserted that a valid instance can be pulled - // from contract storage after a push to the same storage region. - // - // Now clear the associated storage from `heap1` and check whether - // loading another instance from this storage will panic since the - // heap's length property cannot read a value: - SpreadLayout::clear_spread(&heap1, &mut KeyPtr::from(root_key)); - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} - -#[test] -fn clear_works_on_filled_heap() { - let mut heap = heap_from_slice(&[b'a', b'b', b'c', b'd']); - heap.clear(); - assert!(heap.is_empty()); -} - -#[test] -fn clear_works_on_empty_heap() { - let mut heap = BinaryHeap::::default(); - heap.clear(); - assert!(heap.is_empty()); -} - -fn check_complexity_read_writes( - heap_size: u32, - heap_op: F, - expected_net_reads: usize, - expected_net_writes: usize, -) -> ink_env::Result<()> -where - F: FnOnce(&mut BinaryHeap), -{ - ink_env::test::run_test::(|_| { - let heap1 = heap_of_size(heap_size); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&heap1, &mut KeyPtr::from(root_key)); - - let contract_account = ink_env::test::callee::(); - - let mut lazy_heap = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - - let (base_reads, base_writes) = ink_env::test::get_contract_storage_rw::< - ink_env::DefaultEnvironment, - >(&contract_account); - - // elements.len + vec.len - const CONST_WRITES: u32 = 2; - assert_eq!( - (base_reads as u32, base_writes as u32), - (0, CONST_WRITES + get_count_cells(heap_size)) - ); - - heap_op(&mut lazy_heap); - - // write back to storage so we can see how many writes required - SpreadLayout::push_spread(&lazy_heap, &mut KeyPtr::from(root_key)); - - let (reads, writes) = ink_env::test::get_contract_storage_rw::< - ink_env::DefaultEnvironment, - >(&contract_account); - - let net_reads = reads - base_reads; - let net_writes = writes - base_writes; - - assert_eq!( - net_reads, expected_net_reads, - "size {}: storage reads", - heap_size - ); - assert_eq!( - net_writes, expected_net_writes, - "size {}: storage writes", - heap_size - ); - - Ok(()) - }) -} - -#[test] -fn push_largest_value_complexity_big_o_log_n() -> ink_env::Result<()> { - // 1 elements overhead (#508) + 1 elements.len + 1 heap overhead (#508) + 1 heap.len + 1 cell - const CONST_READS: usize = 5; - - // 1 elements.len + 1 cell which was pushed to - // vec.len does not get larger because no cell is added - const CONST_WRITES: usize = 2; - - for (n, log_n) in &[(2, 1), (4, 2), (8, 3), (16, 4), (32, 5), (64, 
6)] { - let largest_value = n + 1; - let expected_reads = log_n + CONST_READS; - let expected_writes = log_n + CONST_WRITES; - check_complexity_read_writes( - *n, - |heap| heap.push(largest_value), - expected_reads, - expected_writes, - )?; - } - Ok(()) -} - -#[test] -fn push_smallest_value_complexity_big_o_1() -> ink_env::Result<()> { - const SMALLEST_VALUE: u32 = 0; - - // 1 elements overhead (#508) + 1 elements.len + 1 vec overhead (#508) + - // 1 vec.len + 1 vec.cell in which to insert + 1 parent cell during `sift_up` - const EXPECTED_READS: usize = 6; - - // binary heap len + one cell - // vec.len does not get larger because no cell is added - const EXPECTED_WRITES: usize = 2; - - for n in &[2, 4, 8, 16, 32, 64] { - check_complexity_read_writes( - *n, - |heap| { - heap.push(SMALLEST_VALUE); - }, - EXPECTED_READS, - EXPECTED_WRITES, - )?; - } - Ok(()) -} - -#[test] -fn pop_complexity_big_o_log_n() -> ink_env::Result<()> { - // 1 elements overhead (#508) + elements.len + 1 vec overhead (#508) + - // 1 vec.len + 1 vec.cell from which to pop - const CONST_READS: usize = 5; - - // 1 elements.len + 1 vec.len + cell which was modified - const CONST_WRITES: usize = 3; - - for (n, log_n) in &[(2, 1), (4, 2), (8, 3), (16, 4), (32, 5), (64, 6)] { - let expected_reads = log_n + CONST_READS; - let expected_writes = log_n + CONST_WRITES; - - check_complexity_read_writes( - *n, - |heap| { - heap.pop(); - }, - expected_reads, - expected_writes, - )?; - } - Ok(()) -} - -#[cfg(feature = "ink-fuzz-tests")] -#[quickcheck] -fn fuzz_pop_always_returns_largest_element(xs: Vec) { - ink_env::test::run_test::(|_| { - let mut sorted = xs.clone(); - sorted.sort_unstable(); - let mut heap = heap_from_slice(&xs); - - for x in sorted.iter().rev() { - assert_eq!(Some(*x), heap.pop()) - } - - assert_eq!(heap.len(), 0); - - // all elements must have been removed as well - assert_eq!(heap.elements.children_count(), 0); - - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/bitstash/counts.rs b/crates/storage/src/collections/bitstash/counts.rs deleted file mode 100644 index d9a870ee93..0000000000 --- a/crates/storage/src/collections/bitstash/counts.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use core::ops::{ - Index, - IndexMut, -}; - -/// Stores the number of set bits for each 256-bits block in a compact `u8`. -#[derive(Debug, Default, PartialEq, Eq, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -pub struct CountFree { - /// Set bits per 256-bit chunk. - counts: [u8; 32], - /// Since with `u8` can only count up to 255 but there might be the need - /// to count up to 256 bits for 256-bit chunks we need to store one extra - /// bit per counter to determine filled chunks. 
- full: FullMask, -} - -impl Index for CountFree { - type Output = u8; - - fn index(&self, index: u8) -> &Self::Output { - &self.counts[index as usize] - } -} - -impl IndexMut for CountFree { - fn index_mut(&mut self, index: u8) -> &mut Self::Output { - &mut self.counts[index as usize] - } -} - -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -pub struct FullMask(u32); - -impl FullMask { - /// Returns `true` if the 256-bit chunk at the given index is full. - pub fn is_full(self, index: u8) -> bool { - assert!(index < 32); - (self.0 >> (31 - index as u32)) & 0x01 == 1 - } - - /// Sets the flag for the 256-bit chunk at the given index to `full`. - pub fn set_full(&mut self, index: u8) { - self.0 |= 1_u32 << (31 - index as u32); - } - - /// Resets the flag for the 256-bit chunk at the given index to not `full`. - pub fn reset_full(&mut self, index: u8) { - self.0 &= !(1_u32 << (31 - index as u32)); - } -} - -impl CountFree { - /// Returns the position of the first free `u8` in the free counts. - /// - /// Returns `None` if all counts are `0xFF`. - pub fn position_first_zero(&self) -> Option { - let i = (!self.full.0).leading_zeros(); - if i == 32 { - return None - } - Some(i as u8) - } - - /// Increases the number of set bits for the given index. - /// - /// # Panics - /// - /// - If the given index is out of bounds. - /// - If the increment would cause an overflow. - pub fn inc(&mut self, index: usize) { - assert!(index < 32, "index is out of bounds"); - if self.counts[index] == !0 { - self.full.set_full(index as u8); - } else { - self.counts[index] += 1; - } - } - - /// Decreases the number of set bits for the given index. - /// - /// Returns the new number of set bits. - /// - /// # Panics - /// - /// - If the given index is out of bounds. - /// - If the decrement would cause an overflow. - pub fn dec(&mut self, index: u8) -> u8 { - assert!(index < 32, "index is out of bounds"); - if self.full.is_full(index) { - self.full.reset_full(index); - } else { - let new_value = self.counts[index as usize] - .checked_sub(1) - .expect("set bits decrement overflowed"); - self.counts[index as usize] = new_value; - } - self.counts[index as usize] - } -} diff --git a/crates/storage/src/collections/bitstash/fuzz_tests.rs b/crates/storage/src/collections/bitstash/fuzz_tests.rs deleted file mode 100644 index f6d3bbf119..0000000000 --- a/crates/storage/src/collections/bitstash/fuzz_tests.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::BitStash; - -/// Conducts repeated insert and remove operations into the stash by iterating -/// over `xs`. Typically the `xs` and `inserts_each` arguments are provided -/// by our fuzzing engine in an iterative manner. -/// -/// For each odd `x` in `xs` a number of put operations are executed. 
-/// For each even `x` it is asserted that the previously inserted elements -/// are in the stash, and they are taken out subsequently. -/// -/// The reasoning behind this even/odd sequence is to introduce some -/// randomness into when elements are inserted/removed. -/// -/// `inserts_each` was chosen as `u8` to keep the number of inserts per `x` in -/// a reasonable range. -fn put_and_take(xs: Vec, additional_puts_each: u8) { - let mut stash = BitStash::new(); - let mut previous_even_x = None; - let mut last_put_indices = Vec::new(); - - for x in 0..xs.len() as u32 { - if x % 2 == 0 { - // On even numbers we put - let mut put_index = None; - for _ in 0..x + additional_puts_each as u32 { - let index = stash.put(); - assert_eq!(stash.get(index), Some(true)); - last_put_indices.push(index); - put_index = Some(index); - } - if previous_even_x.is_none() && put_index.is_some() { - previous_even_x = put_index; - } - } else if previous_even_x.is_some() { - // If it's an odd number and we inserted in a previous run we assert - // that the last insert worked correctly and remove the elements again. - // - // It can happen that after one insert run there are many more - // insert runs (i.e. more susbequent even `x` in `xs`) before we remove - // the numbers of the last run again. This is intentional, as to include - // testing if subsequent insert operations have an effect on already - // inserted items. - while let Some(index) = last_put_indices.pop() { - assert_eq!(stash.get(index), Some(true)); - assert_eq!(stash.take(index), Some(true)); - assert_eq!(stash.get(index), Some(false)); - } - previous_even_x = None; - } - } -} - -#[quickcheck] -fn fuzz_repeated_puts_and_takes(xs: Vec, additional_puts_each: u8) { - ink_env::test::run_test::(|_| { - put_and_take(xs, additional_puts_each); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/bitstash/mod.rs b/crates/storage/src/collections/bitstash/mod.rs deleted file mode 100644 index 4a94ee5891..0000000000 --- a/crates/storage/src/collections/bitstash/mod.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Storage bit stash data structure and utilities. -//! -//! Allows to compactly and efficiently put and take bits in a compressed -//! and very efficient way. - -mod counts; -mod storage; - -#[cfg(test)] -mod tests; - -#[cfg(all(test, feature = "ink-fuzz-tests"))] -mod fuzz_tests; - -use self::counts::CountFree; -use crate::collections::{ - Bitvec as StorageBitvec, - Vec as StorageVec, -}; - -/// The index type used in the storage bit stash. -type Index = u32; - -/// A stash for bits operating on the contract storage. -/// -/// Allows to efficiently put and take bits and -/// stores the underlying bits in an extremely compressed format. -#[derive(Debug, Default, PartialEq, Eq)] -pub struct BitStash { - /// Counter for set bits in a 256-bit chunk of the `free` list. 
- /// - /// For every 256-bit chunk stored in `free` stores a `u8` that counts - /// the number of set bits in the 256-bit chunk. This information is used - /// to compact the information in `free` to make a `first fit` linear - /// search for a new free storage slot more scalable. Since `u8` can only - /// represent 256 different states but since we consider 0 we need an extra - /// 9th bit. This 9th bit tells for every 256-bit chunk if it is full. - /// - /// In theory, it is possible to search up to 8192 storage cells for free - /// slots with a single contract storage look-up. By iterating over the 32 - /// `CountFree` instances of a single instance. - counts: StorageVec, - /// Stores the underlying bits of the storage bit stash. - free: StorageBitvec, -} - -impl BitStash { - /// Creates a new storage bit stash. - pub fn new() -> Self { - Self { - counts: StorageVec::new(), - free: StorageBitvec::new(), - } - } - - /// Returns the bit position of the first 256-bit chunk with zero bits - /// in the `free` list. - /// - /// Returns the bit position of the first bit in the 256-bit chunk and not - /// the chunk position since that's what - /// [`Bitvec::get_chunk`][`crate::collections::Bitvec::get_chunk`] - /// expects. - /// - /// Also directly increases the count of the first found free bit chunk. - fn position_first_zero(&mut self) -> Option { - // Iterate over the `counts` list of the bit stash. - // The counts list consists of packs of 32 counts per element. - for (n, counts) in self.counts.iter_mut().enumerate() { - if let Some(i) = counts.position_first_zero() { - counts.inc(i as usize); - let n = n as u64; - let i = i as u64; - return Some(n * (32 * 256) + i * 256) - } - } - None - } - - /// Returns the number of required counts elements. - fn required_counts(&self) -> u32 { - let capacity = self.free.capacity(); - if capacity == 0 { - return 0 - } - 1 + ((capacity - 1) / (32 * 256)) as u32 - } - - /// Returns `true` if the bit at the indexed slot is set (`1`). - /// - /// Returns `None` if the index is out of bounds. - pub fn get(&self, index: Index) -> Option { - self.free.get(index) - } - - /// Puts another set bit into the storage bit stash. - /// - /// Returns the index to the slot where the set bit has been inserted. - pub fn put(&mut self) -> Index { - if let Some(index) = self.position_first_zero() { - if index == self.free.len() as u64 { - self.free.push(true); - return self.free.len() - 1 - } - let mut bits256 = self - .free - .get_chunk_mut(index as u32) - .expect("must exist if indices have been found"); - if let Some(first_zero) = bits256.position_first_zero() { - bits256 - .get_mut(first_zero) - .expect("first zero is invalid") - .set(); - index as u32 + first_zero as u32 - } else { - // We found a free storage slot but it is not within the valid - // bounds of the free list but points to its end. So we simply - // append another 1 bit (`true`) to the free list and return - // a new index pointing to it. No need to push to the counts - // list in this case. - self.free.push(true); - self.free.len() - 1 - } - } else { - // We found no free 256-bit slot: - // - // - Check if we already have allocated too many (2^32) bits and - // panic if that's the case. The check is done on the internal - // storage bit vector. - // - Otherwise allocate a new pack of 256-bits in the free list - // and mirror it in the counts list. - self.free.push(true); - if self.counts.len() < self.required_counts() { - // We need to push another counts element. 
- let mut counter = CountFree::default(); - counter[0_u8] = 1; - self.counts.push(counter); - } - // Return the new slot. - self.free.len() - 1 - } - } - - /// Takes the bit from the given index and returns it. - /// - /// Returns `true` if the indexed bit was set (`1`). - /// Returns `None` if the index is out of bounds. - /// - /// # Note - /// - /// This frees up the indexed slot for putting in another set bit. - pub fn take(&mut self, index: Index) -> Option { - if index >= self.free.len() { - // Bail out early if index is out of bounds. - return None - } - let mut access = self.free.get_mut(index).expect("index is out of bounds"); - if !access.get() { - return Some(false) - } - // At this point the bit was found to be set (`true`) and we have - // update the underlying internals in order to reset it so the index - // becomes free for another bit again. - access.reset(); - // Update the counts list. - let counts_id = index / (256 * 32); - let byte_id = ((index / 256) % 32) as u8; - self.counts - .get_mut(counts_id) - .expect("invalid counts ID") - .dec(byte_id); - Some(true) - } -} diff --git a/crates/storage/src/collections/bitstash/storage.rs b/crates/storage/src/collections/bitstash/storage.rs deleted file mode 100644 index 835a58250e..0000000000 --- a/crates/storage/src/collections/bitstash/storage.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
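The free-list bookkeeping deleted above packs 32 chunk counters into each `CountFree` element, so a hit in counts element `n` at byte `i` maps back to bit index `n * (32 * 256) + i * 256`, `required_counts` rounds the bit capacity up to whole counter elements, and `take` reverses the mapping. What follows is a minimal standalone sketch of that arithmetic only; the helper names are invented and plain integers stand in for the deleted storage types.

const BITS_PER_CHUNK: u64 = 256;
const CHUNKS_PER_COUNTS: u64 = 32;

// Bit index of the first bit in chunk `i` of counts element `n`
// (mirrors the expression in `position_first_zero`).
fn first_bit_of(n: u64, i: u64) -> u64 {
    n * (CHUNKS_PER_COUNTS * BITS_PER_CHUNK) + i * BITS_PER_CHUNK
}

// Number of counts elements needed to cover `capacity` bits
// (mirrors `required_counts`).
fn required_counts(capacity: u64) -> u64 {
    if capacity == 0 {
        return 0;
    }
    1 + (capacity - 1) / (CHUNKS_PER_COUNTS * BITS_PER_CHUNK)
}

// Reverse direction used by `take`: which counts element and which
// of its 32 counters track a given bit index.
fn counts_location(index: u64) -> (u64, u8) {
    (index / (256 * 32), ((index / 256) % 32) as u8)
}

fn main() {
    assert_eq!(first_bit_of(1, 2), 32 * 256 + 2 * 256);
    assert_eq!(required_counts(32 * 256), 1);
    assert_eq!(required_counts(32 * 256 + 1), 2);
    assert_eq!(counts_location(32 * 256 + 2 * 256 + 7), (1, 2));
}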
- -use super::{ - BitStash, - CountFree, -}; -use crate::{ - collections::{ - Bitvec as StorageBitvec, - Vec as StorageVec, - }, - traits::{ - forward_clear_packed, - forward_pull_packed, - forward_push_packed, - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, - }, -}; -use ink_primitives::Key; - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - - impl StorageLayout for BitStash { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([ - FieldLayout::new( - "counts", - as StorageLayout>::layout(key_ptr), - ), - FieldLayout::new( - "elems", - ::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for CountFree { - const FOOTPRINT: u64 = 1; - const REQUIRES_DEEP_CLEAN_UP: bool = false; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - forward_pull_packed::(ptr) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - forward_push_packed::(self, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - forward_clear_packed::(self, ptr) - } -} - -impl PackedLayout for CountFree { - fn pull_packed(&mut self, _at: &Key) {} - fn push_packed(&self, _at: &Key) {} - fn clear_packed(&self, _at: &Key) {} -} - -impl SpreadLayout for BitStash { - const FOOTPRINT: u64 = as SpreadLayout>::FOOTPRINT - + ::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - counts: SpreadLayout::pull_spread(ptr), - free: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.counts, ptr); - SpreadLayout::push_spread(&self.free, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::clear_spread(&self.counts, ptr); - SpreadLayout::clear_spread(&self.free, ptr); - } -} - -impl SpreadAllocate for BitStash { - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - counts: SpreadAllocate::allocate_spread(ptr), - free: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/bitstash/tests.rs b/crates/storage/src/collections/bitstash/tests.rs deleted file mode 100644 index 3afdd66a64..0000000000 --- a/crates/storage/src/collections/bitstash/tests.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::BitStash; -use crate::traits::{ - KeyPtr, - SpreadLayout, -}; -use ink_primitives::Key; - -cfg_if::cfg_if! { - if #[cfg(miri)] { - // We need to lower the test allocations because miri's stacked borrows - // analysis currently is super linear for some work loads. 
- // Read more here: https://github.com/rust-lang/miri/issues/1367 - const TEST_ALLOCATIONS: u32 = 10; - } else { - const TEST_ALLOCATIONS: u32 = 10_000; - } -} - -#[test] -fn default_works() { - let default = BitStash::default(); - assert_eq!(default.get(0), None); -} - -#[test] -fn put_and_take_works() { - let mut default = BitStash::default(); - assert_eq!(default.get(0), None); - assert_eq!(default.put(), 0); - assert_eq!(default.get(0), Some(true)); - assert_eq!(default.take(0), Some(true)); - assert_eq!(default.get(0), Some(false)); -} - -#[test] -fn put_works() { - let mut default = BitStash::default(); - for i in 0..TEST_ALLOCATIONS { - assert_eq!(default.get(i), None); - assert_eq!(default.put(), i); - assert_eq!(default.get(i), Some(true)); - } -} - -fn filled_bitstash() -> BitStash { - let mut default = BitStash::default(); - for i in 0..TEST_ALLOCATIONS { - assert_eq!(default.put(), i); - assert_eq!(default.get(i), Some(true)); - } - default -} - -#[test] -fn get_works() { - let mut default = filled_bitstash(); - // Remove all bits at indices `(% 3 == 0)` and `(% 5 == 0)`. - for i in 0..TEST_ALLOCATIONS { - if i % 3 == 0 || i % 5 == 0 { - default.take(i); - } - } - for i in 0..TEST_ALLOCATIONS { - let expected = !(i % 3 == 0 || i % 5 == 0); - assert_eq!(default.get(i), Some(expected)); - } -} - -#[test] -fn take_in_order_works() { - let mut default = filled_bitstash(); - for i in 0..TEST_ALLOCATIONS { - assert_eq!(default.get(i), Some(true)); - assert_eq!(default.take(i), Some(true)); - assert_eq!(default.get(i), Some(false)); - } -} - -#[test] -fn take_in_rev_order_works() { - let mut default = filled_bitstash(); - for i in (0..TEST_ALLOCATIONS).rev() { - assert_eq!(default.get(i), Some(true)); - assert_eq!(default.take(i), Some(true)); - assert_eq!(default.get(i), Some(false)); - } -} - -#[test] -fn take_refill_works() { - let mut default = filled_bitstash(); - for i in 0..TEST_ALLOCATIONS { - assert_eq!(default.get(i), Some(true)); - assert_eq!(default.take(i), Some(true)); - assert_eq!(default.get(i), Some(false)); - assert_eq!(default.put(), i); - assert_eq!(default.get(i), Some(true)); - } -} - -#[test] -fn take_refill_rev_works() { - let mut default = filled_bitstash(); - for i in (0..TEST_ALLOCATIONS).rev() { - assert_eq!(default.get(i), Some(true)); - assert_eq!(default.take(i), Some(true)); - assert_eq!(default.get(i), Some(false)); - assert_eq!(default.put(), i); - assert_eq!(default.get(i), Some(true)); - } -} - -#[test] -fn spread_layout_push_pull_works() { - ink_env::test::run_test::(|_| { - let default = filled_bitstash(); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&default, &mut KeyPtr::from(root_key)); - let pulled = ::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(default, pulled); - Ok(()) - }) - .unwrap() -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let default = filled_bitstash(); - // First push the instance to the contract storage. - // Then load a valid instance, check it and clear its associated storage. - // Afterwards load the invalid instance from the same storage region - // and try to interact with it which is expected to fail. 
- let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&default, &mut KeyPtr::from(root_key)); - let pulled = ::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(default, pulled); - SpreadLayout::clear_spread(&pulled, &mut KeyPtr::from(root_key)); - let invalid = - ::pull_spread(&mut KeyPtr::from(root_key)); - // We have to prevent calling its destructor since that would also panic but - // in an uncontrollable way. - let mut invalid = core::mem::ManuallyDrop::new(invalid); - // Now interact with invalid instance. - let _ = invalid.put(); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/bitvec/bitref.rs b/crates/storage/src/collections/bitvec/bitref.rs deleted file mode 100644 index 3768b6d0f0..0000000000 --- a/crates/storage/src/collections/bitvec/bitref.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![allow(clippy::len_without_is_empty)] - -use super::{ - Bits256, - Index256, -}; - -/// A mutable bit access for operating on a single bit within a 256-bit pack. -#[derive(Debug)] -pub struct BitRefMut<'a> { - /// The queried pack of 256 bits. - bits: &'a mut Bits256, - /// The bit position within the queried bit pack. - at: u8, -} - -impl<'a> PartialEq for BitRefMut<'a> { - fn eq(&self, other: &Self) -> bool { - self.get() == other.get() - } -} - -impl<'a> Eq for BitRefMut<'a> {} - -impl<'a> BitRefMut<'a> { - /// Creates access to the indexed bit within the 256-bit pack. - pub(super) fn new(bits: &'a mut Bits256, at: Index256) -> Self { - Self { bits, at } - } - - /// Returns the value of the indexed bit. - /// - /// # Note - /// - /// - If 0: returns `false` - /// - If 1: returns `true` - pub fn get(&self) -> bool { - self.bits.get(self.at) - } - - /// Sets the value of the indexed bit to the given new value. - pub fn set_to(&mut self, new_value: bool) { - self.bits.set_to(self.at, new_value) - } - - /// Sets the indexed bit to `1` (true). - pub fn set(&mut self) { - self.bits.set(self.at) - } - - /// Resets the indexed bit to `0` (false). - pub fn reset(&mut self) { - self.bits.reset(self.at) - } - - /// Flips the indexed bit. - pub fn flip(&mut self) { - self.bits.flip(self.at) - } - - /// Computes bitwise XOR for the indexed bit and `rhs`. - pub fn xor(&mut self, rhs: bool) { - self.bits.xor(self.at, rhs) - } - - /// Computes bitwise AND for the indexed bit and `rhs`. - pub fn and(&mut self, rhs: bool) { - self.bits.and(self.at, rhs) - } - - /// Computes bitwise OR for the indexed bit and `rhs`. 
- pub fn or(&mut self, rhs: bool) { - self.bits.or(self.at, rhs) - } -} - -#[cfg(test)] -mod tests { - use super::BitRefMut; - use crate::collections::bitvec::Bits256; - - fn is_populated_bit_set(index: u8) -> bool { - (index % 5) == 0 || (index % 13) == 0 - } - - fn populated_bits256() -> Bits256 { - let mut bits256 = Bits256::default(); - for i in 0..256 { - let i = i as u8; - bits256.set_to(i, is_populated_bit_set(i)); - } - bits256 - } - - #[test] - fn get_set_works() { - let mut bits256 = populated_bits256(); - for i in 0..=255 { - let mut bitref = BitRefMut::new(&mut bits256, i); - let expected = is_populated_bit_set(i); - assert_eq!(bitref.get(), expected); - // Set only every second bit to true and check this later: - bitref.set_to(i % 2 == 0); - } - // Check if `set_to` was successful: - for i in 0..=255 { - assert_eq!(bits256.get(i), i % 2 == 0); - } - } - - #[test] - fn flip_works() { - let mut bits256 = populated_bits256(); - for i in 0..=255 { - let mut bitref = BitRefMut::new(&mut bits256, i); - bitref.flip(); - } - // Check if `flip` was successful: - for i in 0..=255 { - assert_eq!(bits256.get(i), !is_populated_bit_set(i)); - } - } - - #[test] - fn set_and_reset_works() { - let mut bits256 = populated_bits256(); - for i in 0..=255 { - let mut bitref = BitRefMut::new(&mut bits256, i); - if i % 2 == 0 { - bitref.set(); - } else { - bitref.reset(); - } - } - // Check if `set` and `reset` was successful: - for i in 0..=255 { - assert_eq!(bits256.get(i), i % 2 == 0); - } - } - - #[test] - fn bitops_works() { - let mut bits256 = populated_bits256(); - for i in 0..=255 { - let mut bitref = BitRefMut::new(&mut bits256, i); - let expected = is_populated_bit_set(i); - fn test_xor(bitref: &mut BitRefMut, expected: bool) { - fn test_xor_for(bitref: &mut BitRefMut, expected: bool, input: bool) { - assert_eq!(bitref.get(), expected); - bitref.xor(input); - assert_eq!(bitref.get(), expected ^ input); - bitref.set_to(expected); - } - test_xor_for(bitref, expected, false); - test_xor_for(bitref, expected, true); - } - test_xor(&mut bitref, expected); - fn test_and(bitref: &mut BitRefMut, expected: bool) { - fn test_and_for(bitref: &mut BitRefMut, expected: bool, input: bool) { - assert_eq!(bitref.get(), expected); - bitref.and(input); - assert_eq!(bitref.get(), expected & input); - bitref.set_to(expected); - } - test_and_for(bitref, expected, false); - test_and_for(bitref, expected, true); - } - test_and(&mut bitref, expected); - fn test_or(bitref: &mut BitRefMut, expected: bool) { - fn test_or_for(bitref: &mut BitRefMut, expected: bool, input: bool) { - assert_eq!(bitref.get(), expected); - bitref.or(input); - assert_eq!(bitref.get(), expected | input); - bitref.set_to(expected); - } - test_or_for(bitref, expected, false); - test_or_for(bitref, expected, true); - } - test_or(&mut bitref, expected); - } - } -} diff --git a/crates/storage/src/collections/bitvec/bits256.rs b/crates/storage/src/collections/bitvec/bits256.rs deleted file mode 100644 index 786f9f11b3..0000000000 --- a/crates/storage/src/collections/bitvec/bits256.rs +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - super::extend_lifetime, - BitRefMut, - Bits64, - Index256, - Index64, -}; - -/// A chunk of 256 bits. -#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -pub struct Bits256 { - bits: [Bits64; 4], -} - -/// Iterator over the valid bits of a pack of 256 bits. -#[derive(Debug, Copy, Clone)] -pub struct Iter<'a> { - bits: &'a Bits256, - start: u16, - end: u16, -} - -impl<'a> Iter<'a> { - fn new(bits256: &'a Bits256, len: u16) -> Self { - Self { - bits: bits256, - start: 0, - end: len, - } - } - - fn remaining(&self) -> u16 { - self.end - self.start - } -} - -impl<'a> ExactSizeIterator for Iter<'a> {} - -impl<'a> Iterator for Iter<'a> { - type Item = bool; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn nth(&mut self, n: usize) -> Option { - assert!(n < 256); - let n = n as u16; - if self.start + n >= self.end { - return None - } - let start = self.start + n; - self.start += 1 + n; - Some(self.bits.get(start as u8)) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } -} - -impl<'a> DoubleEndedIterator for Iter<'a> { - fn next_back(&mut self) -> Option { - ::nth_back(self, 0) - } - - fn nth_back(&mut self, n: usize) -> Option { - assert!(n < 256); - let n = n as u16; - if self.start + n >= self.end { - return None - } - self.end -= 1 + n; - Some(self.bits.get(self.end as u8)) - } -} - -/// Iterator over the valid mutable bits of a pack of 256 bits. -#[derive(Debug)] -pub struct IterMut<'a> { - bits: &'a mut Bits256, - start: u16, - end: u16, -} - -impl<'a> IterMut<'a> { - fn new(bits256: &'a mut Bits256, len: u16) -> Self { - Self { - bits: bits256, - start: 0, - end: len, - } - } - - fn remaining(&self) -> u16 { - self.end - self.start - } - - /// Returns access for the given bit index with extended but valid lifetimes. 
- fn get<'b>(&'b mut self, index: u8) -> BitRefMut<'a> { - unsafe { BitRefMut::new(extend_lifetime(self.bits), index) } - } -} - -impl<'a> ExactSizeIterator for IterMut<'a> {} - -impl<'a> Iterator for IterMut<'a> { - type Item = BitRefMut<'a>; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn nth(&mut self, n: usize) -> Option { - assert!(n < 256); - let n = n as u16; - if self.start + n >= self.end { - return None - } - let start = self.start + n; - self.start += 1 + n; - Some(self.get(start as u8)) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } -} - -impl<'a> DoubleEndedIterator for IterMut<'a> { - fn next_back(&mut self) -> Option { - ::nth_back(self, 0) - } - - fn nth_back(&mut self, n: usize) -> Option { - assert!(n < 256); - let n = n as u16; - if self.start + n >= self.end { - return None - } - self.end -= 1 + n; - Some(self.get(self.end as u8)) - } -} - -impl Bits256 { - fn bits_at(&self, index: Index256) -> (&u64, Index64) { - (&self.bits[(index / 64) as usize], index % 64) - } - - fn bits_at_mut(&mut self, index: Index256) -> (&mut u64, Index64) { - (&mut self.bits[(index / 64) as usize], index % 64) - } - - /// Yields the first `len` bits of the pack of 256 bits. - pub(super) fn iter(&self, len: u16) -> Iter { - Iter::new(self, len) - } - - /// Yields mutable accessors to the first `len` bits of the pack of 256 bits. - pub(super) fn iter_mut(&mut self, len: u16) -> IterMut { - IterMut::new(self, len) - } - - /// Returns the bit value for the bit at the given index. - pub fn get(&self, at: Index256) -> bool { - let (bits64, pos64) = self.bits_at(at); - bits64 & (0x01 << (63 - pos64)) != 0 - } - - /// Sets the bit value for the bit at the given index to the given value. - pub(super) fn set_to(&mut self, at: Index256, new_value: bool) { - if new_value { - self.set(at) - } else { - self.reset(at) - } - } - - /// Flips the bit value for the bit at the given index. - pub(super) fn flip(&mut self, at: Index256) { - self.xor(at, true) - } - - /// Sets the bit value for the bit at the given index to 1 (`true`). - pub(super) fn set(&mut self, at: Index256) { - self.or(at, true) - } - - /// Sets the bit value for the bit at the given index to 0 (`false`). - pub(super) fn reset(&mut self, at: Index256) { - self.and(at, false) - } - - fn op_at_with(&mut self, at: Index256, rhs: bool, op: F) - where - F: FnOnce(&mut Bits64, Bits64), - { - let (bits64, pos64) = self.bits_at_mut(at); - let rhs = (rhs as u64) << (63 - pos64); - op(bits64, rhs); - } - - /// Computes bitwise AND for the bit at the given index and `rhs`. - pub(super) fn and(&mut self, at: Index256, rhs: bool) { - self.op_at_with(at, !rhs, |bits64, rhs| *bits64 &= !rhs) - } - - /// Computes bitwise OR for the bit at the given index and `rhs`. - pub(super) fn or(&mut self, at: Index256, rhs: bool) { - self.op_at_with(at, rhs, |bits64, rhs| *bits64 |= rhs) - } - - /// Computes bitwise XOR for the bit at the given index and `rhs`. - pub(super) fn xor(&mut self, at: Index256, rhs: bool) { - self.op_at_with(at, rhs, |bits64, rhs| *bits64 ^= rhs) - } - - /// Returns the position of the first zero bit if any. 
- pub fn position_first_zero(&self) -> Option { - let mut offset: u32 = 0; - for bits64 in &self.bits { - if *bits64 != !0 { - return Some(((!bits64).leading_zeros() + offset) as u8) - } - offset += 64; - } - None - } -} - -#[cfg(test)] -mod tests { - use super::Bits256; - - #[test] - fn default_works() { - assert_eq!( - Bits256::default(), - Bits256 { - bits: [0x00, 0x00, 0x00, 0x00], - } - ); - } - - fn populated_bits256() -> Bits256 { - let mut bits256 = Bits256::default(); - for i in 0..256 { - let i = i as u8; - bits256.set_to(i, (i % 5) == 0 || (i % 13) == 0); - } - bits256 - } - - #[test] - fn get_works() { - let bits256 = populated_bits256(); - for i in 0..256 { - let i = i as u8; - assert_eq!(bits256.get(i), (i % 5) == 0 || (i % 13) == 0); - } - } - - #[test] - fn set_works() { - let mut bits256 = populated_bits256(); - for i in 0..256 { - let i = i as u8; - bits256.set(i); - assert!(bits256.get(i)); - } - } - - #[test] - fn reset_works() { - let mut bits256 = populated_bits256(); - for i in 0..256 { - let i = i as u8; - bits256.reset(i); - assert!(!bits256.get(i)); - } - } - - #[test] - fn and_works() { - let mut bits256 = populated_bits256(); - for i in 0..256 { - let i = i as u8; - bits256.and(i, i % 2 == 0); - assert_eq!( - bits256.get(i), - (i % 2) == 0 && ((i % 5) == 0 || (i % 13) == 0) - ); - } - } - - #[test] - fn or_works() { - let mut bits256 = populated_bits256(); - for i in 0..256 { - let i = i as u8; - bits256.or(i, i % 2 == 0); - assert_eq!( - bits256.get(i), - (i % 2) == 0 || (i % 5) == 0 || (i % 13) == 0 - ); - } - } - - #[test] - fn xor_works() { - let mut bits256 = populated_bits256(); - for i in 0..256 { - let i = i as u8; - bits256.xor(i, i % 2 == 0); - let a = (i % 2) == 0; - let b = (i % 5) == 0 || (i % 13) == 0; - assert_eq!(bits256.get(i), a != b); - } - } - - #[test] - fn position_first_zero_works() { - // Zero bits256: - let empty = Bits256::default(); - assert_eq!(empty.position_first_zero(), Some(0)); - // First bit is set: - let first_bit_is_set = Bits256 { - bits: [0x8000_0000_0000_0000, 0x00, 0x00, 0x00], - }; - assert_eq!(first_bit_is_set.position_first_zero(), Some(1)); - // Last bit is unset: - let first_bit_is_set = Bits256 { - bits: [!0, !0, !0, !1], - }; - assert_eq!(first_bit_is_set.position_first_zero(), Some(3 * 64 + 63)); - // Some middle bit is unset: - let first_bit_is_set = Bits256 { - bits: [!0, !0, !0xFFFF_FFFF, !1], - }; - assert_eq!(first_bit_is_set.position_first_zero(), Some(2 * 64 + 32)); - // All bits set: - let all_bits_set = Bits256 { - bits: [!0, !0, !0, !0], - }; - assert_eq!(all_bits_set.position_first_zero(), None); - } -} diff --git a/crates/storage/src/collections/bitvec/bitsref.rs b/crates/storage/src/collections/bitvec/bitsref.rs deleted file mode 100644 index 2abdcfc525..0000000000 --- a/crates/storage/src/collections/bitvec/bitsref.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
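The `Bits256` type deleted above stores 256 bits as four `u64` words addressed most-significant-bit first, and finds the first unset bit by inverting a word and counting leading zeros. Below is a self-contained sketch of that addressing scheme; the `Pack256` name is invented and the code only mirrors the logic described in the doc comments above, it is not the removed implementation.

#[derive(Default)]
struct Pack256 {
    words: [u64; 4],
}

impl Pack256 {
    // Returns the bit at `index` (0..=255): word `index / 64`,
    // position `index % 64`, counted from the most significant bit.
    fn get(&self, index: u8) -> bool {
        let word = self.words[(index / 64) as usize];
        word & (1u64 << (63 - (index % 64))) != 0
    }

    // Sets the bit at `index` to `1`.
    fn set(&mut self, index: u8) {
        self.words[(index / 64) as usize] |= 1u64 << (63 - (index % 64));
    }

    // Position of the first `0` bit, scanning word by word and using
    // `leading_zeros` on the inverted word, as in the deleted code.
    fn position_first_zero(&self) -> Option<u8> {
        let mut offset = 0u32;
        for word in &self.words {
            if *word != !0 {
                return Some(((!word).leading_zeros() + offset) as u8);
            }
            offset += 64;
        }
        None
    }
}

fn main() {
    let mut pack = Pack256::default();
    assert_eq!(pack.position_first_zero(), Some(0));
    pack.set(0);
    assert!(pack.get(0));
    assert_eq!(pack.position_first_zero(), Some(1));
}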
- -#![allow(clippy::len_without_is_empty)] - -use super::{ - BitRefMut, - Bits256, - Bits256BitsIter, - Bits256BitsIterMut, -}; - -/// A reference to a sub-slice within a 256-bit chunk. -/// -/// This is a reference wrapper around either a shared 256-bit chunk -/// or an exclusive 256-bit chunk. Also it prevents accesses to out of bounds -/// bits. -#[derive(Debug, Copy, Clone)] -#[repr(C)] // This is repr(C) to be on the safe side for the Deref impl. -pub struct ChunkRef { - /// The reference to the 256-bits chunk. - bits: T, - /// The length of the accessible chunk area. - len: u32, -} - -impl ChunkRef { - /// Returns the length of the 256-bit chunk. - /// - /// # Note - /// - /// This is the number of valid bits in the chunk of 256 bits. - /// The valid bits are consecutive and always start from index 0. - pub fn len(&self) -> u32 { - self.len - } -} - -impl<'a> ChunkRef<&'a Bits256> { - /// Creates a new shared 256-bit chunk access with the given length. - pub(super) fn shared(bits: &'a Bits256, len: u32) -> Self { - Self { bits, len } - } - - /// Returns the position of the first valid zero bit if any. - pub fn position_first_zero(&self) -> Option { - let position = self.bits.position_first_zero()?; - if position as u32 >= self.len() { - return None - } - Some(position) - } - - /// Returns the value of the indexed bit. - /// - /// # Note - /// - /// - If 0: returns `false` - /// - If 1: returns `true` - pub fn get(&self, index: u8) -> Option { - if index as u32 >= self.len { - return None - } - self.bits.get(index).into() - } - - /// Returns an iterator over the valid bits of `self`. - pub(super) fn iter(&self) -> Bits256BitsIter { - self.bits.iter(self.len as u16) - } -} - -impl<'a> ChunkRef<&'a mut Bits256> { - /// Creates a new exclusive 256-bit chunk access with the given length. - pub(super) fn exclusive(bits: &'a mut Bits256, len: u32) -> Self { - Self { bits, len } - } - - /// Returns mutable access to a single bit if the index is out of bounds. - pub fn get_mut(&mut self, index: u8) -> Option { - if index as u32 >= self.len { - return None - } - BitRefMut::new(self.bits, index).into() - } - - /// Returns an iterator over mutable accessors to the valid bits of `self`. - pub(super) fn iter_mut(&mut self) -> Bits256BitsIterMut { - self.bits.iter_mut(self.len as u16) - } -} - -impl<'a> core::ops::Deref for ChunkRef<&'a mut Bits256> { - type Target = ChunkRef<&'a Bits256>; - - fn deref(&self) -> &Self::Target { - // This implementation allows to mirror the interface on - // `ChunkRef<&'a Bits256>` onto `ChunkRef<&'a mut Bits256>` - // without the need of separate implementations. - // - // SAFETY: The `ChunkRef` struct is `repr(C)` which should guarantee - // that both `ChunkRef<&'a mut Bits256>` as well as - // `ChunkRef<&'a Bits256>` have the same internal layout - // and thus can be transmuted safely. 
- let ptr: *const Self = self; - unsafe { &*(ptr as *const Self::Target) } - } -} - -#[cfg(test)] -mod tests { - use super::{ - Bits256, - ChunkRef, - }; - - fn is_populated_bit_set(index: u8) -> bool { - (index % 5) == 0 || (index % 13) == 0 - } - - fn populated_bits256() -> Bits256 { - let mut bits256 = Bits256::default(); - for i in 0..256 { - let i = i as u8; - bits256.set_to(i, is_populated_bit_set(i)); - } - bits256 - } - - #[test] - fn shared_works() { - let len: u8 = 100; - let bits = populated_bits256(); - let cref = ChunkRef::shared(&bits, len as u32); - assert_eq!(cref.len(), len as u32); - // Get works: - for i in 0..len { - assert_eq!(cref.get(i), Some(is_populated_bit_set(i))); - } - assert_eq!(cref.get(len), None); - // Iter works: - for (i, val) in cref.iter().enumerate() { - assert_eq!(val, is_populated_bit_set(i as u8)); - } - } - - #[test] - fn exclusive_works() { - let len: u8 = 100; - let mut bits = populated_bits256(); - let mut cref = ChunkRef::exclusive(&mut bits, len as u32); - assert_eq!(cref.len(), len as u32); - // `get` and `get_mut` works: - for i in 0..len { - assert_eq!(cref.get(i), Some(is_populated_bit_set(i))); - assert_eq!( - cref.get_mut(i).map(|br| br.get()), - Some(is_populated_bit_set(i)) - ); - } - assert_eq!(cref.get(len), None); - assert_eq!(cref.get_mut(len), None); - // `iter` works: - for (i, val) in cref.iter().enumerate() { - assert_eq!(val, is_populated_bit_set(i as u8)); - } - } - - #[test] - fn position_first_zero_works() { - let len = 256; - let mut zeros = Default::default(); - let mut cref = ChunkRef::exclusive(&mut zeros, len); - for i in 0..len { - assert_eq!(cref.position_first_zero(), Some(i as u8)); - cref.get_mut(i as u8).unwrap().set(); - } - // Now all bits are set to `1`: - assert_eq!(cref.position_first_zero(), None); - } - - #[test] - fn iter_mut_works() { - let len = 100; - let mut zeros = Default::default(); - let mut cref = ChunkRef::exclusive(&mut zeros, len); - // Initialize all bits with 0 and set them to 1 via `iter_mut`. - // Then check if they are 1: - for mut byte in cref.iter_mut() { - assert!(!byte.get()); - byte.set(); - } - assert!(cref.iter().all(|byte| byte)); - } -} diff --git a/crates/storage/src/collections/bitvec/impls.rs b/crates/storage/src/collections/bitvec/impls.rs deleted file mode 100644 index 56c9e9b4da..0000000000 --- a/crates/storage/src/collections/bitvec/impls.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
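The `Deref` implementation deleted above reuses the shared-chunk API from the exclusive wrapper by casting a pointer between two `repr(C)` wrappers that differ only in the mutability of the inner reference. A hedged, generic sketch of the same pattern follows, using an invented `View` wrapper over byte slices rather than the deleted `ChunkRef`/`Bits256` pair.

use core::ops::Deref;

#[repr(C)]
struct View<T> {
    data: T,
    len: u32,
}

impl<'a> View<&'a [u8]> {
    // Shared-view API: first byte inside the valid prefix, if any.
    fn first(&self) -> Option<u8> {
        self.data.get(..self.len as usize)?.first().copied()
    }
}

impl<'a> Deref for View<&'a mut [u8]> {
    type Target = View<&'a [u8]>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: both wrappers are `repr(C)` with layout-identical fields
        // (only the mutability of the inner reference differs), so the
        // pointer cast mirrors the one in the deleted `ChunkRef` impl.
        let ptr: *const Self = self;
        unsafe { &*(ptr as *const Self::Target) }
    }
}

fn main() {
    let mut bytes = [1u8, 2, 3];
    let view = View { data: &mut bytes[..], len: 2 };
    // The shared-view method is reachable through `Deref`:
    assert_eq!(view.first(), Some(1));
}

The payoff of the pattern is that every read-only accessor is written once on the shared wrapper and inherited by the exclusive one, which is exactly the motivation stated in the deleted comment.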
- -use super::{ - BitsIter, - Bitvec as StorageBitvec, -}; - -impl Default for StorageBitvec { - fn default() -> Self { - Self::new() - } -} - -impl PartialEq for StorageBitvec { - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false - } - self.bits.eq(&other.bits) - } -} - -impl Eq for StorageBitvec {} - -impl Extend for StorageBitvec { - fn extend>(&mut self, iter: T) { - for value in iter { - self.push(value) - } - } -} - -impl<'a> Extend<&'a bool> for StorageBitvec { - fn extend>(&mut self, iter: T) { - for value in iter { - self.push(*value) - } - } -} - -impl FromIterator for StorageBitvec { - fn from_iter>(iter: T) -> Self { - let mut bitvec = Self::default(); - bitvec.extend(iter); - bitvec - } -} - -impl<'a> FromIterator<&'a bool> for StorageBitvec { - fn from_iter>(iter: T) -> Self { - iter.into_iter().copied().collect() - } -} - -impl<'a> IntoIterator for &'a StorageBitvec { - type Item = bool; - type IntoIter = BitsIter<'a>; - - fn into_iter(self) -> Self::IntoIter { - self.bits() - } -} diff --git a/crates/storage/src/collections/bitvec/iter.rs b/crates/storage/src/collections/bitvec/iter.rs deleted file mode 100644 index cbd3e8e4bc..0000000000 --- a/crates/storage/src/collections/bitvec/iter.rs +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - super::extend_lifetime, - BitRefMut, - Bits256, - Bits256BitsIter, - Bits256BitsIterMut, - Bitvec as StorageBitvec, - ChunkRef, -}; -use crate::collections::vec::{ - Iter as StorageVecIter, - IterMut as StorageVecIterMut, -}; -use core::cmp::min; - -/// Iterator over the bits of a storage bit vector. -#[derive(Debug, Copy, Clone)] -pub struct BitsIter<'a> { - remaining: u32, - bits256_iter: Bits256Iter<'a>, - front_iter: Option>, - back_iter: Option>, -} - -impl<'a> BitsIter<'a> { - /// Creates a new iterator yielding the bits of the storage bit vector. 
- pub(super) fn new(bitvec: &'a StorageBitvec) -> Self { - Self { - remaining: bitvec.len(), - bits256_iter: bitvec.iter_chunks(), - front_iter: None, - back_iter: None, - } - } -} - -impl<'a> ExactSizeIterator for BitsIter<'a> {} - -impl<'a> Iterator for BitsIter<'a> { - type Item = bool; - - fn next(&mut self) -> Option { - loop { - if let Some(ref mut front_iter) = self.front_iter { - if let front @ Some(_) = front_iter.next() { - self.remaining -= 1; - return front - } - } - match self.bits256_iter.next() { - None => { - if let Some(back) = self.back_iter.as_mut()?.next() { - self.remaining -= 1; - return Some(back) - } - return None - } - Some(ref mut front) => { - self.front_iter = Some(unsafe { extend_lifetime(front) }.iter()); - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining as usize - } -} - -impl<'a> DoubleEndedIterator for BitsIter<'a> { - fn next_back(&mut self) -> Option { - loop { - if let Some(ref mut back_iter) = self.back_iter { - if let back @ Some(_) = back_iter.next_back() { - self.remaining -= 1; - return back - } - } - match self.bits256_iter.next_back() { - None => { - if let Some(front) = self.front_iter.as_mut()?.next_back() { - self.remaining -= 1; - return Some(front) - } - return None - } - Some(ref mut back) => { - self.back_iter = Some(unsafe { extend_lifetime(back) }.iter()); - } - } - } - } -} - -/// Iterator over the bits of a storage bit vector. -#[derive(Debug)] -pub struct BitsIterMut<'a> { - remaining: u32, - bits256_iter: Bits256IterMut<'a>, - front_iter: Option>, - back_iter: Option>, -} - -impl<'a> BitsIterMut<'a> { - /// Creates a new iterator yielding the bits of the storage bit vector. - pub(super) fn new(bitvec: &'a mut StorageBitvec) -> Self { - Self { - remaining: bitvec.len(), - bits256_iter: bitvec.iter_chunks_mut(), - front_iter: None, - back_iter: None, - } - } -} - -impl<'a> ExactSizeIterator for BitsIterMut<'a> {} - -impl<'a> Iterator for BitsIterMut<'a> { - type Item = BitRefMut<'a>; - - fn next(&mut self) -> Option { - loop { - if let Some(ref mut front_iter) = self.front_iter { - if let front @ Some(_) = front_iter.next() { - self.remaining -= 1; - return front - } - } - match self.bits256_iter.next() { - None => { - if let Some(back) = self.back_iter.as_mut()?.next() { - self.remaining -= 1; - return Some(back) - } - return None - } - Some(ref mut front) => { - self.front_iter = Some(unsafe { extend_lifetime(front) }.iter_mut()); - } - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining as usize - } -} - -impl<'a> DoubleEndedIterator for BitsIterMut<'a> { - fn next_back(&mut self) -> Option { - loop { - if let Some(ref mut back_iter) = self.back_iter { - if let back @ Some(_) = back_iter.next_back() { - self.remaining -= 1; - return back - } - } - match self.bits256_iter.next_back() { - None => { - if let Some(front) = self.front_iter.as_mut()?.next_back() { - self.remaining -= 1; - return Some(front) - } - return None - } - Some(ref mut back) => { - self.back_iter = Some(unsafe { extend_lifetime(back) }.iter_mut()); - } - } - } - } -} - -/// Iterator over the 256-bit chunks of a storage bitvector. -#[derive(Debug, Copy, Clone)] -pub struct Bits256Iter<'a> { - /// The storage vector iterator over the internal 256-bit chunks. 
- iter: StorageVecIter<'a, Bits256>, - /// The remaining bits to be yielded. - remaining: u32, -} - -impl<'a> Bits256Iter<'a> { - /// Creates a new 256-bit chunks iterator over the given storage bitvector. - pub(super) fn new(bitvec: &'a StorageBitvec) -> Self { - Self { - iter: bitvec.bits.iter(), - remaining: bitvec.len(), - } - } -} - -impl<'a> Iterator for Bits256Iter<'a> { - type Item = ChunkRef<&'a Bits256>; - - fn next(&mut self) -> Option { - if self.remaining == 0 { - return None - } - let len = min(256, self.remaining); - self.remaining = self.remaining.saturating_sub(256); - self.iter - .next() - .map(|bits256| ChunkRef::shared(bits256, len)) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn count(self) -> usize { - self.iter.count() - } -} - -impl<'a> DoubleEndedIterator for Bits256Iter<'a> { - fn next_back(&mut self) -> Option { - if self.remaining == 0 { - return None - } - let mut len = self.remaining % 256; - if len == 0 { - len = 256; - } - self.remaining = self.remaining.saturating_sub(len); - self.iter - .next_back() - .map(|bits256| ChunkRef::shared(bits256, len)) - } -} - -impl<'a> ExactSizeIterator for Bits256Iter<'a> {} - -/// Iterator over mutable 256-bit chunks of a storage bitvector. -#[derive(Debug)] -pub struct Bits256IterMut<'a> { - /// The storage vector iterator over the internal mutable 256-bit chunks. - iter: StorageVecIterMut<'a, Bits256>, - /// The remaining bits to be yielded. - remaining: u32, -} - -impl<'a> Bits256IterMut<'a> { - /// Creates a new 256-bit chunks iterator over the given storage bitvector. - pub(super) fn new(bitvec: &'a mut StorageBitvec) -> Self { - Self { - remaining: bitvec.len(), - iter: bitvec.bits.iter_mut(), - } - } -} - -impl<'a> Iterator for Bits256IterMut<'a> { - type Item = ChunkRef<&'a mut Bits256>; - - fn next(&mut self) -> Option { - let len = min(256, self.remaining); - self.remaining = self.remaining.saturating_sub(256); - self.iter - .next() - .map(|bits256| ChunkRef::exclusive(bits256, len)) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn count(self) -> usize { - self.iter.count() - } -} - -impl<'a> DoubleEndedIterator for Bits256IterMut<'a> { - fn next_back(&mut self) -> Option { - let mut len = self.remaining % 256; - self.remaining -= len; - if len == 0 { - len = 256; - } - self.iter - .next_back() - .map(|bits256| ChunkRef::exclusive(bits256, len)) - } -} - -impl<'a> ExactSizeIterator for Bits256IterMut<'a> {} diff --git a/crates/storage/src/collections/bitvec/mod.rs b/crates/storage/src/collections/bitvec/mod.rs deleted file mode 100644 index 5dbcabedad..0000000000 --- a/crates/storage/src/collections/bitvec/mod.rs +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Storage bit vector data structure and utilities. -//! -//! Allows to compactly and efficiently store and manipulate on single bits. 
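The bit vector removed in this module keeps its length separately from its bits, which live in packs of 256; a global bit index therefore splits into a pack index (`at / 256`) and a position inside the pack (`at % 256`), and a fresh pack is allocated only when the length catches up with the capacity. A rough sketch of that addressing follows, with invented names and a plain `Vec` standing in for the deleted storage types.

struct BitVecSketch {
    len: u32,
    packs: Vec<[u64; 4]>, // each entry holds 256 bits
}

impl BitVecSketch {
    fn new() -> Self {
        Self { len: 0, packs: Vec::new() }
    }

    // Capacity in bits; always a whole number of 256-bit packs.
    fn capacity(&self) -> u64 {
        self.packs.len() as u64 * 256
    }

    // Splits a bit index into `(pack index, position inside the pack)`,
    // mirroring the deleted `split_index`.
    fn split_index(&self, at: u32) -> Option<(u32, u8)> {
        if at >= self.len {
            return None;
        }
        Some((at / 256, (at % 256) as u8))
    }

    // Pushes a bit, allocating a fresh pack only when every existing
    // pack is already full; only set bits require a write.
    fn push(&mut self, value: bool) {
        if u64::from(self.len) == self.capacity() {
            self.packs.push([0u64; 4]);
        }
        let index = self.len;
        self.len += 1;
        if value {
            let (pack, pos) = self.split_index(index).expect("index is in bounds");
            self.packs[pack as usize][(pos / 64) as usize] |= 1u64 << (63 - (pos % 64));
        }
    }
}

fn main() {
    let mut bv = BitVecSketch::new();
    assert_eq!(bv.capacity(), 0);
    bv.push(true);
    assert_eq!((bv.len, bv.capacity()), (1, 256));
    assert_eq!(bv.split_index(0), Some((0, 0)));
    assert_eq!(bv.split_index(1), None);
}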
- -mod bitref; -mod bits256; -mod bitsref; -mod impls; -mod iter; -mod storage; - -#[cfg(test)] -mod tests; - -pub use self::{ - bitref::BitRefMut, - bitsref::ChunkRef, - iter::{ - BitsIter, - BitsIterMut, - }, -}; -use self::{ - bits256::{ - Bits256, - Iter as Bits256BitsIter, - IterMut as Bits256BitsIterMut, - }, - iter::{ - Bits256Iter, - Bits256IterMut, - }, -}; -use crate::{ - Lazy, - Vec as StorageVec, -}; - -/// The index of a bit pack within the bit vector. -type Index = u32; - -/// The position of a bit within a 256-bit package. -type Index256 = u8; - -/// The position of a bit within a `u64`. -type Index64 = u8; - -/// A pack of 64 bits. -type Bits64 = u64; - -/// A storage bit vector. -/// -/// # Note -/// -/// Organizes its bits in chunks of 256 bits. -/// Allows to `push`, `pop`, inspect and manipulate the underlying bits. -#[derive(Debug)] -pub struct Bitvec { - /// The length of the bit vector. - len: Lazy, - /// The bits of the bit vector. - /// - /// Organized in packs of 256 bits. - bits: StorageVec, -} - -impl Bitvec { - /// Creates a new empty bit vector. - pub fn new() -> Self { - Self { - len: Lazy::from(0), - bits: StorageVec::new(), - } - } - - /// Returns the length of the bit vector in bits. - pub fn len(&self) -> u32 { - *self.len - } - - /// Returns `true` if the bit vector is empty. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the capacity of the bit vector in bits. - /// - /// # Note - /// - /// Returns a `u64` since it is always greater than or equal to `self.len()` - /// which itself returns a `u32`. - pub fn capacity(&self) -> u64 { - (self.bits.len() * 256) as u64 - } - - /// Returns an iterator over the bits of the storage bit vector. - pub fn bits(&self) -> BitsIter { - BitsIter::new(self) - } - - /// Returns an iterator over the mutable bits of the storage bit vector. - pub fn bits_mut(&mut self) -> BitsIterMut { - BitsIterMut::new(self) - } - - /// Returns an iterator over the 256-bit chunks of the storage bit vector. - pub(super) fn iter_chunks(&self) -> Bits256Iter { - Bits256Iter::new(self) - } - - /// Returns an iterator over the mutable 256-bit chunks of the storage bit vector. - pub(super) fn iter_chunks_mut(&mut self) -> Bits256IterMut { - Bits256IterMut::new(self) - } - - /// Splits the given index into a 256-bit pack index and a position index of the bit. - fn split_index(&self, at: Index) -> Option<(Index, Index256)> { - if at >= self.len() { - return None - } - Some((at / 256, (at % 256) as u8)) - } - - /// Returns the immutable access pair to the underlying 256-bits pack and bit. - /// - /// Returns `None` if the given index is out of bounds. - fn get_bits256(&self, at: Index) -> Option<(&Bits256, Index256)> { - let (index, pos256) = self.split_index(at)?; - let bits256 = self.bits.get(index).expect("index is out of bounds"); - Some((bits256, pos256)) - } - - /// Returns the mutable access pair to the underlying 256-bits pack and bit. - /// - /// Returns `None` if the given index is out of bounds. - fn get_bits256_mut(&mut self, at: Index) -> Option<(&mut Bits256, Index256)> { - let (index, pos256) = self.split_index(at)?; - let bits256 = self.bits.get_mut(index).expect("index is out of bounds"); - Some((bits256, pos256)) - } - - /// Returns a mutable bit access to the bit at the given index if any. - fn get_access_mut(&mut self, at: Index) -> Option { - self.get_bits256_mut(at) - .map(|(bits256, pos256)| BitRefMut::new(bits256, pos256)) - } - - /// Returns the value of the bit at the given index if any. 
- pub fn get(&self, at: Index) -> Option { - self.get_bits256(at) - .map(|(bits256, pos256)| bits256.get(pos256)) - } - - /// Returns a mutable bit access to the bit at the given index if any. - pub fn get_mut(&mut self, at: Index) -> Option { - self.get_access_mut(at) - } - - /// Returns a shared reference to the 256-bit chunk for the bit at the given index. - pub fn get_chunk(&self, at: Index) -> Option> { - if at >= self.len() { - return None - } - use core::cmp::min; - let chunk_id = at / 256; - let chunk_len = min(256, self.len() - at); - let bits256 = self.bits.get(chunk_id).expect("index is out of bounds"); - Some(ChunkRef::shared(bits256, chunk_len)) - } - - /// Returns an exclusive reference to the 256-bit chunk for the bit at the given index. - pub fn get_chunk_mut(&mut self, at: Index) -> Option> { - if at >= self.len() { - return None - } - use core::cmp::min; - let chunk_id = at / 256; - let chunk_len = min(256, self.len() - at); - let bits256 = self.bits.get_mut(chunk_id).expect("index is out of bounds"); - Some(ChunkRef::exclusive(bits256, chunk_len)) - } - - /// Returns the first bit of the bit vector. - /// - /// # Note - /// - /// Returns `None` if the bit vector is empty. - pub fn first(&self) -> Option { - if self.is_empty() { - return None - } - self.get(0) - } - - /// Returns a mutable bit access to the first bit of the bit vector. - /// - /// # Note - /// - /// Returns `None` if the bit vector is empty. - pub fn first_mut(&mut self) -> Option { - if self.is_empty() { - return None - } - self.get_access_mut(0) - } - - /// Returns the last bit of the bit vector. - /// - /// # Note - /// - /// Returns `None` if the bit vector is empty. - pub fn last(&self) -> Option { - if self.is_empty() { - return None - } - self.get(self.len() - 1) - } - - /// Returns a mutable bit access to the last bit of the bit vector. - /// - /// # Note - /// - /// Returns `None` if the bit vector is empty. - pub fn last_mut(&mut self) -> Option { - if self.is_empty() { - return None - } - self.get_access_mut(self.len() - 1) - } - - /// The maximum number of bits that can be pushed to a storage bit vector. - fn maximum_capacity(&self) -> u32 { - u32::MAX - } - - /// Pushes the given value onto the bit vector. - /// - /// # Note - /// - /// This increases the length of the bit vector. - /// - /// # Panics - /// - /// If the storage bit vector reached its maximum capacity. - pub fn push(&mut self, value: bool) { - assert!( - self.len() < self.maximum_capacity(), - "reached maximum capacity for storage bit vector" - ); - if self.len() as u64 == self.capacity() { - // Case: All 256-bits packs are full or there are none: - // Need to push another 256-bit pack to the storage vector. - let mut bits256 = Bits256::default(); - if value { - // If `value` is `true` set its first bit to `1`. - bits256.set(0); - debug_assert!(bits256.get(0)); - }; - self.bits.push(bits256); - *self.len += 1; - return - } - // Case: The last 256-bit pack has unused bits: - // - Set last bit of last 256-bit pack to the given value. - // - Opt.: Since bits are initialized as 0 we only need - // to mutate this value if `value` is `true`. - *self.len += 1; - if value { - self.last_mut() - .expect("must have at least a valid bit in this case") - .set() - } - } - - /// Pops the last bit from the bit vector. - /// - /// Returns the popped bit as `bool`. - /// - /// # Note - /// - /// This reduces the length of the bit vector by one. 
- pub fn pop(&mut self) -> Option { - if self.is_empty() { - // Bail out early if the bit vector is emtpy. - return None - } - let mut access = self.last_mut().expect("must be some if non-empty"); - let popped = access.get(); - access.reset(); - *self.len -= 1; - Some(popped) - } -} diff --git a/crates/storage/src/collections/bitvec/storage.rs b/crates/storage/src/collections/bitvec/storage.rs deleted file mode 100644 index b5d1603002..0000000000 --- a/crates/storage/src/collections/bitvec/storage.rs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - Bits256, - Bitvec as StorageBitvec, -}; -use crate::{ - traits::{ - forward_clear_packed, - forward_pull_packed, - forward_push_packed, - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, - }, - Pack, - Vec as StorageVec, -}; -use ink_primitives::Key; - -#[cfg(feature = "std")] -const _: () = { - use crate::{ - lazy::Lazy, - traits::StorageLayout, - }; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - - impl StorageLayout for StorageBitvec { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([ - FieldLayout::new("len", as StorageLayout>::layout(key_ptr)), - FieldLayout::new( - "elems", - as StorageLayout>::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for Bits256 { - const FOOTPRINT: u64 = 1; - const REQUIRES_DEEP_CLEAN_UP: bool = false; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - forward_pull_packed::(ptr) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - forward_push_packed::(self, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - forward_clear_packed::(self, ptr) - } -} - -impl PackedLayout for Bits256 { - fn pull_packed(&mut self, _at: &Key) {} - fn push_packed(&self, _at: &Key) {} - fn clear_packed(&self, _at: &Key) {} -} - -impl SpreadLayout for StorageBitvec { - const FOOTPRINT: u64 = 1 + > as SpreadLayout>::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadLayout::pull_spread(ptr), - bits: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.len, ptr); - SpreadLayout::push_spread(&self.bits, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::clear_spread(&self.len, ptr); - SpreadLayout::clear_spread(&self.bits, ptr); - } -} - -impl SpreadAllocate for StorageBitvec { - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadAllocate::allocate_spread(ptr), - bits: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/bitvec/tests.rs b/crates/storage/src/collections/bitvec/tests.rs deleted file mode 100644 index 2241616915..0000000000 --- a/crates/storage/src/collections/bitvec/tests.rs +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::Bitvec as StorageBitvec; -use crate::traits::{ - KeyPtr, - SpreadLayout, -}; -use ink_primitives::Key; - -#[test] -fn new_default_works() { - // Check if `Bitvec::new` works: - let mut bitvec = StorageBitvec::new(); - assert_eq!(bitvec.len(), 0); - assert_eq!(bitvec.capacity(), 0); - assert!(bitvec.is_empty()); - assert_eq!(bitvec.bits().next(), None); - assert_eq!(bitvec.get(0), None); - assert!(bitvec.first().is_none()); - assert!(bitvec.first_mut().is_none()); - assert!(bitvec.last().is_none()); - assert!(bitvec.last_mut().is_none()); - // Check if `Bitvec::default` works: - let mut default = StorageBitvec::default(); - assert_eq!(default.len(), 0); - assert_eq!(bitvec.capacity(), 0); - assert!(default.is_empty()); - assert_eq!(default.bits().next(), None); - assert_eq!(default.get(0), None); - assert!(default.first().is_none()); - assert!(default.first_mut().is_none()); - assert!(default.last().is_none()); - assert!(default.last_mut().is_none()); - // Check if both are equal: - assert_eq!(bitvec, default); -} - -/// Creates a storage bitvector where every bit at every 5th and 13th index -/// is set to `1` (true). The bitvector has a total length of 600 bits which -/// requires it to have 3 chunks of 256-bit giving a capacity of 768 bits. -fn bitvec_600() -> StorageBitvec { - let bitvec = (0..600) - .map(|i| (i % 5) == 0 || (i % 13) == 0) - .collect::(); - assert_eq!(bitvec.len(), 600); - assert_eq!(bitvec.capacity(), 768); - bitvec -} - -#[test] -fn get_works() { - let mut bitvec = bitvec_600(); - for i in 0..bitvec.len() { - assert_eq!(bitvec.get(i), Some((i % 5) == 0 || (i % 13) == 0)); - assert_eq!( - bitvec.get_mut(i).map(|b| b.get()), - Some((i % 5) == 0 || (i % 13) == 0) - ); - } -} - -#[test] -fn iter_next_works() { - let bitvec = bitvec_600(); - // Test iterator over read-only bits. - for (i, bit) in bitvec.bits().enumerate() { - assert_eq!(bit, (i % 5) == 0 || (i % 13) == 0); - } - // Test iterator over mutable accessors to bits. - let mut bitvec = bitvec; - for (i, accessor) in bitvec.bits_mut().enumerate() { - assert_eq!(accessor.get(), (i % 5) == 0 || (i % 13) == 0); - } -} - -#[test] -fn iter_next_back_works() { - let bitvec = bitvec_600(); - // Test iterator over read-only bits. - for (i, bit) in bitvec.bits().enumerate().rev() { - assert_eq!(bit, (i % 5) == 0 || (i % 13) == 0); - } - // Test iterator over mutable accessors to bits. 
- let mut bitvec = bitvec; - for (i, accessor) in bitvec.bits_mut().enumerate().rev() { - assert_eq!(accessor.get(), (i % 5) == 0 || (i % 13) == 0); - } -} - -#[test] -fn double_ended_iter_works() { - let mut bitvec = StorageBitvec::default(); - bitvec.push(true); - bitvec.push(true); - bitvec.push(true); - - let mut iter = bitvec.bits(); - assert_eq!(Some(true), iter.next()); - assert_eq!(Some(true), iter.next_back()); - assert_eq!(Some(true), iter.next()); - assert_eq!(None, iter.next()); - assert_eq!(None, iter.next_back()); -} - -#[test] -fn push_works() { - let mut bitvec = StorageBitvec::new(); - assert_eq!(bitvec.len(), 0); - assert_eq!(bitvec.capacity(), 0); - // Push `1` - bitvec.push(true); - assert_eq!(bitvec.len(), 1); - assert_eq!(bitvec.capacity(), 256); - assert_eq!(bitvec.first(), Some(true)); - assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); - assert_eq!(bitvec.last(), Some(true)); - assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(true)); - // Push `0` - bitvec.push(false); - assert_eq!(bitvec.len(), 2); - assert_eq!(bitvec.capacity(), 256); - assert_eq!(bitvec.first(), Some(true)); - assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); - assert_eq!(bitvec.last(), Some(false)); - assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(false)); - // Push `1` - bitvec.push(true); - assert_eq!(bitvec.len(), 3); - assert_eq!(bitvec.capacity(), 256); - assert_eq!(bitvec.first(), Some(true)); - assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); - assert_eq!(bitvec.last(), Some(true)); - assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(true)); -} - -#[test] -fn pop_works() { - let mut bitvec = [true, false, true].iter().collect::(); - assert_eq!(bitvec.len(), 3); - assert_eq!(bitvec.capacity(), 256); - // Pop `1` (true) - assert_eq!(bitvec.pop(), Some(true)); - assert_eq!(bitvec.len(), 2); - assert_eq!(bitvec.capacity(), 256); - assert_eq!(bitvec.first(), Some(true)); - assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); - assert_eq!(bitvec.last(), Some(false)); - assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(false)); - // Pop `0` (false) - assert_eq!(bitvec.pop(), Some(false)); - assert_eq!(bitvec.len(), 1); - assert_eq!(bitvec.capacity(), 256); - assert_eq!(bitvec.first(), Some(true)); - assert_eq!(bitvec.first_mut().map(|access| access.get()), Some(true)); - assert_eq!(bitvec.last(), Some(true)); - assert_eq!(bitvec.last_mut().map(|access| access.get()), Some(true)); - // Pop `1` (true) - assert_eq!(bitvec.pop(), Some(true)); - assert_eq!(bitvec.len(), 0); - assert_eq!(bitvec.capacity(), 256); - assert!(bitvec.first().is_none()); - assert!(bitvec.first_mut().is_none()); - assert!(bitvec.last().is_none()); - assert!(bitvec.last_mut().is_none()); -} - -#[test] -fn spread_layout_push_pull_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let bv1 = bitvec_600(); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&bv1, &mut KeyPtr::from(root_key)); - // Load the pushed storage vector into another instance and check that - // both instances are equal: - let bv2 = - ::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(bv1, bv2); - Ok(()) - }) -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let bv1 = bitvec_600(); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&bv1, &mut KeyPtr::from(root_key)); - // 
It has already been asserted that a valid instance can be pulled - // from contract storage after a push to the same storage region. - // - // Now clear the associated storage from `bv1` and check whether - // loading another instance from this storage will panic since the - // vector's length property cannot read a value: - SpreadLayout::clear_spread(&bv1, &mut KeyPtr::from(root_key)); - let _ = ::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/hashmap/fuzz_tests.rs b/crates/storage/src/collections/hashmap/fuzz_tests.rs deleted file mode 100644 index add82c4870..0000000000 --- a/crates/storage/src/collections/hashmap/fuzz_tests.rs +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The fuzz tests are testing complex types. -#![allow(clippy::type_complexity)] - -use super::HashMap as StorageHashMap; -use crate::{ - test_utils::FuzzCollection, - traits::{ - KeyPtr, - PackedLayout, - SpreadLayout, - }, - Pack, -}; -use ink_primitives::Key; -use itertools::Itertools; -use quickcheck::{ - Arbitrary, - Gen, -}; -use std::collections::HashMap; - -/// Conducts repeated insert and remove operations into the map by iterating -/// over `xs`. For each odd `x` in `xs` a defined number of insert operations -/// (`inserts_each`) is executed. For each even `x` it's asserted that the -/// previously inserted elements are in the map, and they are removed subsequently. -/// -/// The reasoning behind this even/odd sequence is to introduce some -/// randomness into when elements are inserted/removed. -/// -/// `inserts_each` was chosen as `u8` to keep the number of inserts per `x` in -/// a reasonable range. -fn insert_and_remove(xs: Vec, inserts_each: u8) -> StorageHashMap { - let mut map = >::new(); - let mut cnt_inserts = 0; - let mut previous_even_x = None; - let inserts_each = inserts_each as i32; - - for x in 0..xs.len() as i32 { - if x % 2 == 0 { - // On even numbers we insert - for key in x..x + inserts_each { - let val = key.saturating_mul(10); - if map.insert(key, val).is_none() { - assert_eq!(map.get(&key), Some(&val)); - cnt_inserts += 1; - } - assert_eq!(map.len(), cnt_inserts); - } - if previous_even_x.is_none() { - previous_even_x = Some(x); - } - } else if previous_even_x.is_some() { - // If it's an odd number and we inserted in a previous run we assert - // that the last insert worked correctly and remove the elements again. - // - // It can happen that after one insert run there are many more - // insert runs (i.e. even `x` in `xs`) before we remove the numbers - // of the last run again. This is intentional, as to include testing - // if subsequent insert operations have an effect on already inserted - // items. 
- let x = previous_even_x.unwrap(); - for key in x..x + inserts_each { - let val = key.saturating_mul(10); - assert_eq!(map.get(&key), Some(&val)); - assert_eq!(map.take(&key), Some(val)); - assert_eq!(map.get(&key), None); - cnt_inserts -= 1; - assert_eq!(map.len(), cnt_inserts); - } - previous_even_x = None; - } - } - map -} - -#[quickcheck] -fn fuzz_inserts_and_removes(xs: Vec, inserts_each: u8) { - ink_env::test::run_test::(|_| { - let _ = insert_and_remove(xs, inserts_each); - Ok(()) - }) - .unwrap() -} - -/// Inserts all elements from `xs`. Then removes each `xth` element from the map -/// and asserts that all non-`xth` elements are still in the map. -#[quickcheck] -fn fuzz_removes(xs: Vec, xth: usize) { - ink_env::test::run_test::(|_| { - // given - let xs: Vec = xs.into_iter().unique().collect(); - let xth = xth.max(1); - let mut map = >::new(); - let mut len = map.len(); - - // when - // 1) insert all - for x in 0..xs.len() { - let i = xs.get(x).expect( - "x is always in bounds since we iterate over the vec length; qed", - ); - assert_eq!(map.insert(*i, i.saturating_mul(10)), None); - len += 1; - assert_eq!(map.len(), len); - } - - // 2) remove every `xth` element of `xs` from the map - for x in 0..xs.len() { - if x % xth == 0 { - let i = xs.get(x).expect( - "x is always in bounds since we iterate over the vec length; qed", - ); - assert_eq!(map.take(i), Some(i.saturating_mul(10))); - len -= 1; - } - assert_eq!(map.len(), len); - } - - // then - // everything else must still be get-able - for x in 0..xs.len() { - if x % xth != 0 { - let i = xs.get(x).expect( - "x is always in bounds since we iterate over the vec length; qed", - ); - assert_eq!(map.get(i), Some(&(i.saturating_mul(10)))); - } - } - - Ok(()) - }) - .unwrap() -} - -#[quickcheck] -fn fuzz_defrag(xs: Vec, inserts_each: u8) { - ink_env::test::run_test::(|_| { - // Create a `HashMap` and execute some pseudo-randomized - // insert/remove operations on it. 
- let mut map = insert_and_remove(xs, inserts_each); - - // Build a collection of the keys/values in this hash map - let kv_pairs: Vec<(i32, i32)> = map - .keys - .iter() - .map(|key| { - ( - key.to_owned(), - map.get(key).expect("value must exist").to_owned(), - ) - }) - .collect(); - assert_eq!(map.len(), kv_pairs.len() as u32); - - // Then defragment the hash map - map.defrag(None); - - // Then we push the defragmented hash map to storage and pull it again - let root_key = Key::from([0x00; 32]); - SpreadLayout::push_spread(&map, &mut KeyPtr::from(root_key)); - let map2: StorageHashMap = - SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); - - // Assert that everything that should be is still in the hash map - assert_eq!(map2.len(), kv_pairs.len() as u32); - for (key, val) in kv_pairs { - assert_eq!(map2.get(&key), Some(&val)); - } - - Ok(()) - }) - .unwrap() -} - -impl Arbitrary for StorageHashMap -where - K: Arbitrary + Ord + PackedLayout + Send + Clone + std::hash::Hash + 'static, - V: Arbitrary + PackedLayout + Send + Clone + 'static, -{ - fn arbitrary(g: &mut Gen) -> StorageHashMap { - let hmap = HashMap::::arbitrary(g); - StorageHashMap::::from_iter(hmap) - } -} - -impl Clone for StorageHashMap -where - K: Ord + PackedLayout + Clone + std::hash::Hash, - V: PackedLayout + Clone, -{ - fn clone(&self) -> Self { - let mut shmap = StorageHashMap::::new(); - self.iter().for_each(|(k, v)| { - let _ = shmap.insert(k.clone(), v.clone()); - }); - shmap - } -} - -impl<'a, K, V> FuzzCollection for &'a mut StorageHashMap -where - V: Clone + PackedLayout + 'a, - K: PackedLayout + Ord + Clone + 'a, -{ - type Collection = StorageHashMap; - type Item = (&'a K, &'a mut V); - - /// Makes `self` equal to `instance2` by executing a series of operations - /// on `self`. - fn equalize(&mut self, instance2: &Self::Collection) { - let hmap_keys = self.keys().cloned().collect::>(); - for k in hmap_keys { - if !instance2.contains_key(&k) { - let _ = self.take(&k); - } - } - - let template_keys = instance2.keys().cloned(); - for k in template_keys { - if let Some(template_val) = instance2.get(&k) { - let _ = self.insert(k, template_val.clone()); - } - } - } - - /// `item` is an item from the hash map. We check if `item.key` is - /// in `self` and if existent assign its value to `item.value` - /// of `self` and assign it to `val`. - /// - /// Hence this method only might modify values of `item`, leaving - /// others intact. - fn assign(&mut self, item: Self::Item) { - let (key, value) = item; - if let Some(existent_value) = self.get(key) { - *value = existent_value.clone(); - } - } -} - -crate::fuzz_storage!("hashmap_1", StorageHashMap); -crate::fuzz_storage!("hashmap_2", StorageHashMap>>>); -crate::fuzz_storage!( - "hashmap_3", - StorageHashMap>, Option>>> -); -crate::fuzz_storage!( - "hashmap_4", - StorageHashMap, (bool, (u32, u128))> -); -crate::fuzz_storage!("hashmap_5", StorageHashMap, u32)>); diff --git a/crates/storage/src/collections/hashmap/impls.rs b/crates/storage/src/collections/hashmap/impls.rs deleted file mode 100644 index 6989843ed2..0000000000 --- a/crates/storage/src/collections/hashmap/impls.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - HashMap as StorageHashMap, - Iter, - IterMut, -}; -use crate::traits::PackedLayout; -use core::{ - cmp::{ - Eq, - Ord, - PartialEq, - }, - ops, -}; -use ink_env::hash::{ - CryptoHash, - HashOutput, -}; -use ink_prelude::borrow::{ - Borrow, - ToOwned, -}; -use ink_primitives::Key; - -impl Drop for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn drop(&mut self) { - self.clear_cells(); - } -} - -impl Default for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn default() -> Self { - Self::new() - } -} - -impl<'a, K, V, H, Q> ops::Index<&'a Q> for StorageHashMap -where - Q: Ord + scale::Encode + ToOwned, - K: Borrow + Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Output = V; - - fn index(&self, index: &Q) -> &Self::Output { - self.get(index).expect("index out of bounds") - } -} - -impl<'a, K, V, H, Q> ops::IndexMut<&'a Q> for StorageHashMap -where - Q: Ord + scale::Encode + ToOwned, - K: Borrow + Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn index_mut(&mut self, index: &Q) -> &mut Self::Output { - self.get_mut(index).expect("index out of bounds") - } -} - -impl<'a, K: 'a, V: 'a, H> IntoIterator for &'a StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V, H>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, K: 'a, V: 'a, H> IntoIterator for &'a mut StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V, H>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl Extend<(K, V)> for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for (key, value) in iter { - self.insert(key, value); - } - } -} - -impl FromIterator<(K, V)> for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut vec = StorageHashMap::new(); - vec.extend(iter); - vec - } -} - -impl PartialEq for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PartialEq + PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false - } - self.iter() - .map(|(key, value)| (value, other.get(key))) - .all(|(lhs, maybe_rhs)| maybe_rhs.map(|rhs| rhs == lhs).unwrap_or(false)) - } -} - -impl Eq for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: Eq + PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ -} diff --git a/crates/storage/src/collections/hashmap/iter.rs b/crates/storage/src/collections/hashmap/iter.rs deleted file 
mode 100644 index 90dd3e319c..0000000000 --- a/crates/storage/src/collections/hashmap/iter.rs +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::ValueEntry; -use crate::{ - collections::{ - extend_lifetime, - stash::Iter as StashIter, - HashMap as StorageHashMap, - }, - lazy::LazyHashMap, - traits::PackedLayout, -}; -use ink_env::hash::{ - CryptoHash, - HashOutput, -}; -use ink_primitives::Key; - -/// An iterator over shared references to the elements of a storage hash map. -#[derive(Debug, Copy, Clone)] -pub struct Iter<'a, K, V, H> -where - K: PackedLayout, -{ - /// The iterator over the map's keys. - keys_iter: StashIter<'a, K>, - /// The lazy hash map to query the values. - values: &'a LazyHashMap, H>, -} - -impl<'a, K, V, H> Iter<'a, K, V, H> -where - K: Ord + Clone + PackedLayout, -{ - /// Creates a new iterator for the given storage hash map. - pub(crate) fn new(hash_map: &'a StorageHashMap) -> Self - where - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, - { - Self { - keys_iter: hash_map.keys.iter(), - values: &hash_map.values, - } - } -} - -impl<'a, K, V, H> Iter<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// Queries the value for the given key and returns the key/value pair. - /// - /// # Panics - /// - /// If the key refers to an invalid element. - fn query_value(&self, key: &'a K) -> ::Item { - let entry = self - .values - .get(key) - .expect("a key must always refer to an existing entry"); - (key, &entry.value) - } -} - -impl<'a, K, V, H> Iterator for Iter<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Item = (&'a K, &'a V); - - fn count(self) -> usize { - self.keys_iter.count() - } - - fn next(&mut self) -> Option { - let key = self.keys_iter.next()?; - Some(self.query_value(key)) - } - - fn size_hint(&self) -> (usize, Option) { - self.keys_iter.size_hint() - } -} - -impl<'a, K, V, H> ExactSizeIterator for Iter<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ -} - -impl<'a, K, V, H> DoubleEndedIterator for Iter<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn next_back(&mut self) -> Option { - let key = self.keys_iter.next_back()?; - Some(self.query_value(key)) - } -} - -/// An iterator over shared references to the elements of a storage hash map. -#[derive(Debug)] -pub struct IterMut<'a, K, V, H> -where - K: PackedLayout, -{ - /// The iterator over the map's keys. - keys_iter: StashIter<'a, K>, - /// The lazy hash map to query the values. - values: &'a mut LazyHashMap, H>, -} - -impl<'a, K, V, H> IterMut<'a, K, V, H> -where - K: Ord + Clone + PackedLayout, -{ - /// Creates a new iterator for the given storage hash map. 
- pub(crate) fn new(hash_map: &'a mut StorageHashMap) -> Self - where - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, - { - Self { - keys_iter: hash_map.keys.iter(), - values: &mut hash_map.values, - } - } -} - -impl<'a, K, V, H> IterMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// Queries the value for the given key and returns the key/value pair. - /// - /// # Panics - /// - /// If the key refers to an invalid element. - fn query_value<'b>(&'b mut self, key: &'a K) -> ::Item { - let entry = self - .values - .get_mut(key) - .expect("a key must always refer to an existing entry"); - (key, unsafe { - extend_lifetime::<'b, 'a, V>(&mut entry.value) - }) - } -} - -impl<'a, K, V, H> Iterator for IterMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Item = (&'a K, &'a mut V); - - fn count(self) -> usize { - self.keys_iter.count() - } - - fn next(&mut self) -> Option { - let key = self.keys_iter.next()?; - Some(self.query_value(key)) - } - - fn size_hint(&self) -> (usize, Option) { - self.keys_iter.size_hint() - } -} - -impl<'a, K, V, H> ExactSizeIterator for IterMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ -} - -impl<'a, K, V, H> DoubleEndedIterator for IterMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn next_back(&mut self) -> Option { - let key = self.keys_iter.next_back()?; - Some(self.query_value(key)) - } -} - -/// An iterator over shared references to the values of a storage hash map. -#[derive(Debug, Copy, Clone)] -pub struct Values<'a, K, V, H> -where - K: PackedLayout, -{ - /// The key/values pair iterator. - iter: Iter<'a, K, V, H>, -} - -impl<'a, K, V, H> Values<'a, K, V, H> -where - K: Ord + Clone + PackedLayout, -{ - /// Creates a new iterator for the given storage hash map. - pub(crate) fn new(hash_map: &'a StorageHashMap) -> Self - where - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, - { - Self { - iter: hash_map.iter(), - } - } -} - -impl<'a, K, V, H> Iterator for Values<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Item = &'a V; - - fn count(self) -> usize { - self.iter.count() - } - - fn next(&mut self) -> Option { - self.iter.next().map(|(_key, value)| value) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, K, V, H> ExactSizeIterator for Values<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ -} - -impl<'a, K, V, H> DoubleEndedIterator for Values<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|(_key, value)| value) - } -} - -/// An iterator over exclusive references to the values of a storage hash map. -#[derive(Debug)] -pub struct ValuesMut<'a, K, V, H> -where - K: PackedLayout, -{ - /// The key/values pair iterator. - iter: IterMut<'a, K, V, H>, -} - -impl<'a, K, V, H> ValuesMut<'a, K, V, H> -where - K: Ord + Clone + PackedLayout, -{ - /// Creates a new iterator for the given storage hash map. 
- pub(crate) fn new(hash_map: &'a mut StorageHashMap) -> Self - where - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, - { - Self { - iter: hash_map.iter_mut(), - } - } -} - -impl<'a, K, V, H> Iterator for ValuesMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - type Item = &'a mut V; - - fn count(self) -> usize { - self.iter.count() - } - - fn next(&mut self) -> Option { - self.iter.next().map(|(_key, value)| value) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, K, V, H> ExactSizeIterator for ValuesMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ -} - -impl<'a, K, V, H> DoubleEndedIterator for ValuesMut<'a, K, V, H> -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|(_key, value)| value) - } -} - -/// An iterator over references to the keys of a storage hash map. -#[derive(Debug, Copy, Clone)] -pub struct Keys<'a, K> -where - K: PackedLayout, -{ - /// The key iterator. - iter: StashIter<'a, K>, -} - -impl<'a, K> Keys<'a, K> -where - K: Ord + Clone + PackedLayout, -{ - /// Creates a new iterator for the given storage hash map. - pub(crate) fn new(hash_map: &'a StorageHashMap) -> Self - where - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, - { - Self { - iter: hash_map.keys.iter(), - } - } -} - -impl<'a, K> Iterator for Keys<'a, K> -where - K: PackedLayout, -{ - type Item = &'a K; - - fn count(self) -> usize { - self.iter.count() - } - - fn next(&mut self) -> Option { - self.iter.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, K> ExactSizeIterator for Keys<'a, K> where K: PackedLayout {} - -impl<'a, K> DoubleEndedIterator for Keys<'a, K> -where - K: PackedLayout, -{ - fn next_back(&mut self) -> Option { - self.iter.next_back() - } -} diff --git a/crates/storage/src/collections/hashmap/mod.rs b/crates/storage/src/collections/hashmap/mod.rs deleted file mode 100644 index c235cea22a..0000000000 --- a/crates/storage/src/collections/hashmap/mod.rs +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A storage hash map that allows to associate keys with values. 
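// A minimal, illustrative sketch of the high-level map API documented in this
// module (insert/get/take and the entry API). The helper name and the
// `ink_storage::collections::HashMap` re-export path are assumptions; the
// calls themselves mirror the unit tests that appear later in this patch.
use ink_storage::collections::HashMap as StorageHashMap;

fn hashmap_usage_sketch() {
    let mut map = <StorageHashMap<u8, i32>>::new();
    assert_eq!(map.insert(b'A', 1), None);     // fresh key: no previous value
    assert_eq!(map.insert(b'A', 2), Some(1));  // existing key: old value is returned
    assert_eq!(map.get(&b'A'), Some(&2));
    *map.entry(b'B').or_insert(10) += 1;       // entry API: insert or update in place
    assert_eq!(map.get(&b'B'), Some(&11));
    assert_eq!(map.take(&b'A'), Some(2));      // removes the pair and returns the value
    assert!(!map.contains_key(&b'A'));
}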
- -mod impls; -mod iter; -mod storage; - -#[cfg(test)] -mod tests; - -#[cfg(all(test, feature = "ink-fuzz-tests"))] -mod fuzz_tests; - -pub use self::iter::{ - Iter, - IterMut, - Keys, - Values, - ValuesMut, -}; -use crate::{ - collections::Stash, - lazy::lazy_hmap::{ - Entry as LazyEntry, - LazyHashMap, - OccupiedEntry as LazyOccupiedEntry, - VacantEntry as LazyVacantEntry, - }, - traits::PackedLayout, -}; -use core::{ - borrow::Borrow, - cmp::Eq, -}; -use ink_env::hash::{ - Blake2x256, - CryptoHash, - HashOutput, -}; -use ink_prelude::borrow::ToOwned; -use ink_primitives::Key; - -/// The index type within a hashmap. -/// -/// # Note -/// -/// Used for key indices internal to the hashmap. -type KeyIndex = u32; - -/// A hash map operating on the contract storage. -/// -/// Stores a mapping between keys and values. -/// -/// # Note -/// -/// Unlike Rust's standard `HashMap` that uses the [`core::hash::Hash`] trait -/// in order to hash its keys the storage hash map uses the [`scale::Encode`] -/// encoding in order to hash its keys using a built-in cryptographic -/// hash function provided by the chain runtime. -/// -/// The main difference between the lower-level `LazyHashMap` and the -/// `storage::HashMap` is that the latter is aware of its associated keys and -/// values and operates on those instances directly as opposed to `Option` -/// instances of them. Also it provides a more high-level and user focused -/// API. -/// -/// Users should generally prefer using this storage hash map over the low-level -/// `LazyHashMap` for direct usage in their smart contracts. -#[derive(Debug)] -pub struct HashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// The keys of the storage hash map. - keys: Stash, - /// The values of the storage hash map. - values: LazyHashMap, H>, -} - -/// An entry within the storage hash map. -/// -/// Stores the value as well as the index to its associated key. -#[derive(Debug, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -struct ValueEntry { - /// The value stored in this entry. - value: V, - /// The index of the key associated with this value. - key_index: KeyIndex, -} - -/// An occupied entry that holds the value. -pub struct OccupiedEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// A reference to the `Stash` instance, containing the keys. - keys: &'a mut Stash, - /// The `LazyHashMap::OccupiedEntry`. - values_entry: LazyOccupiedEntry<'a, K, ValueEntry>, -} - -/// A vacant entry with previous and next vacant indices. -pub struct VacantEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// A reference to the `Stash` instance, containing the keys. - keys: &'a mut Stash, - /// The `LazyHashMap::VacantEntry`. - values_entry: LazyVacantEntry<'a, K, ValueEntry>, -} - -/// An entry within the stash. -/// -/// The vacant entries within a storage stash form a doubly linked list of -/// vacant entries that is used to quickly re-use their vacant storage. -pub enum Entry<'a, K: 'a, V: 'a> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// A vacant entry that holds the index to the next and previous vacant entry. - Vacant(VacantEntry<'a, K, V>), - /// An occupied entry that holds the value. - Occupied(OccupiedEntry<'a, K, V>), -} - -impl HashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// Creates a new empty storage hash map. 
- pub fn new() -> Self { - Self { - keys: Stash::new(), - values: LazyHashMap::new(), - } - } - - /// Returns the number of key-value pairs stored in the hash map. - pub fn len(&self) -> u32 { - self.keys.len() - } - - /// Returns the number of key-value pairs stored in the cache. - #[cfg(test)] - pub(crate) fn len_cached_entries(&self) -> u32 { - self.keys.len() - } - - /// Returns `true` if the hash map is empty. - pub fn is_empty(&self) -> bool { - self.keys.is_empty() - } - - /// Returns an iterator yielding shared references to all key/value pairs - /// of the hash map. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage hash maps. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter(&self) -> Iter { - Iter::new(self) - } - - /// Returns an iterator yielding exclusive references to all key/value pairs - /// of the hash map. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage hash maps. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter_mut(&mut self) -> IterMut { - IterMut::new(self) - } - - /// Returns an iterator yielding shared references to all values of the hash map. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage hash maps. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn values(&self) -> Values { - Values::new(self) - } - - /// Returns an iterator yielding shared references to all values of the hash map. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage hash maps. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn values_mut(&mut self) -> ValuesMut { - ValuesMut::new(self) - } - - /// Returns an iterator yielding shared references to all keys of the hash map. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage hash maps. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn keys(&self) -> Keys { - Keys::new(self) - } -} - -impl HashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn clear_cells(&self) { - if self.values.key().is_none() { - // We won't clear any storage if we are in lazy state since there - // probably has not been any state written to storage, yet. - return - } - for key in self.keys() { - // It might seem wasteful to clear all entries instead of just - // the occupied ones. However this spares us from having one extra - // read for every element in the storage stash to filter out vacant - // entries. So this is actually a trade-off and at the time of this - // implementation it is unclear which path is more efficient. - // - // The bet is that clearing a storage cell is cheaper than reading one. - self.values.clear_packed_at(key); - } - } -} - -impl HashMap -where - K: Ord + Eq + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// Inserts a key-value pair into the map. - /// - /// Returns the previous value associated with the same key if any. - /// If the map did not have this key present, `None` is returned. - /// - /// # Note - /// - /// - If the map did have this key present, the value is updated, - /// and the old value is returned. 
The key is not updated, though; - /// this matters for types that can be `==` without being identical. - pub fn insert(&mut self, key: K, new_value: V) -> Option { - if let Some(occupied) = self.values.get_mut(&key) { - // Update value, don't update key. - let old_value = core::mem::replace(&mut occupied.value, new_value); - return Some(old_value) - } - // At this point we know that `key` does not yet exist in the map. - let key_index = self.keys.put(key.to_owned()); - self.values.put( - key, - Some(ValueEntry { - value: new_value, - key_index, - }), - ); - None - } - - /// Removes the key/value pair from the map associated with the given key. - /// - /// - Returns the removed value if any. - /// - /// # Note - /// - /// The key may be any borrowed form of the map's key type, - /// but `Hash` and `Eq` on the borrowed form must match those for the key type. - pub fn take(&mut self, key: &Q) -> Option - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - let entry = self.values.put_get(key, None)?; - self.keys - .take(entry.key_index) - .expect("`key_index` must point to a valid key entry"); - Some(entry.value) - } - - /// Returns a shared reference to the value corresponding to the key. - /// - /// The key may be any borrowed form of the map's key type, - /// but `Hash` and `Eq` on the borrowed form must match those for the key type. - pub fn get(&self, key: &Q) -> Option<&V> - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - self.values.get(key).map(|entry| &entry.value) - } - - /// Returns a mutable reference to the value corresponding to the key. - /// - /// The key may be any borrowed form of the map's key type, - /// but `Hash` and `Eq` on the borrowed form must match those for the key type. - pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - self.values.get_mut(key).map(|entry| &mut entry.value) - } - - /// Returns `true` if there is an entry corresponding to the key in the map. - pub fn contains_key(&self, key: &Q) -> bool - where - K: Borrow, - Q: Ord + PartialEq + Eq + scale::Encode + ToOwned, - { - // We do not check if the given key is equal to the queried key which is - // what normally a hash map implementation does because we do not resolve - // or prevent collisions in this hash map implementation at any level. - // Having a collision is virtually impossible since we - // are using a keyspace of `2^256` bit. - self.values.get(key).is_some() - } - - /// Defragments storage used by the storage hash map. - /// - /// Returns the number of storage cells freed this way. - /// - /// A `max_iterations` parameter of `None` means that there is no limit - /// to the number of iterations performed. This is generally not advised. - /// - /// # Note - /// - /// This frees storage that is held but not necessary for the hash map to hold. - /// This operation might be expensive, especially for big `max_iteration` - /// parameters. The `max_iterations` parameter can be used to limit the - /// expensiveness for this operation and instead free up storage incrementally. - pub fn defrag(&mut self, max_iterations: Option) -> u32 { - // This method just defrags the underlying `storage::Stash` used to - // store the keys as it can sometimes take a lot of unused storage - // if many keys have been removed at some point. Some hash map - // implementations might even prefer to perform this operation with a - // limit set to 1 after every successful removal. 
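// Sketch of the incremental pattern described in the comment above: calling
// `defrag` with a limit of `1` after removals frees one vacant key slot per
// call. The helper name is illustrative only; the values mirror the
// `defrag_works` unit test later in this patch.
fn defrag_incrementally_sketch() {
    use ink_storage::collections::HashMap as StorageHashMap;
    let mut map = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)]
        .iter()
        .copied()
        .collect::<StorageHashMap<u8, i32>>();
    assert_eq!(map.take(&b'B'), Some(2));
    assert_eq!(map.take(&b'C'), Some(3));
    assert_eq!(map.defrag(Some(1)), 1); // first vacant key entry freed
    assert_eq!(map.defrag(Some(1)), 1); // second vacant key entry freed
    assert_eq!(map.defrag(Some(1)), 0); // nothing left to defragment
}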
- if let Some(0) = max_iterations { - // Bail out early if the iteration limit is set to 0 anyways to - // completely avoid doing work in this case. - return 0 - } - let len_vacant = self.keys.capacity() - self.keys.len(); - let max_iterations = max_iterations.unwrap_or(len_vacant); - let values = &mut self.values; - let callback = |old_index, new_index, key: &K| { - let value_entry = values.get_mut(key).expect("key must be valid"); - debug_assert_eq!(value_entry.key_index, old_index); - value_entry.key_index = new_index; - }; - self.keys.defrag(Some(max_iterations), callback) - } - - /// Gets the given key's corresponding entry in the map for in-place manipulation. - pub fn entry(&mut self, key: K) -> Entry { - let entry = self.values.entry(key); - match entry { - LazyEntry::Occupied(o) => { - Entry::Occupied(OccupiedEntry { - keys: &mut self.keys, - values_entry: o, - }) - } - LazyEntry::Vacant(v) => { - Entry::Vacant(VacantEntry { - keys: &mut self.keys, - values_entry: v, - }) - } - } - } -} - -impl<'a, K, V> Entry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout + core::fmt::Debug + core::cmp::Eq + Default, -{ - /// Returns a reference to this entry's key. - pub fn key(&self) -> &K { - match self { - Entry::Occupied(entry) => entry.values_entry.key(), - Entry::Vacant(entry) => entry.values_entry.key(), - } - } - - /// Ensures a value is in the entry by inserting the default value if empty, and returns - /// a reference to the value in the entry. - pub fn or_default(self) -> &'a V { - match self { - Entry::Occupied(entry) => &mut entry.values_entry.into_mut().value, - Entry::Vacant(entry) => entry.insert(V::default()), - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and returns - /// a mutable reference to the value in the entry. - pub fn or_insert(self, default: V) -> &'a mut V { - match self { - Entry::Occupied(entry) => &mut entry.values_entry.into_mut().value, - Entry::Vacant(entry) => entry.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty, - /// and returns mutable references to the key and value in the entry. - pub fn or_insert_with(self, default: F) -> &'a mut V - where - F: FnOnce() -> V, - { - match self { - Entry::Occupied(entry) => &mut entry.values_entry.into_mut().value, - Entry::Vacant(entry) => Entry::insert(default(), entry), - } - } - - /// Ensures a value is in the entry by inserting, if empty, the result of the default - /// function, which takes the key as its argument, and returns a mutable reference to - /// the value in the entry. - pub fn or_insert_with_key(self, default: F) -> &'a mut V - where - F: FnOnce(&K) -> V, - { - match self { - Entry::Occupied(entry) => &mut entry.values_entry.into_mut().value, - Entry::Vacant(entry) => Entry::insert(default(entry.key()), entry), - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. - #[must_use] - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - Entry::Occupied(mut entry) => { - { - let v = entry.values_entry.get_mut(); - f(&mut v.value); - } - Entry::Occupied(entry) - } - Entry::Vacant(entry) => Entry::Vacant(entry), - } - } - - /// Inserts `value` into `entry`. 
- fn insert(value: V, entry: VacantEntry<'a, K, V>) -> &'a mut V { - entry.insert(value) - } -} - -impl<'a, K, V> VacantEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// Gets a reference to the key that would be used when inserting a value through the `VacantEntry`. - pub fn key(&self) -> &K { - self.values_entry.key() - } - - /// Take ownership of the key. - pub fn into_key(self) -> K { - self.values_entry.into_key() - } - - /// Sets the value of the entry with the `VacantEntry`s key, and returns a mutable reference to it. - pub fn insert(self, value: V) -> &'a mut V { - // At this point we know that `key` does not yet exist in the map. - let key_index = self.keys.put(self.key().to_owned()); - &mut self - .values_entry - .insert(ValueEntry { value, key_index }) - .value - } -} - -impl<'a, K, V> OccupiedEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// Gets a reference to the key in the entry. - pub fn key(&self) -> &K { - self.values_entry.key() - } - - /// Take the ownership of the key and value from the map. - pub fn remove_entry(self) -> (K, V) { - let k = self.values_entry.key().to_owned(); - let v = self.values_entry.remove(); - self.keys - .take(v.key_index) - .expect("`key_index` must point to a valid key entry"); - (k, v.value) - } - - /// Gets a reference to the value in the entry. - pub fn get(&self) -> &V { - &self.values_entry.get().value - } - - /// Gets a mutable reference to the value in the entry. - /// - /// If you need a reference to the `OccupiedEntry` which may outlive the destruction of the - /// `Entry` value, see `into_mut`. - pub fn get_mut(&mut self) -> &mut V { - &mut self.values_entry.get_mut().value - } - - /// Sets the value of the entry, and returns the entry's old value. - pub fn insert(&mut self, new_value: V) -> V { - core::mem::replace(&mut self.values_entry.get_mut().value, new_value) - } - - /// Takes the value out of the entry, and returns it. - pub fn remove(self) -> V { - self.remove_entry().1 - } - - /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry - /// with a lifetime bound to the map itself. - pub fn into_mut(self) -> &'a mut V { - &mut self.values_entry.into_mut().value - } -} diff --git a/crates/storage/src/collections/hashmap/storage.rs b/crates/storage/src/collections/hashmap/storage.rs deleted file mode 100644 index a80e9f18a1..0000000000 --- a/crates/storage/src/collections/hashmap/storage.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of ink! storage traits. 
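// A hedged sketch of what these trait implementations enable: pushing a map
// to a root storage key and pulling an equal instance back. It mirrors the
// `spread_layout_push_pull_works` test below; the helper name and the
// `ink_storage` import paths are assumptions about the crate-root re-exports.
fn spread_layout_round_trip_sketch() -> ink_env::Result<()> {
    use ink_primitives::Key;
    use ink_storage::{
        collections::HashMap as StorageHashMap,
        traits::{KeyPtr, SpreadLayout},
    };
    ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| {
        let map = [(b'A', 1)]
            .iter()
            .copied()
            .collect::<StorageHashMap<u8, i32>>();
        let root_key = Key::from([0x42; 32]);
        // Write the map to contract storage starting at `root_key` ...
        SpreadLayout::push_spread(&map, &mut KeyPtr::from(root_key));
        // ... then load a second instance from the same region and compare.
        let pulled = <StorageHashMap<u8, i32> as SpreadLayout>::pull_spread(
            &mut KeyPtr::from(root_key),
        );
        assert_eq!(map, pulled);
        Ok(())
    })
}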
- -use super::{ - HashMap as StorageHashMap, - ValueEntry, -}; -use crate::{ - collections::Stash as StorageStash, - traits::{ - forward_clear_packed, - forward_pull_packed, - forward_push_packed, - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, - }, -}; -use ink_env::hash::{ - CryptoHash, - HashOutput, -}; -use ink_primitives::Key; - -#[cfg(feature = "std")] -const _: () = { - use crate::{ - lazy::LazyHashMap, - traits::{ - LayoutCryptoHasher, - StorageLayout, - }, - }; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - use scale_info::TypeInfo; - - impl StorageLayout for StorageHashMap - where - K: TypeInfo + Ord + Clone + PackedLayout + 'static, - V: TypeInfo + PackedLayout + 'static, - H: LayoutCryptoHasher + CryptoHash, - Key: From<::Type>, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([ - FieldLayout::new( - "keys", - as StorageLayout>::layout(key_ptr), - ), - FieldLayout::new( - "values", - , H> as StorageLayout>::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for ValueEntry -where - T: PackedLayout, -{ - const FOOTPRINT: u64 = 1; - const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - forward_pull_packed::(ptr) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - forward_push_packed::(self, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - forward_clear_packed::(self, ptr) - } -} - -impl PackedLayout for ValueEntry -where - T: PackedLayout, -{ - fn pull_packed(&mut self, at: &Key) { - ::pull_packed(&mut self.value, at) - } - - fn push_packed(&self, at: &Key) { - ::push_packed(&self.value, at) - } - - fn clear_packed(&self, at: &Key) { - ::clear_packed(&self.value, at) - } -} - -impl SpreadLayout for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - const FOOTPRINT: u64 = 1 + as SpreadLayout>::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - keys: SpreadLayout::pull_spread(ptr), - values: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.keys, ptr); - SpreadLayout::push_spread(&self.values, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - self.clear_cells(); - SpreadLayout::clear_spread(&self.keys, ptr); - SpreadLayout::clear_spread(&self.values, ptr); - } -} - -impl SpreadAllocate for StorageHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - keys: SpreadAllocate::allocate_spread(ptr), - values: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/hashmap/tests.rs b/crates/storage/src/collections/hashmap/tests.rs deleted file mode 100644 index 10eb787664..0000000000 --- a/crates/storage/src/collections/hashmap/tests.rs +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use super::HashMap as StorageHashMap; -use crate::{ - traits::{ - KeyPtr, - SpreadLayout, - }, - Lazy, -}; -use ink_primitives::Key; - -/// Returns always the same `KeyPtr`. -fn key_ptr() -> KeyPtr { - let root_key = Key::from([0x42; 32]); - KeyPtr::from(root_key) -} - -/// Pushes a `HashMap` instance into the contract storage. -fn push_hmap(hmap: &StorageHashMap) { - SpreadLayout::push_spread(hmap, &mut key_ptr()); -} - -/// Pulls a `HashMap` instance from the contract storage. -fn pull_hmap() -> StorageHashMap { - as SpreadLayout>::pull_spread(&mut key_ptr()) -} - -fn filled_hmap() -> StorageHashMap { - [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] - .iter() - .copied() - .collect::>() -} - -#[test] -fn new_works() { - // `StorageHashMap::new` - let hmap = >::new(); - assert!(hmap.is_empty()); - assert_eq!(hmap.len(), 0); - assert!(hmap.iter().next().is_none()); - // `StorageHashMap::default` - let default = as Default>::default(); - assert!(default.is_empty()); - assert_eq!(default.len(), 0); - assert!(default.iter().next().is_none()); - // `StorageHashMap::new` and `StorageHashMap::default` should be equal. - assert_eq!(hmap, default); -} - -#[test] -fn from_iterator_works() { - let test_values = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)]; - let hmap = test_values - .iter() - .copied() - .collect::>(); - assert!(!hmap.is_empty()); - assert_eq!(hmap.len(), 4); - assert_eq!(hmap, { - let mut hmap = >::new(); - for (key, value) in &test_values { - assert_eq!(hmap.insert(*key, *value), None); - } - hmap - }); -} - -#[test] -fn from_empty_iterator_works() { - assert_eq!( - [].iter().copied().collect::>(), - >::new(), - ); -} - -#[test] -fn contains_key_works() { - // Empty hash map. - let hmap = >::new(); - assert!(!hmap.contains_key(&b'A')); - assert!(!hmap.contains_key(&b'E')); - // Filled hash map. - let hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] - .iter() - .copied() - .collect::>(); - assert!(hmap.contains_key(&b'A')); - assert!(hmap.contains_key(&b'B')); - assert!(hmap.contains_key(&b'C')); - assert!(hmap.contains_key(&b'D')); - assert!(!hmap.contains_key(&b'E')); -} - -#[test] -fn get_works() { - // Empty hash map. - let hmap = >::new(); - assert_eq!(hmap.get(&b'A'), None); - assert_eq!(hmap.get(&b'E'), None); - // Filled hash map: `get` - let hmap = filled_hmap(); - assert_eq!(hmap.get(&b'A'), Some(&1)); - assert_eq!(hmap.get(&b'B'), Some(&2)); - assert_eq!(hmap.get(&b'C'), Some(&3)); - assert_eq!(hmap.get(&b'D'), Some(&4)); - assert_eq!(hmap.get(&b'E'), None); - // Filled hash map: `get_mut` - let mut hmap = hmap; - assert_eq!(hmap.get_mut(&b'A'), Some(&mut 1)); - assert_eq!(hmap.get_mut(&b'B'), Some(&mut 2)); - assert_eq!(hmap.get_mut(&b'C'), Some(&mut 3)); - assert_eq!(hmap.get_mut(&b'D'), Some(&mut 4)); - assert_eq!(hmap.get_mut(&b'E'), None); -} - -#[test] -fn insert_works() { - let mut hmap = >::new(); - // Start with an empty hash map. - assert_eq!(hmap.len(), 0); - assert_eq!(hmap.get(&b'A'), None); - // Insert first value. - hmap.insert(b'A', 1); - assert_eq!(hmap.len(), 1); - assert_eq!(hmap.get(&b'A'), Some(&1)); - assert_eq!(hmap.get_mut(&b'A'), Some(&mut 1)); - // Update the inserted value. - hmap.insert(b'A', 2); - assert_eq!(hmap.len(), 1); - assert_eq!(hmap.get(&b'A'), Some(&2)); - assert_eq!(hmap.get_mut(&b'A'), Some(&mut 2)); - // Insert another value. 
- hmap.insert(b'B', 3); - assert_eq!(hmap.len(), 2); - assert_eq!(hmap.get(&b'B'), Some(&3)); - assert_eq!(hmap.get_mut(&b'B'), Some(&mut 3)); -} - -#[test] -fn take_works() { - // Empty hash map. - let mut hmap = >::new(); - assert_eq!(hmap.take(&b'A'), None); - assert_eq!(hmap.take(&b'E'), None); - // Filled hash map: `get` - let mut hmap = filled_hmap(); - assert_eq!(hmap.len(), 4); - assert_eq!(hmap.take(&b'A'), Some(1)); - assert_eq!(hmap.len(), 3); - assert_eq!(hmap.take(&b'A'), None); - assert_eq!(hmap.len(), 3); - assert_eq!(hmap.take(&b'B'), Some(2)); - assert_eq!(hmap.len(), 2); - assert_eq!(hmap.take(&b'C'), Some(3)); - assert_eq!(hmap.len(), 1); - assert_eq!(hmap.take(&b'D'), Some(4)); - assert_eq!(hmap.len(), 0); - assert_eq!(hmap.take(&b'E'), None); - assert_eq!(hmap.len(), 0); -} - -#[test] -fn iter_next_works() { - let hmap = filled_hmap(); - // Test iterator over shared references: - let mut iter = hmap.iter(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some((&b'A', &1))); - assert_eq!(iter.size_hint(), (3, Some(3))); - assert_eq!(iter.next(), Some((&b'B', &2))); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some((&b'C', &3))); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some((&b'D', &4))); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references: - let mut hmap = hmap; - let mut iter = hmap.iter_mut(); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some((&b'A', &mut 1))); - assert_eq!(iter.size_hint(), (3, Some(3))); - assert_eq!(iter.next(), Some((&b'B', &mut 2))); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.next(), Some((&b'C', &mut 3))); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some((&b'D', &mut 4))); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn values_next_works() { - let hmap = filled_hmap(); - // Test iterator over shared references: - let mut iter = hmap.values(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some(&1)); - assert_eq!(iter.size_hint(), (3, Some(3))); - assert_eq!(iter.next(), Some(&2)); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some(&3)); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some(&4)); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references: - let mut hmap = hmap; - let mut iter = hmap.values_mut(); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some(&mut 1)); - assert_eq!(iter.size_hint(), (3, Some(3))); - assert_eq!(iter.next(), Some(&mut 2)); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.next(), Some(&mut 3)); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some(&mut 4)); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn keys_next_works() { - let hmap = filled_hmap(); - let mut iter = hmap.keys(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some(&b'A')); - assert_eq!(iter.size_hint(), (3, Some(3))); - 
assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some(&b'C')); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some(&b'D')); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); -} - -#[test] -fn defrag_works() { - let expected = [(b'A', 1), (b'D', 4)] - .iter() - .copied() - .collect::>(); - // Defrag without limits: - let mut hmap = filled_hmap(); - assert_eq!(hmap.defrag(None), 0); - assert_eq!(hmap.take(&b'B'), Some(2)); - assert_eq!(hmap.take(&b'C'), Some(3)); - assert_eq!(hmap.defrag(None), 2); - assert_eq!(hmap.defrag(None), 0); - assert_eq!(hmap, expected); - // Defrag with limits: - let mut hmap = [(b'A', 1), (b'B', 2), (b'C', 3), (b'D', 4)] - .iter() - .copied() - .collect::>(); - assert_eq!(hmap.defrag(None), 0); - assert_eq!(hmap.take(&b'B'), Some(2)); - assert_eq!(hmap.take(&b'C'), Some(3)); - assert_eq!(hmap.defrag(Some(1)), 1); - assert_eq!(hmap.defrag(Some(1)), 1); - assert_eq!(hmap.defrag(Some(1)), 0); - assert_eq!(hmap, expected); -} - -#[test] -fn spread_layout_push_pull_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let hmap1 = filled_hmap(); - push_hmap(&hmap1); - // Load the pushed storage hmap into another instance and check that - // both instances are equal: - let hmap2 = pull_hmap(); - assert_eq!(hmap1, hmap2); - Ok(()) - }) -} - -#[test] -#[should_panic(expected = "storage entry was empty")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let hmap1 = filled_hmap(); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&hmap1, &mut KeyPtr::from(root_key)); - // It has already been asserted that a valid instance can be pulled - // from contract storage after a push to the same storage region. 
- // - // Now clear the associated storage from `hmap1` and check whether - // loading another instance from this storage will panic since the - // hmap's length property cannot read a value: - SpreadLayout::clear_spread(&hmap1, &mut KeyPtr::from(root_key)); - let _ = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - Ok(()) - }) - .unwrap() -} - -#[test] -fn storage_is_cleared_completely_after_pull_lazy() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - let lazy_hmap = Lazy::new(filled_hmap()); - SpreadLayout::push_spread(&lazy_hmap, &mut KeyPtr::from(root_key)); - let pulled_hmap = > as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - - // when - SpreadLayout::clear_spread(&pulled_hmap, &mut KeyPtr::from(root_key)); - - // then - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - - Ok(()) - }) - .unwrap() -} - -#[test] -#[should_panic(expected = "storage entry was empty")] -fn drop_works() { - ink_env::test::run_test::(|_| { - let root_key = Key::from([0x42; 32]); - - // if the setup panics it should not cause the test to pass - let setup_result = std::panic::catch_unwind(|| { - let hmap = filled_hmap(); - SpreadLayout::push_spread(&hmap, &mut KeyPtr::from(root_key)); - let _ = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - // hmap is dropped which should clear the cells - }); - assert!(setup_result.is_ok(), "setup should not panic"); - - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - - let _ = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/mod.rs b/crates/storage/src/collections/mod.rs deleted file mode 100644 index e75fb8d3f9..0000000000 --- a/crates/storage/src/collections/mod.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! High-level collections used to manage storage entities in the persisted -//! contract storage. -//! -//! Users should generally use these collections in their contracts directly -//! or as building blocks for their collections and algorithms. - -pub mod binary_heap; -pub mod bitstash; -pub mod bitvec; -pub mod hashmap; -pub mod smallvec; -pub mod stash; -pub mod vec; - -#[doc(inline)] -pub use self::{ - binary_heap::BinaryHeap, - bitstash::BitStash, - bitvec::Bitvec, - hashmap::HashMap, - stash::Stash, - vec::Vec, -}; - -#[doc(inline)] -pub use self::smallvec::SmallVec; - -/// Extends the lifetime `'a` to the outliving lifetime `'b` for the given reference. 
-/// -/// # Note -/// -/// This interface is a bit more constraint than a simple -/// [transmute](`core::mem::transmute`) and therefore preferred -/// for extending lifetimes only. -/// -/// # Safety -/// -/// This function is `unsafe` because lifetimes can be extended beyond the -/// lifetimes of the objects they are referencing and thus potentially create -/// dangling references if not used carefully. -pub(crate) unsafe fn extend_lifetime<'a, 'b: 'a, T>(reference: &'a mut T) -> &'b mut T { - core::mem::transmute::<&'a mut T, &'b mut T>(reference) -} diff --git a/crates/storage/src/collections/smallvec/impls.rs b/crates/storage/src/collections/smallvec/impls.rs deleted file mode 100644 index b9d25f3d39..0000000000 --- a/crates/storage/src/collections/smallvec/impls.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - Iter, - SmallVec, -}; -use crate::traits::PackedLayout; -use core::iter::{ - Extend, - FromIterator, -}; - -impl Drop for SmallVec -where - T: PackedLayout, -{ - fn drop(&mut self) { - self.clear_cells() - } -} - -impl core::ops::Index for SmallVec -where - T: PackedLayout, -{ - type Output = T; - - fn index(&self, index: u32) -> &Self::Output { - match self.get(index) { - Some(value) => value, - None => { - panic!( - "index out of bounds: the len is {} but the index is {}", - self.len(), - index - ) - } - } - } -} - -impl core::ops::IndexMut for SmallVec -where - T: PackedLayout, -{ - fn index_mut(&mut self, index: u32) -> &mut Self::Output { - let len = self.len(); - match self.get_mut(index) { - Some(value) => value, - None => { - panic!( - "index out of bounds: the len is {} but the index is {}", - len, index - ) - } - } - } -} - -impl<'a, T: 'a, const N: usize> IntoIterator for &'a SmallVec -where - T: PackedLayout, -{ - type Item = &'a T; - type IntoIter = Iter<'a, T, N>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl Extend for SmallVec -where - T: PackedLayout, -{ - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for item in iter { - self.push(item) - } - } -} - -impl FromIterator for SmallVec -where - T: PackedLayout, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut vec = SmallVec::new(); - vec.extend(iter); - vec - } -} - -impl core::cmp::PartialEq for SmallVec -where - T: PartialEq + PackedLayout, -{ - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false - } - self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs) - } -} - -impl core::cmp::Eq for SmallVec where T: Eq + PackedLayout {} diff --git a/crates/storage/src/collections/smallvec/iter.rs b/crates/storage/src/collections/smallvec/iter.rs deleted file mode 100644 index 48060f13a3..0000000000 --- a/crates/storage/src/collections/smallvec/iter.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::SmallVec; -use crate::{ - collections::extend_lifetime, - traits::PackedLayout, -}; - -/// An iterator over shared references to the elements of a small storage vector. -#[derive(Debug, Clone, Copy)] -pub struct Iter<'a, T, const N: usize> -where - T: PackedLayout, -{ - /// The storage vector to iterate over. - vec: &'a SmallVec, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T, const N: usize> Iter<'a, T, N> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage vector. - pub(crate) fn new(vec: &'a SmallVec) -> Self { - Self { - vec, - begin: 0, - end: vec.len(), - } - } - - /// Returns the amount of remaining elements to yield by the iterator. - fn remaining(&self) -> u32 { - self.end - self.begin - } -} - -impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> -where - T: PackedLayout, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } - - fn nth(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin + n >= self.end { - return None - } - let cur = self.begin + n; - self.begin += 1 + n; - self.vec.get(cur).expect("access is within bounds").into() - } -} - -impl<'a, T, const N: usize> ExactSizeIterator for Iter<'a, T, N> where T: PackedLayout {} - -impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - ::nth_back(self, 0) - } - - fn nth_back(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin >= self.end.saturating_sub(n) { - return None - } - self.end -= 1 + n; - self.vec - .get(self.end) - .expect("access is within bounds") - .into() - } -} - -/// An iterator over exclusive references to the elements of a small storage vector. -#[derive(Debug)] -pub struct IterMut<'a, T, const N: usize> -where - T: PackedLayout, -{ - /// The storage vector to iterate over. - vec: &'a mut SmallVec, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T, const N: usize> IterMut<'a, T, N> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage vector. - pub(crate) fn new(vec: &'a mut SmallVec) -> Self { - let len = vec.len(); - Self { - vec, - begin: 0, - end: len, - } - } - - /// Returns the amount of remaining elements to yield by the iterator. - fn remaining(&self) -> u32 { - self.end - self.begin - } -} - -impl<'a, T, const N: usize> IterMut<'a, T, N> -where - T: PackedLayout, -{ - fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> { - self.vec.get_mut(at).map(|value| { - // SAFETY: We extend the lifetime of the reference here. 
- // - // This is safe because the iterator yields an exclusive - // reference to every element in the iterated vector - // just once and also there can be only one such iterator - // for the same vector at the same time which is - // guaranteed by the constructor of the iterator. - unsafe { extend_lifetime::<'b, 'a, T>(value) } - }) - } -} - -impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> -where - T: PackedLayout, -{ - type Item = &'a mut T; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } - - fn nth(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin + n >= self.end { - return None - } - let cur = self.begin + n; - self.begin += 1 + n; - self.get_mut(cur).expect("access is within bounds").into() - } -} - -impl<'a, T, const N: usize> ExactSizeIterator for IterMut<'a, T, N> where T: PackedLayout {} - -impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - ::nth_back(self, 0) - } - - fn nth_back(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin >= self.end.saturating_sub(n) { - return None - } - self.end -= 1 + n; - self.get_mut(self.end) - .expect("access is within bounds") - .into() - } -} diff --git a/crates/storage/src/collections/smallvec/mod.rs b/crates/storage/src/collections/smallvec/mod.rs deleted file mode 100644 index 436c3a9453..0000000000 --- a/crates/storage/src/collections/smallvec/mod.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A small storage vector that allows to store a limited amount of elements. -//! -//! Prefer using [`SmallVec`] over [`Vec`][`crate::Vec`] if you know up front -//! the maximum amount of unique elements that have to be stored in the vector -//! at the same time, given the number is fairly low: e.g. not exceeding several -//! hundreds of elements. - -mod impls; -mod iter; -mod storage; - -#[cfg(test)] -mod tests; - -pub use self::iter::{ - Iter, - IterMut, -}; -use crate::{ - lazy::{ - Lazy, - LazyArray, - }, - traits::PackedLayout, -}; - -/// The used index type. -type Index = u32; - -/// A contiguous growable array type. -/// -/// # Note -/// -/// - The `storage::SmallVec` has a very similar API compared to a `storage::Vec`. -/// The major difference between both data structures is that the `SmallVec` -/// can only contain up to a fixed amount of elements given by `N` whereas the -/// `Vec` can contain up to `2^32` elements which is the maximum for 32-bit Wasm -/// targets. -/// - The performance characteristics may be different from Rust's -/// `Vec` due to the internal differences. -/// - Allows to store up to N elements. 
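The note list above is the main reason to reach for `SmallVec` instead of the unbounded storage `Vec`: the capacity is fixed at compile time through `N`, and `push` panics once `len == capacity` (see `push_beyond_limits_fails` in the deleted tests further down). A short usage sketch of the API removed by this patch; the `ink_storage::collections::SmallVec` path and the ability to run this outside a contract environment are assumptions based on the deleted tests, which construct the vector the same way:

use ink_storage::collections::SmallVec;

#[test]
fn bounded_push_pop_sketch() {
    // At most four elements can live in the vector at the same time.
    let mut vec: SmallVec<u8, 4> = SmallVec::new();
    assert_eq!(vec.capacity(), 4);

    vec.push(b'A');
    vec.push(b'B');
    assert_eq!(vec.len(), 2);
    assert_eq!(vec.first(), Some(&b'A'));
    assert_eq!(vec.pop(), Some(b'B'));

    // Filling all four slots and pushing once more would panic with
    // "cannot push more elements into the vector".
}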
-#[derive(Debug)] -pub struct SmallVec -where - T: PackedLayout, -{ - /// The current length of the small vector. - len: Lazy, - /// The entries of the small vector. - elems: LazyArray, -} - -impl Default for SmallVec -where - T: PackedLayout, -{ - fn default() -> Self { - Self::new() - } -} - -impl SmallVec -where - T: PackedLayout, -{ - /// Clears the underlying storage cells of the storage vector. - /// - /// # Note - /// - /// This completely invalidates the storage vector's invariants about - /// the contents of its associated storage region. - /// - /// This API is used for the `Drop` implementation of [`Vec`] as well as - /// for the [`SpreadLayout::clear_spread`][`crate::traits::SpreadLayout::clear_spread`] - /// trait implementation. - fn clear_cells(&self) { - if self.elems.key().is_none() { - // We won't clear any storage if we are in lazy state since there - // probably has not been any state written to storage, yet. - return - } - for index in 0..self.len() { - self.elems.clear_packed_at(index); - } - } -} - -impl SmallVec -where - T: PackedLayout, -{ - /// Creates a new empty vector. - pub fn new() -> Self { - Self { - len: Lazy::new(0), - elems: Default::default(), - } - } - - /// Returns the capacity of the small vector. - #[inline] - pub fn capacity(&self) -> u32 { - self.elems.capacity() - } - - /// Returns the number of elements in the vector, also referred to as its length. - #[inline] - pub fn len(&self) -> u32 { - *self.len - } - - /// Returns `true` if the vector contains no elements. - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -impl SmallVec -where - T: PackedLayout, -{ - /// Returns an iterator yielding shared references to all elements. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage vectors. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter(&self) -> Iter { - Iter::new(self) - } - - /// Returns an iterator yielding exclusive references to all elements. - /// - /// # Note - /// - /// - Avoid unbounded iteration over big storage vectors. - /// - Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter_mut(&mut self) -> IterMut { - IterMut::new(self) - } - - /// Returns the index if it is within bounds or `None` otherwise. - fn within_bounds(&self, index: Index) -> Option { - if index < self.len() { - return Some(index) - } - None - } - - /// Returns a shared reference to the first element if any. - pub fn first(&self) -> Option<&T> { - if self.is_empty() { - return None - } - self.get(0) - } - - /// Returns a shared reference to the last element if any. - pub fn last(&self) -> Option<&T> { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - self.get(last_index) - } - - /// Returns a shared reference to the indexed element. - /// - /// Returns `None` if `index` is out of bounds. - pub fn get(&self, index: u32) -> Option<&T> { - self.within_bounds(index) - .and_then(|index| self.elems.get(index)) - } -} - -impl SmallVec -where - T: PackedLayout, -{ - /// Appends an element to the back of the vector. - pub fn push(&mut self, value: T) { - assert!( - self.len() < self.capacity(), - "cannot push more elements into the vector" - ); - let last_index = self.len(); - *self.len += 1; - self.elems.put(last_index, Some(value)); - } -} - -impl SmallVec -where - T: PackedLayout, -{ - /// Pops the last element from the vector and returns it. 
- // - /// Returns `None` if the vector is empty. - pub fn pop(&mut self) -> Option { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - *self.len = last_index; - self.elems.put_get(last_index, None) - } - - /// Pops the last element from the vector and immediately drops it. - /// - /// Returns `Some(())` if an element has been removed and `None` otherwise. - /// - /// # Note - /// - /// This operation is a bit more efficient than [`SmallVec::pop`] - /// since it avoids reading from contract storage in some use cases. - pub fn pop_drop(&mut self) -> Option<()> { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - *self.len = last_index; - self.elems.put(last_index, None); - Some(()) - } - - /// Returns an exclusive reference to the first element if any. - pub fn first_mut(&mut self) -> Option<&mut T> { - if self.is_empty() { - return None - } - self.get_mut(0) - } - - /// Returns an exclusive reference to the last element if any. - pub fn last_mut(&mut self) -> Option<&mut T> { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - self.get_mut(last_index) - } - - /// Returns an exclusive reference to the indexed element. - /// - /// Returns `None` if `index` is out of bounds. - pub fn get_mut(&mut self, index: u32) -> Option<&mut T> { - self.within_bounds(index) - .and_then(move |index| self.elems.get_mut(index)) - } - - /// Swaps the elements at the given indices. - /// - /// # Panics - /// - /// If one or both indices are out of bounds. - pub fn swap(&mut self, a: u32, b: u32) { - assert!( - a < self.len() && b < self.len(), - "indices are out of bounds" - ); - self.elems.swap(a, b) - } - - /// Removes the indexed element from the vector and returns it. - /// - /// The last element of the vector is put into the indexed slot. - /// Returns `None` and does not mutate the vector if the index is out of bounds. - /// - /// # Note - /// - /// This operation does not preserve ordering but is constant time. - pub fn swap_remove(&mut self, n: u32) -> Option { - if self.is_empty() { - return None - } - self.elems.swap(n, self.len() - 1); - self.pop() - } - - /// Removes the indexed element from the vector. - /// - /// The last element of the vector is put into the indexed slot. - /// Returns `Some(())` if an element has been removed and `None` otherwise. - /// - /// # Note - /// - /// This operation should be preferred over [`Vec::swap_remove`] if there is - /// no need to return the removed element since it avoids a contract storage - /// read for some use cases. - pub fn swap_remove_drop(&mut self, n: u32) -> Option<()> { - if self.is_empty() { - return None - } - self.elems.put(n, None); - let last_index = self.len() - 1; - let last = self.elems.put_get(last_index, None); - self.elems.put(n, last); - *self.len = last_index; - Some(()) - } -} diff --git a/crates/storage/src/collections/smallvec/storage.rs b/crates/storage/src/collections/smallvec/storage.rs deleted file mode 100644 index c7d802dc61..0000000000 --- a/crates/storage/src/collections/smallvec/storage.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
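As the `swap_remove`/`swap_remove_drop` documentation above notes, removal trades ordering for constant time: the last element is moved into the freed slot instead of shifting every following element down. A sketch of the observable effect against the removed API (the same scenario appears in the deleted `swap_remove_works` test below); the import path is assumed as in the previous sketch:

use ink_storage::collections::SmallVec;

#[test]
fn swap_remove_moves_the_last_element_into_the_hole() {
    let mut vec: SmallVec<u8, 4> = [b'A', b'B', b'C', b'D'].iter().copied().collect();

    // Removing index 0 returns `A` and moves `D` (the last element) into slot 0.
    assert_eq!(vec.swap_remove(0), Some(b'A'));
    assert_eq!(vec.iter().copied().collect::<Vec<u8>>(), vec![b'D', b'B', b'C']);

    // `swap_remove_drop` behaves the same but never reads the removed value,
    // which saves a storage read when the value itself is not needed.
    assert_eq!(vec.swap_remove_drop(1), Some(()));
    assert_eq!(vec.iter().copied().collect::<Vec<u8>>(), vec![b'D', b'C']);
}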
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::SmallVec; -use crate::traits::{ - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, -}; - -#[cfg(feature = "std")] -const _: () = { - use crate::{ - lazy::LazyArray, - traits::StorageLayout, - }; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - use scale_info::TypeInfo; - - impl StorageLayout for SmallVec - where - T: PackedLayout + TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([ - FieldLayout::new("len", ::layout(key_ptr)), - FieldLayout::new( - "elems", - as StorageLayout>::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for SmallVec -where - T: PackedLayout, -{ - const FOOTPRINT: u64 = 1 + N as u64; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadLayout::pull_spread(ptr), - elems: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.len, ptr); - SpreadLayout::push_spread(&self.elems, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - self.clear_cells(); - SpreadLayout::clear_spread(&self.len, ptr); - SpreadLayout::clear_spread(&self.elems, ptr); - } -} - -impl SpreadAllocate for SmallVec -where - T: PackedLayout, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadAllocate::allocate_spread(ptr), - elems: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/smallvec/tests.rs b/crates/storage/src/collections/smallvec/tests.rs deleted file mode 100644 index 0bf5f096ac..0000000000 --- a/crates/storage/src/collections/smallvec/tests.rs +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
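The `FOOTPRINT` constant above is where the fixed capacity surfaces in the storage layout: a `SmallVec<T, N>` always reserves one cell for `len` plus `N` contiguous cells for `elems`, no matter how many elements are actually stored. A toy model of that arithmetic; `ToyKeyPtr` is purely illustrative and assumes `Lazy<u32>` has a footprint of 1 and `LazyArray<T, N>` a footprint of `N`, consistent with the `1 + N as u64` constant above:

/// Toy stand-in for the key pointer; not the real `ink_primitives` type.
struct ToyKeyPtr(u64);

impl ToyKeyPtr {
    fn advance_by(&mut self, footprint: u64) -> u64 {
        let start = self.0;
        self.0 += footprint;
        start
    }
}

#[test]
fn small_vec_spread_footprint_is_one_plus_n() {
    const N: u64 = 4;
    let mut ptr = ToyKeyPtr(0);
    ptr.advance_by(1); // `len: Lazy<u32>` occupies a single cell
    ptr.advance_by(N); // `elems: LazyArray<T, N>` occupies N contiguous cells
    assert_eq!(ptr.0, 1 + N); // mirrors `FOOTPRINT = 1 + N as u64`
}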
- -use super::SmallVec; -use crate::{ - traits::{ - KeyPtr, - SpreadLayout, - }, - Lazy, -}; -use ink_primitives::Key; - -#[test] -fn new_vec_works() { - let vec = >::new(); - assert!(vec.is_empty()); - assert_eq!(vec.len(), 0); - assert_eq!(vec.get(0), None); - assert!(vec.iter().next().is_none()); - let default = as Default>::default(); - assert!(default.is_empty()); - assert_eq!(default.len(), 0); - assert_eq!(vec.get(0), None); - assert!(default.iter().next().is_none()); -} - -#[test] -fn from_iterator_works() { - let some_primes = [b'A', b'B', b'C', b'D']; - assert_eq!(some_primes.iter().copied().collect::>(), { - let mut vec = SmallVec::new(); - for prime in &some_primes { - vec.push(*prime) - } - vec - }); -} - -#[test] -#[should_panic] -fn from_iterator_too_many() { - let some_primes = [b'A', b'B', b'C', b'D', b'E']; - let _ = some_primes.iter().copied().collect::>(); -} - -#[test] -fn from_empty_iterator_works() { - assert_eq!( - [].iter().copied().collect::>(), - SmallVec::new(), - ); -} - -#[test] -fn first_last_of_empty() { - let mut vec = >::new(); - assert_eq!(vec.first(), None); - assert_eq!(vec.first_mut(), None); - assert_eq!(vec.last(), None); - assert_eq!(vec.last_mut(), None); -} - -#[test] -fn pop_on_empty_works() { - let mut vec = >::new(); - assert_eq!(vec.pop(), None); -} - -#[test] -fn push_pop_first_last_works() { - /// Asserts conditions are met for the given storage vector. - fn assert_vec(vec: &SmallVec, len: u32, first: F, last: L) - where - F: Into>, - L: Into>, - { - assert_eq!(vec.is_empty(), len == 0); - assert_eq!(vec.len(), len); - assert_eq!(vec.first().copied(), first.into()); - assert_eq!(vec.last().copied(), last.into()); - } - - let mut vec = SmallVec::new(); - assert_vec(&vec, 0, None, None); - - // Sequence of `push` - vec.push(b'A'); - assert_vec(&vec, 1, b'A', b'A'); - vec.push(b'B'); - assert_vec(&vec, 2, b'A', b'B'); - vec.push(b'C'); - assert_vec(&vec, 3, b'A', b'C'); - vec.push(b'D'); - assert_vec(&vec, 4, b'A', b'D'); - - // Sequence of `pop` - assert_eq!(vec.pop(), Some(b'D')); - assert_vec(&vec, 3, b'A', b'C'); - assert_eq!(vec.pop(), Some(b'C')); - assert_vec(&vec, 2, b'A', b'B'); - assert_eq!(vec.pop(), Some(b'B')); - assert_vec(&vec, 1, b'A', b'A'); - assert_eq!(vec.pop(), Some(b'A')); - assert_vec(&vec, 0, None, None); - - // Pop from empty vector. - assert_eq!(vec.pop(), None); - assert_vec(&vec, 0, None, None); -} - -#[test] -#[should_panic] -fn push_beyond_limits_fails() { - let mut vec = [b'A', b'B', b'C', b'D'] - .iter() - .copied() - .collect::>(); - vec.push(b'E'); -} - -/// Creates a storage vector from the given slice. -fn vec_from_slice(slice: &[u8]) -> SmallVec { - slice.iter().copied().collect::>() -} - -/// Asserts that the given ordered storage vector elements are equal to the -/// ordered elements of the given slice. 
-fn assert_eq_slice(vec: &SmallVec, slice: &[u8]) { - assert_eq!(vec.len() as usize, slice.len()); - let vec_copy = vec.iter().copied().collect::>(); - assert_eq!(vec_copy.as_slice(), slice); -} - -#[test] -fn pop_drop_works() { - let elems = [b'A', b'B', b'C', b'D']; - let mut vec = vec_from_slice(&elems); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &elems[0..3]); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &elems[0..2]); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &elems[0..1]); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &[]); - assert_eq!(vec.pop_drop(), None); - assert_eq_slice(&vec, &[]); -} - -#[test] -fn get_works() { - let elems = [b'A', b'B', b'C', b'D']; - let mut vec = vec_from_slice(&elems); - for (n, mut expected) in elems.iter().copied().enumerate() { - let n = n as u32; - assert_eq!(vec.get(n), Some(&expected)); - assert_eq!(vec.get_mut(n), Some(&mut expected)); - assert_eq!(&vec[n], &expected); - assert_eq!(&mut vec[n], &mut expected); - } - let len = vec.len(); - assert_eq!(vec.get(len), None); - assert_eq!(vec.get_mut(len), None); -} - -#[test] -#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] -fn index_out_of_bounds_works() { - let test_values = [b'a', b'b', b'c']; - let vec = vec_from_slice(&test_values); - let _ = &vec[test_values.len() as u32]; -} - -#[test] -#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] -fn index_mut_out_of_bounds_works() { - let test_values = [b'a', b'b', b'c']; - let mut vec = vec_from_slice(&test_values); - let _ = &mut vec[test_values.len() as u32]; -} - -#[test] -fn iter_next_works() { - let elems = [b'A', b'B', b'C', b'D']; - let vec = vec_from_slice(&elems); - // Test iterator over shared references. - let mut iter = vec.iter(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.next(), Some(&b'A')); - assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some(&b'C')); - assert_eq!(iter.next(), Some(&b'D')); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. - let mut vec = vec; - let mut iter = vec.iter_mut(); - assert_eq!(iter.next(), Some(&mut b'A')); - assert_eq!(iter.next(), Some(&mut b'B')); - assert_eq!(iter.next(), Some(&mut b'C')); - assert_eq!(iter.next(), Some(&mut b'D')); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_nth_works() { - let elems = [b'A', b'B', b'C', b'D']; - let vec = vec_from_slice(&elems); - // Test iterator over shared references. - let mut iter = vec.iter(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.nth(1), Some(&b'B')); - assert_eq!(iter.count(), 2); - assert_eq!(iter.nth(1), Some(&b'D')); - assert_eq!(iter.count(), 0); - assert_eq!(iter.nth(1), None); - // Test iterator over exclusive references. - let mut vec = vec; - let mut iter = vec.iter_mut(); - assert_eq!(iter.nth(1), Some(&mut b'B')); - assert_eq!(iter.nth(1), Some(&mut b'D')); - assert_eq!(iter.nth(1), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_next_back_works() { - let elems = [b'A', b'B', b'C', b'D']; - let vec = vec_from_slice(&elems); - // Test iterator over shared references. 
- let mut iter = vec.iter().rev(); - assert_eq!(iter.clone().count(), 4); - assert_eq!(iter.next(), Some(&b'D')); - assert_eq!(iter.next(), Some(&b'C')); - assert_eq!(iter.clone().count(), 2); - assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.next(), Some(&b'A')); - assert_eq!(iter.clone().count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. - let mut vec = vec; - let mut iter = vec.iter_mut().rev(); - assert_eq!(iter.next(), Some(&mut b'D')); - assert_eq!(iter.next(), Some(&mut b'C')); - assert_eq!(iter.next(), Some(&mut b'B')); - assert_eq!(iter.next(), Some(&mut b'A')); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_nth_back_works() { - let elems = [b'A', b'B', b'C', b'D']; - let vec = vec_from_slice(&elems); - // Test iterator over shared references. - let mut iter = vec.iter().rev(); - assert_eq!(iter.clone().count(), 4); - assert_eq!(iter.nth(1), Some(&b'C')); - assert_eq!(iter.clone().count(), 2); - assert_eq!(iter.nth(1), Some(&b'A')); - assert_eq!(iter.clone().count(), 0); - assert_eq!(iter.nth(1), None); - // Test iterator over exclusive references. - let mut vec = vec; - let mut iter = vec.iter_mut().rev(); - assert_eq!(iter.nth(1), Some(&mut b'C')); - assert_eq!(iter.nth(1), Some(&mut b'A')); - assert_eq!(iter.nth(1), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn swap_works() { - let elems = [b'A', b'B', b'C', b'D']; - let mut vec = vec_from_slice(&elems); - - // Swap at same position is a no-op. - for index in 0..elems.len() as u32 { - vec.swap(index, index); - assert_eq_slice(&vec, &elems); - } - - // Swap first and second - vec.swap(0, 1); - assert_eq_slice(&vec, &[b'B', b'A', b'C', b'D']); - // Swap third and last - vec.swap(2, 3); - assert_eq_slice(&vec, &[b'B', b'A', b'D', b'C']); - // Swap first and last - vec.swap(0, 3); - assert_eq_slice(&vec, &[b'C', b'A', b'D', b'B']); -} - -#[test] -#[should_panic] -fn swap_one_invalid_index() { - let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']); - vec.swap(0, vec.len()); -} - -#[test] -#[should_panic] -fn swap_both_invalid_indices() { - let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']); - vec.swap(vec.len(), vec.len()); -} - -#[test] -fn swap_remove_works() { - let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']); - - // Swap remove first element. - assert_eq!(vec.swap_remove(0), Some(b'A')); - assert_eq_slice(&vec, &[b'D', b'B', b'C']); - // Swap remove middle element. - assert_eq!(vec.swap_remove(1), Some(b'B')); - assert_eq_slice(&vec, &[b'D', b'C']); - // Swap remove last element. - assert_eq!(vec.swap_remove(1), Some(b'C')); - assert_eq_slice(&vec, &[b'D']); - // Swap remove only element. - assert_eq!(vec.swap_remove(0), Some(b'D')); - assert_eq_slice(&vec, &[]); - // Swap remove from empty vector. - assert_eq!(vec.swap_remove(0), None); - assert_eq_slice(&vec, &[]); -} - -#[test] -fn swap_remove_drop_works() { - let mut vec = vec_from_slice(&[b'A', b'B', b'C', b'D']); - - // Swap remove first element. - assert_eq!(vec.swap_remove_drop(0), Some(())); - assert_eq_slice(&vec, &[b'D', b'B', b'C']); - // Swap remove middle element. - assert_eq!(vec.swap_remove_drop(1), Some(())); - assert_eq_slice(&vec, &[b'D', b'C']); - // Swap remove last element. - assert_eq!(vec.swap_remove_drop(1), Some(())); - assert_eq_slice(&vec, &[b'D']); - // Swap remove only element. - assert_eq!(vec.swap_remove_drop(0), Some(())); - assert_eq_slice(&vec, &[]); - // Swap remove from empty vector. 
- assert_eq!(vec.swap_remove_drop(0), None); - assert_eq_slice(&vec, &[]); -} - -#[test] -fn spread_layout_push_pull_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key)); - // Load the pushed storage vector into another instance and check that - // both instances are equal: - let vec2 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(vec1, vec2); - Ok(()) - }) -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key)); - // It has already been asserted that a valid instance can be pulled - // from contract storage after a push to the same storage region. - // - // Now clear the associated storage from `vec1` and check whether - // loading another instance from this storage will panic since the - // vector's length property cannot read a value: - SpreadLayout::clear_spread(&vec1, &mut KeyPtr::from(root_key)); - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} - -#[test] -fn storage_is_cleared_completely_after_pull_lazy() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - let lazy_vec = Lazy::new(vec_from_slice(&[b'a', b'b', b'c', b'd'])); - SpreadLayout::push_spread(&lazy_vec, &mut KeyPtr::from(root_key)); - let pulled_vec = > as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - - // when - SpreadLayout::clear_spread(&pulled_vec, &mut KeyPtr::from(root_key)); - - // then - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - - Ok(()) - }) - .unwrap() -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn drop_works() { - ink_env::test::run_test::(|_| { - let root_key = Key::from([0x42; 32]); - - // if the setup panics it should not cause the test to pass - let setup_result = std::panic::catch_unwind(|| { - let vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - SpreadLayout::push_spread(&vec, &mut KeyPtr::from(root_key)); - let _ = as SpreadLayout>::pull_spread(&mut KeyPtr::from( - root_key, - )); - // vec is dropped which should clear the cells - }); - assert!(setup_result.is_ok(), "setup should not panic"); - - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/stash/impls.rs b/crates/storage/src/collections/stash/impls.rs deleted file mode 100644 index c11e85c712..0000000000 --- a/crates/storage/src/collections/stash/impls.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of generic traits that are useful for the storage stash. - -use super::{ - Iter, - IterMut, - Stash as StorageStash, -}; -use crate::traits::PackedLayout; -use core::iter::{ - Extend, - FromIterator, -}; - -impl Drop for StorageStash -where - T: PackedLayout, -{ - fn drop(&mut self) { - self.clear_cells(); - } -} - -impl Default for StorageStash -where - T: PackedLayout, -{ - fn default() -> Self { - StorageStash::new() - } -} - -impl StorageStash -where - T: PackedLayout, -{ - fn assert_index_within_bounds(&self, index: u32) { - if cfg!(debug_assertions) { - assert!( - index < self.len(), - "index out of bounds: the len is {} but the index is {}", - self.len(), - index - ) - } - } -} - -impl core::ops::Index for StorageStash -where - T: PackedLayout, -{ - type Output = T; - - fn index(&self, index: u32) -> &Self::Output { - self.assert_index_within_bounds(index); - match self.get(index) { - Some(value) => value, - None => panic!("indexed vacant entry: at index {}", index), - } - } -} - -impl core::ops::IndexMut for StorageStash -where - T: PackedLayout, -{ - fn index_mut(&mut self, index: u32) -> &mut Self::Output { - self.assert_index_within_bounds(index); - match self.get_mut(index) { - Some(value) => value, - None => panic!("indexed vacant entry: at index {}", index), - } - } -} - -impl<'a, T: 'a> IntoIterator for &'a StorageStash -where - T: PackedLayout, -{ - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, T: 'a> IntoIterator for &'a mut StorageStash -where - T: PackedLayout, -{ - type Item = &'a mut T; - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl Extend for StorageStash -where - T: PackedLayout, -{ - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for item in iter { - self.put(item); - } - } -} - -impl FromIterator for StorageStash -where - T: PackedLayout, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut vec = StorageStash::new(); - vec.extend(iter); - vec - } -} - -impl core::cmp::PartialEq for StorageStash -where - T: PartialEq + PackedLayout, -{ - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false - } - self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs) - } -} - -impl core::cmp::Eq for StorageStash where T: scale::Decode + Eq + PackedLayout {} diff --git a/crates/storage/src/collections/stash/iter.rs b/crates/storage/src/collections/stash/iter.rs deleted file mode 100644 index 07ff186667..0000000000 --- a/crates/storage/src/collections/stash/iter.rs +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
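The `Extend` and `FromIterator` implementations above make the stash collectable like the other removed collections, while `PartialEq` compares occupied entries element-wise. A minimal sketch against the removed API; the `ink_storage::collections::Stash` path and running this outside a contract environment are assumptions based on the re-exports and tests elsewhere in this patch:

use ink_storage::collections::Stash as StorageStash;

#[test]
fn collect_and_compare_stashes() {
    let lhs: StorageStash<i32> = (1..=3).collect();
    let rhs: StorageStash<i32> = [1, 2, 3].iter().copied().collect();
    assert_eq!(lhs.len(), 3);
    // Element-wise comparison over the occupied entries only.
    assert_eq!(lhs, rhs);
}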
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - Entry, - Stash, -}; -use crate::{ - collections::extend_lifetime, - traits::PackedLayout, -}; - -/// An iterator over shared references to the elements of a storage stash. -#[derive(Debug, Clone, Copy)] -pub struct Iter<'a, T> -where - T: PackedLayout, -{ - /// The storage stash to iterate over. - stash: &'a Stash, - /// The number of already yielded elements. - /// - /// # Note - /// - /// This is important to make this iterator an `ExactSizeIterator`. - yielded: u32, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T> Iter<'a, T> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage stash. - pub(crate) fn new(stash: &'a Stash) -> Self { - Self { - stash, - yielded: 0, - begin: 0, - end: stash.len_entries(), - } - } - - /// Returns the amount of remaining elements to yield by the iterator. - fn remaining(&self) -> u32 { - self.stash.len() - self.yielded - } -} - -impl<'a, T> Iterator for Iter<'a, T> -where - T: PackedLayout, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - loop { - debug_assert!(self.begin <= self.end); - if self.begin == self.end { - return None - } - let cur = self.begin; - self.begin += 1; - match self.stash.get(cur) { - Some(value) => { - self.yielded += 1; - return Some(value) - } - None => continue, - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } -} - -impl<'a, T> ExactSizeIterator for Iter<'a, T> where T: PackedLayout {} - -impl<'a, T> DoubleEndedIterator for Iter<'a, T> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - loop { - debug_assert!(self.begin <= self.end); - if self.begin == self.end { - return None - } - debug_assert_ne!(self.end, 0); - self.end -= 1; - match self.stash.get(self.end) { - Some(value) => { - self.yielded += 1; - return Some(value) - } - None => continue, - } - } - } -} - -/// An iterator over exclusive references to the elements of a storage stash. -#[derive(Debug)] -pub struct IterMut<'a, T> -where - T: PackedLayout, -{ - /// The storage stash to iterate over. - stash: &'a mut Stash, - /// The number of already yielded elements. - /// - /// # Note - /// - /// This is important to make this iterator an `ExactSizeIterator`. - yielded: u32, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T> IterMut<'a, T> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage stash. - pub(crate) fn new(stash: &'a mut Stash) -> Self { - let len = stash.len_entries(); - Self { - stash, - yielded: 0, - begin: 0, - end: len, - } - } - - /// Returns the amount of remaining elements to yield by the iterator. 
- fn remaining(&self) -> u32 { - self.stash.len() - self.yielded - } -} - -impl<'a, T> IterMut<'a, T> -where - T: PackedLayout, -{ - fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> { - self.stash.get_mut(at).map(|value| { - // SAFETY: We extend the lifetime of the reference here. - // - // This is safe because the iterator yields an exclusive - // reference to every element in the iterated vector - // just once and also there can be only one such iterator - // for the same vector at the same time which is - // guaranteed by the constructor of the iterator. - unsafe { extend_lifetime::<'b, 'a, T>(value) } - }) - } -} - -impl<'a, T> Iterator for IterMut<'a, T> -where - T: PackedLayout, -{ - type Item = &'a mut T; - - fn next(&mut self) -> Option { - loop { - debug_assert!(self.begin <= self.end); - if self.begin == self.end { - return None - } - let cur = self.begin; - self.begin += 1; - match self.get_mut(cur) { - Some(value) => { - self.yielded += 1; - return Some(value) - } - None => continue, - } - } - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } -} - -impl<'a, T> ExactSizeIterator for IterMut<'a, T> where T: PackedLayout {} - -impl<'a, T> DoubleEndedIterator for IterMut<'a, T> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - loop { - debug_assert!(self.begin <= self.end); - if self.begin == self.end { - return None - } - debug_assert_ne!(self.end, 0); - self.end -= 1; - match self.get_mut(self.end) { - Some(value) => { - self.yielded += 1; - return Some(value) - } - None => continue, - } - } - } -} - -/// An iterator over shared references to the entries of a storage stash. -/// -/// # Note -/// -/// This is an internal API and mainly used for testing the storage stash. -#[derive(Debug, Clone, Copy)] -pub struct Entries<'a, T> -where - T: PackedLayout, -{ - /// The storage stash to iterate over. - stash: &'a Stash, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T> Entries<'a, T> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage stash. 
- pub(crate) fn new(stash: &'a Stash) -> Self { - let len = stash.len_entries(); - Self { - stash, - begin: 0, - end: len, - } - } -} - -impl<'a, T> Iterator for Entries<'a, T> -where - T: PackedLayout, -{ - type Item = &'a Entry; - - fn next(&mut self) -> Option { - debug_assert!(self.begin <= self.end); - if self.begin == self.end { - return None - } - let cur = self.begin; - self.begin += 1; - let entry = self - .stash - .entries - .get(cur) - .expect("iterator indices are within bounds"); - Some(entry) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = (self.end - self.begin) as usize; - (remaining, Some(remaining)) - } -} - -impl<'a, T> ExactSizeIterator for Entries<'a, T> where T: PackedLayout {} - -impl<'a, T> DoubleEndedIterator for Entries<'a, T> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - debug_assert!(self.begin <= self.end); - if self.begin == self.end { - return None - } - debug_assert_ne!(self.end, 0); - self.end -= 1; - let entry = self - .stash - .entries - .get(self.end) - .expect("iterator indices are within bounds"); - Some(entry) - } -} diff --git a/crates/storage/src/collections/stash/mod.rs b/crates/storage/src/collections/stash/mod.rs deleted file mode 100644 index 52cf589ec1..0000000000 --- a/crates/storage/src/collections/stash/mod.rs +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A storage stash allowing to store indexed elements efficiently. - -mod impls; -mod iter; -mod storage; - -#[cfg(test)] -mod tests; - -use self::iter::Entries; -pub use self::iter::{ - Iter, - IterMut, -}; -use crate::{ - lazy::LazyIndexMap, - traits::PackedLayout, - Pack, -}; -use ink_primitives::Key; - -/// An index into the stash. -type Index = u32; - -/// A stash data structure operating on contract storage. -/// -/// This allows to store information similar to a vector but in unordered -/// fashion which enables constant time random deletion of elements. This allows -/// for efficient attachment of data to some numeric indices. -#[derive(Debug)] -pub struct Stash -where - T: PackedLayout, -{ - /// The combined and commonly used header data. - header: Pack
, - /// The storage entries of the stash. - entries: LazyIndexMap>, -} - -/// Stores general commonly required information about the storage stash. -#[derive(Debug, Default, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -struct Header { - /// The latest vacant index. - /// - /// - If all entries are occupied: - /// - Points to the entry at index `self.len`. - /// - If some entries are vacant: - /// - Points to the entry that has been vacated most recently. - last_vacant: Index, - /// The number of items stored in the stash. - /// - /// # Note - /// - /// We cannot simply use the underlying length of the vector - /// since it would include vacant slots as well. - len: u32, - /// The number of entries currently managed by the stash. - len_entries: u32, -} - -/// A vacant entry with previous and next vacant indices. -#[derive(Debug, Copy, Clone, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -pub struct VacantEntry { - /// The next vacant index. - next: Index, - /// The previous vacant index. - prev: Index, -} - -/// An entry within the stash. -/// -/// The vacant entries within a storage stash form a doubly linked list of -/// vacant entries that is used to quickly re-use their vacant storage. -#[derive(Debug, scale::Encode, scale::Decode)] -#[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] -pub enum Entry { - /// A vacant entry that holds the index to the next and previous vacant entry. - Vacant(VacantEntry), - /// An occupied entry that hold the value. - Occupied(T), -} - -impl Entry { - /// Returns `true` if the entry is occupied. - pub fn is_occupied(&self) -> bool { - if let Entry::Occupied(_) = self { - return true - } - false - } - - /// Returns `true` if the entry is vacant. - pub fn is_vacant(&self) -> bool { - !self.is_occupied() - } - - /// Returns the vacant entry if the entry is vacant, otherwise returns `None`. - fn try_to_vacant(&self) -> Option { - match self { - Entry::Occupied(_) => None, - Entry::Vacant(vacant_entry) => Some(*vacant_entry), - } - } - - /// Returns the vacant entry if the entry is vacant, otherwise returns `None`. - fn try_to_vacant_mut(&mut self) -> Option<&mut VacantEntry> { - match self { - Entry::Occupied(_) => None, - Entry::Vacant(vacant_entry) => Some(vacant_entry), - } - } -} - -impl Stash -where - T: PackedLayout, -{ - /// Creates a new empty stash. - pub fn new() -> Self { - Self { - header: Pack::new(Header { - last_vacant: 0, - len: 0, - len_entries: 0, - }), - entries: LazyIndexMap::new(), - } - } - - /// Returns the number of elements stored in the stash. - pub fn len(&self) -> u32 { - self.header.len - } - - /// Returns `true` if the stash contains no elements. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the number of entries the stash can hold without - /// allocating another storage cell. - /// - /// # Note - /// - /// This is the total number of occupied and vacant entries of the stash. - pub fn capacity(&self) -> u32 { - self.len_entries() - } - - /// Returns the number of entries currently managed by the storage stash. - fn len_entries(&self) -> u32 { - self.header.len_entries - } - - /// Returns the underlying key to the cells. - /// - /// # Note - /// - /// This is a low-level utility getter and should - /// normally not be required by users. - pub fn entries_key(&self) -> Option<&Key> { - self.entries.key() - } - - /// Returns an iterator yielding shared references to all elements of the stash. 
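The `Entry`/`VacantEntry` pair above is what gives the stash constant-time removal with stable indices: a taken slot is not compacted away but becomes a vacant entry threaded into a free list, which `put` reuses before growing `len_entries`. The removed implementation keeps a doubly linked list biased towards refilling low indices first; the following self-contained sketch (all names illustrative, not ink_storage APIs) models only the core idea with a singly linked free list:

/// Simplified model: a vacant slot remembers the next vacant slot.
enum Slot<T> {
    Occupied(T),
    Vacant { next_free: Option<usize> },
}

struct MiniStash<T> {
    slots: Vec<Slot<T>>,
    first_free: Option<usize>,
    len: usize,
}

impl<T> MiniStash<T> {
    fn new() -> Self {
        Self { slots: Vec::new(), first_free: None, len: 0 }
    }

    /// Stores `value` and returns its index, reusing a vacant slot if one exists.
    fn put(&mut self, value: T) -> usize {
        self.len += 1;
        match self.first_free.take() {
            Some(index) => {
                // Unlink the reused hole from the free list.
                if let Slot::Vacant { next_free } = self.slots[index] {
                    self.first_free = next_free;
                }
                self.slots[index] = Slot::Occupied(value);
                index
            }
            None => {
                self.slots.push(Slot::Occupied(value));
                self.slots.len() - 1
            }
        }
    }

    /// Removes and returns the value at `index`, leaving a vacant slot behind
    /// so that every other index stays valid.
    fn take(&mut self, index: usize) -> Option<T> {
        let slot = self.slots.get_mut(index)?;
        if matches!(slot, Slot::Vacant { .. }) {
            return None;
        }
        let taken =
            std::mem::replace(slot, Slot::Vacant { next_free: self.first_free });
        self.first_free = Some(index);
        self.len -= 1;
        match taken {
            Slot::Occupied(value) => Some(value),
            Slot::Vacant { .. } => unreachable!("checked to be occupied above"),
        }
    }
}

#[test]
fn holes_are_reused() {
    let mut stash = MiniStash::new();
    assert_eq!(stash.put("a"), 0);
    assert_eq!(stash.put("b"), 1);
    assert_eq!(stash.take(0), Some("a"));
    // Index 1 is untouched and the hole at index 0 is reused by the next put.
    assert_eq!(stash.put("c"), 0);
    assert_eq!(stash.len, 2);
}

A singly linked list cannot unlink an arbitrary hole in constant time; that is exactly what the `prev` links and `remove_vacant_entry` in the removed implementation add back.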
- /// - /// # Note - /// - /// Avoid unbounded iteration over big storage stashes. - /// Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter(&self) -> Iter { - Iter::new(self) - } - - /// Returns an iterator yielding exclusive references to all elements of the stash. - /// - /// # Note - /// - /// Avoid unbounded iteration over big storage stashes. - /// Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter_mut(&mut self) -> IterMut { - IterMut::new(self) - } - - /// Returns an iterator yielding shared references to all entries of the stash. - pub fn entries(&self) -> Entries { - Entries::new(self) - } - - /// Returns `true` if the storage stash has vacant entries. - fn has_vacant_entries(&self) -> bool { - self.header.len != self.header.len_entries - } - - /// Returns the index of the last vacant entry if any. - fn last_vacant_index(&self) -> Option { - if self.has_vacant_entries() { - Some(self.header.last_vacant) - } else { - None - } - } -} - -impl Stash -where - T: PackedLayout, -{ - /// Returns a shared reference to the element at the given index. - pub fn get(&self, at: Index) -> Option<&T> { - if at >= self.len_entries() { - // Bail out early if the index is out of bounds. - return None - } - self.entries.get(at).and_then(|entry| { - match entry { - Entry::Occupied(val) => Some(val), - Entry::Vacant { .. } => None, - } - }) - } - - /// Returns an exclusive reference to the element at the given index. - pub fn get_mut(&mut self, at: Index) -> Option<&mut T> { - if at >= self.len_entries() { - // Bail out early if the index is out of bounds. - return None - } - self.entries.get_mut(at).and_then(|entry| { - match entry { - Entry::Occupied(val) => Some(val), - Entry::Vacant { .. } => None, - } - }) - } -} - -impl Stash -where - T: PackedLayout, -{ - /// Clears the underlying storage cells of the storage vector. - /// - /// # Note - /// - /// This completely invalidates the storage vector's invariants about - /// the contents of its associated storage region. - /// - /// This API is used for the `Drop` implementation of [`Vec`] as well as - /// for the [`SpreadLayout::clear_spread`][`crate::traits::SpreadLayout::clear_spread`] - /// trait implementation. - fn clear_cells(&self) { - if self.entries.key().is_none() { - // We won't clear any storage if we are in lazy state since there - // probably has not been any state written to storage, yet. - return - } - for index in 0..self.len_entries() { - // It might seem wasteful to clear all entries instead of just - // the occupied ones. However this spares us from having one extra - // read for every element in the storage stash to filter out vacant - // entries. So this is actually a trade-off and at the time of this - // implementation it is unclear which path is more efficient. - // - // The bet is that clearing a storage cell is cheaper than reading one. - self.entries.clear_packed_at(index); - } - } -} - -impl Stash -where - T: PackedLayout, -{ - /// Rebinds the `prev` and `next` bindings of the neighbors of the vacant entry. - /// - /// # Note - /// - /// The `removed_index` points to the index of the removed vacant entry. 
- fn remove_vacant_entry(&mut self, removed_index: Index, vacant_entry: VacantEntry) { - let prev_vacant = vacant_entry.prev; - let next_vacant = vacant_entry.next; - if prev_vacant == removed_index && next_vacant == removed_index { - // There is no other vacant entry left in the storage stash so - // there is nothing to update. Bail out early. - self.header.last_vacant = self.header.len; - return - } - let prev = self - .entries - .get_mut(prev_vacant) - .and_then(Entry::try_to_vacant_mut) - .expect("`prev` must point to an existing entry at this point"); - if prev_vacant == next_vacant { - // There is only one other vacant entry left. - // We can update the single vacant entry in a single look-up. - debug_assert_eq!(prev.prev, removed_index); - debug_assert_eq!(prev.next, removed_index); - prev.prev = prev_vacant; - prev.next = prev_vacant; - } else { - // There are multiple other vacant entries left. - debug_assert_eq!(prev.next, removed_index); - prev.next = next_vacant; - let next = self - .entries - .get_mut(next_vacant) - .and_then(Entry::try_to_vacant_mut) - .expect("`next` must point to an existing entry at this point"); - debug_assert_eq!(next.prev, removed_index); - next.prev = prev_vacant; - } - // Bind the last vacant pointer to the vacant position with the lower index. - // This has the effect that lower indices are refilled more quickly. - use core::cmp::min; - if removed_index == self.header.last_vacant { - self.header.last_vacant = min(prev_vacant, next_vacant); - } - } - - /// Returns the previous and next vacant entry for the entry at index `at`. - /// - /// If there exists a last vacant entry, the return value is a tuple - /// `(index_of_previous_vacant, index_of_next_vacant)`. - /// The two `index_` values hereby are selected in a way that makes it - /// more likely that the stash is refilled from low indices. - /// - /// If no vacant entry exists a self-referential tuple of `(at, at)` - /// is returned. - fn fetch_prev_and_next_vacant_entry(&self, at: Index) -> (Index, Index) { - if let Some(index) = self.last_vacant_index() { - let root_vacant = self - .entries - .get(index) - .and_then(|entry| entry.try_to_vacant()) - .expect("last_vacant must point to an existing vacant entry"); - // Form the linked vacant entries in a way that makes it more likely - // for them to refill the stash from low indices. - if at < index { - // Insert before root if new vacant index is smaller than root. - (root_vacant.prev, index) - } else if at < root_vacant.next { - // Insert between root and its next vacant entry if smaller than - // current root's next index. - (index, root_vacant.next) - } else { - // Insert before root entry if index is greater. But we won't - // update the new element to be the new root index in this case. - (root_vacant.prev, index) - } - } else { - // Default previous and next to the given at index. - // So the resulting vacant index is pointing to itself. - (at, at) - } - } - - /// Updates links from and to neighboring vacant entries. - fn update_neighboring_vacant_entry_links( - &mut self, - prev: Index, - next: Index, - at: Index, - ) { - if prev == next { - // Previous and next are the same so we can update the vacant - // neighbour with a single look-up. 
- let entry = self - .entries - .get_mut(next) - .and_then(Entry::try_to_vacant_mut) - .expect("`next` must point to an existing vacant entry at this point"); - entry.prev = at; - entry.next = at; - } else { - // Previous and next vacant entries are different and thus need - // different look-ups to update them. - self.entries - .get_mut(prev) - .and_then(Entry::try_to_vacant_mut) - .expect("`prev` must point to an existing vacant entry at this point") - .next = at; - self.entries - .get_mut(next) - .and_then(Entry::try_to_vacant_mut) - .expect("`next` must point to an existing vacant entry at this point") - .prev = at; - } - } - - /// Put the element into the stash at the next vacant position. - /// - /// Returns the stash index that the element was put into. - pub fn put(&mut self, new_value: T) -> Index { - let new_entry = Some(Entry::Occupied(new_value)); - let new_index = if let Some(index) = self.last_vacant_index() { - // Put the new element to the most recent vacant index if not all entries are occupied. - let old_entry = self - .entries - .put_get(index, new_entry) - .expect("a `last_vacant_index()` must point to an occupied cell"); - let vacant_entry = match old_entry { - Entry::Vacant(vacant_entry) => vacant_entry, - Entry::Occupied(_) => { - unreachable!("`last_vacant_index()` must point to a vacant entry") - } - }; - self.remove_vacant_entry(index, vacant_entry); - index - } else { - // Push the new element to the end if all entries are occupied. - let new_index = self.header.len_entries; - self.entries.put(new_index, new_entry); - self.header.last_vacant += 1; - self.header.len_entries += 1; - new_index - }; - self.header.len += 1; - new_index - } - - /// Takes the element stored at the given index if any. - pub fn take(&mut self, at: Index) -> Option { - // Cases: - // - There are vacant entries already. - // - There are no vacant entries before. - if at >= self.len_entries() { - // Early return since `at` index is out of bounds. - return None - } - // Precompute previous and next vacant entries as we might need them later. - // Due to borrow checker constraints we cannot have this at a later stage. - let (prev, next) = self.fetch_prev_and_next_vacant_entry(at); - let entry_mut = self.entries.get_mut(at).expect("index is out of bounds"); - if entry_mut.is_vacant() { - // Early return if the taken entry is already vacant. - return None - } - // At this point we know that the entry is occupied with a value. - let new_vacant_entry = Entry::Vacant(VacantEntry { next, prev }); - let taken_entry = core::mem::replace(entry_mut, new_vacant_entry); - self.update_neighboring_vacant_entry_links(prev, next, at); - // Take the value out of the taken occupied entry and return it. - match taken_entry { - Entry::Occupied(value) => { - use core::cmp::min; - self.header.last_vacant = - min(self.header.last_vacant, min(at, min(prev, next))); - self.header.len -= 1; - Some(value) - } - Entry::Vacant { .. } => { - unreachable!("the taken entry is known to be occupied") - } - } - } - - /// Removes the element stored at the given index if any. - /// - /// This method acts similar to the take API and even still returns an Option. - /// However, it guarantees to make no contract storage reads to the indexed - /// element and will only write to its internal low-level lazy cache that the - /// element at the given index is going to be removed at the end of the contract - /// execution. 
- /// - /// Calling this method with an index out of bounds for the returns `None` and - /// does not `remove` the element, otherwise it returns `Some(())`. - /// - /// # Safety - /// - /// The caller must ensure that `at` refers to an occupied index. Behavior is - /// unspecified if `at` refers to a vacant index and could seriously damage the - /// contract storage integrity. - pub unsafe fn remove_occupied(&mut self, at: Index) -> Option<()> { - // This function is written similar to [`Stash::take`], with the exception - // that the caller has to ensure that `at` refers to an occupied entry whereby - // the procedure can avoid loading the occupied entry which might be handy if - // the stored `T` is especially costly to load from contract storage. - if at >= self.len_entries() { - // Early return since `at` index is out of bounds. - return None - } - // Precompute previous and next vacant entries as we might need them later. - // Due to borrow checker constraints we cannot have this at a later stage. - let (prev, next) = self.fetch_prev_and_next_vacant_entry(at); - let new_vacant_entry = Entry::Vacant(VacantEntry { next, prev }); - self.entries.put(at, Some(new_vacant_entry)); - self.update_neighboring_vacant_entry_links(prev, next, at); - use core::cmp::min; - self.header.last_vacant = min(self.header.last_vacant, min(at, min(prev, next))); - self.header.len -= 1; - Some(()) - } - - /// Defragments the underlying storage to minimize footprint. - /// - /// Returns the number of storage cells freed this way. - /// - /// This might invalidate indices stored outside the stash. - /// - /// # Callback - /// - /// In order to keep those indices up-to-date the caller can provide - /// a callback function that is called for every moved entry - /// with a shared reference to the entries value and the old as well - /// as the new index. - /// - /// # Note - /// - /// - If `max_iterations` is `Some` concrete value it is used in order to - /// bound the number of iterations and won't try to defrag until the stash - /// is optimally compacted. - /// - Users are advised to call this method using `Some` concrete - /// value to keep gas costs within certain bounds. - /// - The call to the given callback takes place before the reinsertion - /// of the shifted occupied entry. - pub fn defrag(&mut self, max_iterations: Option, mut callback: C) -> u32 - where - C: FnMut(Index, Index, &T), - { - let len_entries = self.len_entries(); - let mut freed_cells = 0; - for index in (0..len_entries) - .rev() - .take(max_iterations.unwrap_or(len_entries) as usize) - { - if !self.has_vacant_entries() { - // Bail out as soon as there are no more vacant entries left. - return freed_cells - } - // In any case we are going to free yet another storage cell. - freed_cells += 1; - match self - .entries - .put_get(index, None) - .expect("index is out of bounds") - { - Entry::Vacant(vacant_entry) => { - // Remove the vacant entry and rebind its neighbors. - self.remove_vacant_entry(index, vacant_entry); - } - Entry::Occupied(value) => { - // Move the occupied entry into one of the remaining vacant - // entries. We do not re-use the `put` method to not update - // the length and other header information. 
- let vacant_index = self - .last_vacant_index() - .expect("it has been asserted that there are vacant entries"); - callback(index, vacant_index, &value); - let new_entry = Some(Entry::Occupied(value)); - let old_entry = self.entries.put_get(vacant_index, new_entry).expect( - "`last_vacant_index` index must point to an occupied cell", - ); - let vacant_entry = match old_entry { - Entry::Vacant(vacant_entry) => vacant_entry, - Entry::Occupied(_) => { - unreachable!( - "`last_vacant_index` must point to a vacant entry" - ) - } - }; - self.remove_vacant_entry(vacant_index, vacant_entry); - } - } - self.header.len_entries -= 1; - } - freed_cells - } -} diff --git a/crates/storage/src/collections/stash/storage.rs b/crates/storage/src/collections/stash/storage.rs deleted file mode 100644 index 952ee04776..0000000000 --- a/crates/storage/src/collections/stash/storage.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of ink! storage traits. - -use super::{ - Entry, - Header, - Stash as StorageStash, -}; -use crate::{ - lazy::LazyIndexMap, - traits::{ - forward_allocate_packed, - forward_clear_packed, - forward_pull_packed, - forward_push_packed, - KeyPtr, - PackedAllocate, - PackedLayout, - SpreadAllocate, - SpreadLayout, - }, -}; -use ink_primitives::Key; - -#[cfg(feature = "std")] -const _: () = { - use crate::{ - collections::Vec as StorageVec, - traits::StorageLayout, - }; - use ink_metadata::layout::{ - CellLayout, - FieldLayout, - Layout, - LayoutKey, - StructLayout, - }; - use scale_info::TypeInfo; - - impl StorageLayout for Header { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Cell(CellLayout::new::
(LayoutKey::from( - key_ptr.advance_by(1), - ))) - } - } - - impl<T> StorageLayout for StorageStash<T> - where - T: PackedLayout + TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([ - FieldLayout::new("header",
::layout(key_ptr)), - FieldLayout::new( - "entries", - > as StorageLayout>::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for Header { - const FOOTPRINT: u64 = 1; - const REQUIRES_DEEP_CLEAN_UP: bool = false; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - forward_pull_packed::(ptr) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - forward_push_packed::(self, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - forward_clear_packed::(self, ptr) - } -} - -impl SpreadAllocate for Header { - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - forward_allocate_packed::(ptr) - } -} - -impl PackedLayout for Header { - #[inline] - fn pull_packed(&mut self, _at: &Key) {} - #[inline] - fn push_packed(&self, _at: &Key) {} - #[inline] - fn clear_packed(&self, _at: &Key) {} -} - -impl PackedAllocate for Header { - #[inline] - fn allocate_packed(&mut self, _at: &Key) {} -} - -impl SpreadLayout for Entry -where - T: PackedLayout, -{ - const FOOTPRINT: u64 = 1; - const REQUIRES_DEEP_CLEAN_UP: bool = ::REQUIRES_DEEP_CLEAN_UP; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - forward_pull_packed::(ptr) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - forward_push_packed::(self, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - forward_clear_packed::(self, ptr) - } -} - -impl PackedLayout for Entry -where - T: PackedLayout, -{ - fn pull_packed(&mut self, at: &Key) { - if let Entry::Occupied(value) = self { - ::pull_packed(value, at) - } - } - - fn push_packed(&self, at: &Key) { - if let Entry::Occupied(value) = self { - ::push_packed(value, at) - } - } - - fn clear_packed(&self, at: &Key) { - if let Entry::Occupied(value) = self { - ::clear_packed(value, at) - } - } -} - -impl SpreadLayout for StorageStash -where - T: PackedLayout, -{ - const FOOTPRINT: u64 = 1 + as SpreadLayout>::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - header: SpreadLayout::pull_spread(ptr), - entries: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.header, ptr); - SpreadLayout::push_spread(&self.entries, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - self.clear_cells(); - SpreadLayout::clear_spread(&self.header, ptr); - SpreadLayout::clear_spread(&self.entries, ptr); - } -} - -impl SpreadAllocate for StorageStash -where - T: PackedLayout, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - header: SpreadAllocate::allocate_spread(ptr), - entries: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/stash/tests.rs b/crates/storage/src/collections/stash/tests.rs deleted file mode 100644 index 1b165ea7d7..0000000000 --- a/crates/storage/src/collections/stash/tests.rs +++ /dev/null @@ -1,812 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
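The impls above all follow the same spread-layout discipline: every field claims `FOOTPRINT` consecutive cells and the key pointer is advanced past each field in declaration order, which is why the stash's footprint is one header cell plus the lazy map's footprint. A toy model of that key walk, assuming nothing beyond plain Rust (the `KeyPtr`, `SpreadLayout`, `Header` and `Entries` below are simplified stand-ins, not the `ink_storage` types):

```rust
// Toy model of how a spread layout walks the key space: each field claims
// `FOOTPRINT` consecutive cells and the pointer is advanced past it.
struct KeyPtr {
    key: u64, // the real key is a 32-byte `ink_primitives::Key`
}

impl KeyPtr {
    fn advance_by(&mut self, footprint: u64) -> u64 {
        let current = self.key;
        self.key += footprint;
        current
    }
}

trait SpreadLayout {
    const FOOTPRINT: u64;
    fn push_spread(&self, ptr: &mut KeyPtr);
}

struct Header {
    len: u32,
}

/// Stand-in for the lazy entries map: in this model only its base key lives
/// in the root region, the elements hang off that key elsewhere.
struct Entries;

impl SpreadLayout for Header {
    const FOOTPRINT: u64 = 1; // packed into a single cell
    fn push_spread(&self, ptr: &mut KeyPtr) {
        println!("header (len = {}) -> cell {}", self.len, ptr.advance_by(Self::FOOTPRINT));
    }
}

impl SpreadLayout for Entries {
    const FOOTPRINT: u64 = 1;
    fn push_spread(&self, ptr: &mut KeyPtr) {
        println!("entries -> base cell {}", ptr.advance_by(Self::FOOTPRINT));
    }
}

struct Stash {
    header: Header,
    entries: Entries,
}

impl SpreadLayout for Stash {
    // Mirrors the `1 + <lazy map>::FOOTPRINT` pattern of the removed impl.
    const FOOTPRINT: u64 = Header::FOOTPRINT + Entries::FOOTPRINT;
    fn push_spread(&self, ptr: &mut KeyPtr) {
        // Fields are pushed one after another, in declaration order.
        self.header.push_spread(ptr);
        self.entries.push_spread(ptr);
    }
}

fn main() {
    let stash = Stash { header: Header { len: 0 }, entries: Entries };
    stash.push_spread(&mut KeyPtr { key: 0 }); // header -> cell 0, entries -> base cell 1
}
```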
- -use super::Stash as StorageStash; -use crate::{ - traits::{ - KeyPtr, - SpreadLayout, - }, - Lazy, -}; -use ink_primitives::Key; - -#[test] -fn regression_stash_unreachable_minified() { - // This regression has been discovered in the ERC-721 example implementation - // `approved_for_all_works` unit test. The fix was to adjust - // `Stash::remove_vacant_entry` to update `header.last_vacant` if the - // removed index was the last remaining vacant index in the stash. - ink_env::test::run_test::(|_| { - let mut stash: StorageStash = StorageStash::new(); - stash.put(1); - stash.put(2); - stash.take(0); - stash.put(99); - stash.take(1); - stash.put(99); - Ok(()) - }) - .unwrap() -} - -#[test] -fn new_works() { - // `StorageVec::new` - let stash = >::new(); - assert!(stash.is_empty()); - assert_eq!(stash.len(), 0); - assert_eq!(stash.get(0), None); - assert!(stash.iter().next().is_none()); - // `StorageVec::default` - let default = as Default>::default(); - assert!(default.is_empty()); - assert_eq!(default.len(), 0); - assert_eq!(stash.get(0), None); - assert!(default.iter().next().is_none()); - // `StorageVec::new` and `StorageVec::default` should be equal. - assert_eq!(stash, default); -} - -#[test] -fn from_iterator_works() { - let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; - let stash = test_values.iter().copied().collect::>(); - assert_eq!(stash, { - let mut stash = StorageStash::new(); - for (index, value) in test_values.iter().enumerate() { - assert_eq!(index as u32, stash.put(*value)); - } - stash - }); - assert_eq!(stash.len(), test_values.len() as u32); - assert!(!stash.is_empty()); -} - -#[test] -fn from_empty_iterator_works() { - assert_eq!( - [].iter().copied().collect::>(), - StorageStash::new(), - ); -} - -#[test] -fn take_from_filled_works() { - let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; - let mut stash = test_values.iter().copied().collect::>(); - for (index, expected_value) in test_values.iter().enumerate() { - assert_eq!(stash.take(index as u32), Some(*expected_value)); - } -} - -#[test] -fn take_from_empty_works() { - let mut stash = >::new(); - assert_eq!(stash.take(0), None); -} - -#[test] -fn take_out_of_bounds_works() { - let mut stash = [b'A', b'B', b'C'] - .iter() - .copied() - .collect::>(); - assert_eq!(stash.take(3), None); -} - -#[test] -fn remove_from_filled_works() { - let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; - let mut stash = test_values.iter().copied().collect::>(); - - let mut count = stash.len(); - for (index, val) in test_values.iter().enumerate() { - let index = index as u32; - assert_eq!(stash.get(index), Some(val)); - assert_eq!(unsafe { stash.remove_occupied(index) }, Some(())); - assert_eq!(stash.get(index), None); - count -= 1; - assert_eq!(stash.len(), count); - } - assert_eq!(stash.len(), 0); -} - -#[test] -fn remove_from_empty_works() { - let mut stash = >::new(); - assert_eq!(unsafe { stash.remove_occupied(0) }, None); -} - -#[test] -fn remove_out_of_bounds_works() { - let mut stash = [b'A', b'B', b'C'] - .iter() - .copied() - .collect::>(); - assert_eq!(unsafe { stash.remove_occupied(3) }, None); -} - -#[test] -fn remove_works_with_spread_layout_push_pull() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // First populate some storage Stash and writes that to the contract storage using pull_spread - // and some known Key. 
- let stash = [b'A', b'B', b'C'] - .iter() - .copied() - .collect::>(); - let root_key = Key::from([0x00; 32]); - SpreadLayout::push_spread(&stash, &mut KeyPtr::from(root_key)); - - // Then load another instance from the same key lazily and remove some of - // the known-to-be-populated entries from it. Afterwards push_spread this second instance and - // load yet another using pull_spread again. - let mut stash2 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(unsafe { stash2.remove_occupied(0) }, Some(())); - SpreadLayout::push_spread(&stash2, &mut KeyPtr::from(root_key)); - - // This time we check from the third instance using - // get if the expected cells are still there or have been successfully removed. - let stash3 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(stash3.get(0), None); - assert_eq!(stash3.get(1), Some(&b'B')); - assert_eq!(stash3.get(2), Some(&b'C')); - assert_eq!(stash3.len(), 2); - - Ok(()) - }) -} - -#[test] -fn get_works() { - let test_values = [b'A', b'B', b'C', b'D', b'E', b'F']; - let mut stash = test_values.iter().copied().collect::>(); - for (index, &expected_value) in test_values.iter().enumerate() { - let mut expected_value = expected_value; - let index = index as u32; - assert_eq!(stash.get(index), Some(&expected_value)); - assert_eq!(stash.get_mut(index), Some(&mut expected_value)); - assert_eq!(&stash[index], &expected_value); - assert_eq!(&mut stash[index], &mut expected_value); - } - // Get out of bounds works: - let len = stash.len(); - assert_eq!(stash.get(len), None); - assert_eq!(stash.get_mut(len), None); - // Get vacant entry works: - assert_eq!(stash.get(1), Some(&b'B')); - assert_eq!(stash.get_mut(1), Some(&mut b'B')); - assert_eq!(stash.take(1), Some(b'B')); - assert_eq!(stash.get(1), None); - assert_eq!(stash.get_mut(1), None); -} - -#[cfg(debug_assertions)] -#[test] -#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] -fn index_out_of_bounds_works() { - let test_values = [b'a', b'b', b'c']; - let stash = test_values.iter().copied().collect::>(); - let _ = &stash[test_values.len() as u32]; -} - -#[cfg(debug_assertions)] -#[test] -#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] -fn index_mut_out_of_bounds_works() { - let test_values = [b'a', b'b', b'c']; - let mut stash = test_values.iter().copied().collect::>(); - let _ = &mut stash[test_values.len() as u32]; -} - -#[test] -#[should_panic(expected = "indexed vacant entry: at index 1")] -fn index_vacant_works() { - let test_values = [b'a', b'b', b'c']; - let mut stash = test_values.iter().copied().collect::>(); - assert_eq!(stash.take(1), Some(b'b')); - let _ = &stash[1]; -} - -#[test] -#[should_panic(expected = "indexed vacant entry: at index 1")] -fn index_mut_vacant_works() { - let test_values = [b'a', b'b', b'c']; - let mut stash = test_values.iter().copied().collect::>(); - assert_eq!(stash.take(1), Some(b'b')); - let _ = &mut stash[1]; -} - -#[test] -fn len_is_empty_works() { - let mut stash = StorageStash::new(); - assert_eq!(stash.len(), 0); - assert!(stash.is_empty()); - stash.put(b'A'); - assert_eq!(stash.len(), 1); - assert!(!stash.is_empty()); - stash.take(0); - assert_eq!(stash.len(), 0); - assert!(stash.is_empty()); -} - -#[test] -fn iter_works() { - let stash = [b'A', b'B', b'C'] - .iter() - .copied() - .collect::>(); - // Test iterator over shared references. 
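The access tests above pin down the convention shared by the removed collections: `get` and `get_mut` answer `None` for vacant or out-of-bounds indices, while the `Index` operator panics with a descriptive message. A compact stand-in showing that split, with `Slots` as a hypothetical type rather than the removed `Stash`:

```rust
// `get` is quiet about vacant and out-of-bounds indices; `Index` panics loudly,
// exactly as the `#[should_panic]` tests above expect.
use std::ops::Index;

struct Slots<T> {
    entries: Vec<Option<T>>, // `None` models a vacant entry
}

impl<T> Slots<T> {
    fn get(&self, index: u32) -> Option<&T> {
        self.entries.get(index as usize).and_then(Option::as_ref)
    }
}

impl<T> Index<u32> for Slots<T> {
    type Output = T;

    fn index(&self, index: u32) -> &T {
        match self.get(index) {
            Some(value) => value,
            None if (index as usize) < self.entries.len() => {
                panic!("indexed vacant entry: at index {}", index)
            }
            None => panic!(
                "index out of bounds: the len is {} but the index is {}",
                self.entries.len(),
                index
            ),
        }
    }
}

fn main() {
    let slots = Slots { entries: vec![Some(b'a'), None, Some(b'c')] };
    assert_eq!(slots.get(1), None); // vacant: `get` reports it as absent
    assert_eq!(slots[2], b'c');
    // `slots[1]` would panic with "indexed vacant entry: at index 1".
}
```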
- let mut iter = stash.iter(); - assert_eq!(iter.count(), 3); - assert_eq!(iter.next(), Some(&b'A')); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.count(), 1); - assert_eq!(iter.next(), Some(&b'C')); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. - let mut stash = stash; - let mut iter = stash.iter_mut(); - assert_eq!(iter.next(), Some(&mut b'A')); - assert_eq!(iter.next(), Some(&mut b'B')); - assert_eq!(iter.next(), Some(&mut b'C')); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -/// Create a stash that only has vacant entries. -fn create_vacant_stash() -> StorageStash { - let mut stash = [b'A', b'B', b'C'] - .iter() - .copied() - .collect::>(); - for i in 0..stash.len() { - stash.take(i); - } - assert_eq!(stash.len(), 0); - assert!(stash.is_empty()); - assert_eq!(stash.len_entries(), 3); - stash -} - -/// Create a stash where every second entry is vacant. -fn create_holey_stash() -> StorageStash { - let elements = [b'A', b'B', b'C', b'D', b'E', b'F']; - let mut stash = elements.iter().copied().collect::>(); - for i in 0..stash.len() { - stash.take(i * 2); - } - assert_eq!(stash.len() as usize, elements.len() / 2); - assert!(!stash.is_empty()); - assert_eq!(stash.len_entries() as usize, elements.len()); - stash -} - -#[test] -fn iter_over_vacant_works() { - let stash = create_vacant_stash(); - // Test iterator over shared references. - let mut iter = stash.iter(); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. - let mut stash = stash; - let mut iter = stash.iter_mut(); - assert_eq!(iter.next(), None); - // Test reverse iterator over shared references. - let mut iter = stash.iter().rev(); - assert_eq!(iter.clone().count(), 0); - assert_eq!(iter.next(), None); - // Test reverse iterator over exclusive references. - let mut stash = stash; - let mut iter = stash.iter_mut().rev(); - assert_eq!(iter.next(), None); -} - -#[test] -fn iter_over_holey_works() { - let stash = create_holey_stash(); - // Test iterator over shared references. - let mut iter = stash.iter(); - assert_eq!(iter.count(), 3); - assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some(&b'D')); - assert_eq!(iter.count(), 1); - assert_eq!(iter.next(), Some(&b'F')); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. - let mut stash = stash; - let mut iter = stash.iter_mut(); - assert_eq!(iter.next(), Some(&mut b'B')); - assert_eq!(iter.next(), Some(&mut b'D')); - assert_eq!(iter.next(), Some(&mut b'F')); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_rev_over_holey_works() { - let stash = create_holey_stash(); - // Test iterator over shared references. - let mut iter = stash.iter().rev(); - assert_eq!(iter.clone().count(), 3); - assert_eq!(iter.next(), Some(&b'F')); - assert_eq!(iter.clone().count(), 2); - assert_eq!(iter.next(), Some(&b'D')); - assert_eq!(iter.clone().count(), 1); - assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.clone().count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. 
- let mut stash = stash; - let mut iter = stash.iter_mut().rev(); - assert_eq!(iter.next(), Some(&mut b'F')); - assert_eq!(iter.next(), Some(&mut b'D')); - assert_eq!(iter.next(), Some(&mut b'B')); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_rev_works() { - let stash = [b'A', b'B', b'C'] - .iter() - .copied() - .collect::>(); - // Test iterator over shared references. - let mut iter = stash.iter().rev(); - assert_eq!(iter.next(), Some(&b'C')); - assert_eq!(iter.next(), Some(&b'B')); - assert_eq!(iter.next(), Some(&b'A')); - assert_eq!(iter.next(), None); - // Test iterator over exclusive references. - let mut stash = stash; - let mut iter = stash.iter_mut().rev(); - assert_eq!(iter.next(), Some(&mut b'C')); - assert_eq!(iter.next(), Some(&mut b'B')); - assert_eq!(iter.next(), Some(&mut b'A')); - assert_eq!(iter.next(), None); -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -struct EntryMove { - from: u32, - to: u32, - value: u8, -} - -#[test] -fn simple_defrag_works() { - let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F'] - .iter() - .copied() - .collect::>(); - assert_eq!(stash.len(), 6); - assert_eq!(stash.len_entries(), 6); - assert_eq!(stash.take(3), Some(b'D')); - assert_eq!(stash.take(1), Some(b'B')); - assert_eq!(stash.take(5), Some(b'F')); - assert_eq!(stash.take(4), Some(b'E')); - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 6); - // Now stash looks like this: - // - // i | 0 | 1 | 2 | 3 | 4 | 5 | - // next | | | | | | | - // prev | | | | | | | - // val | A | | C | | | | - // - // After defrag the stash should look like this: - // - // i | 0 | 1 | - // next | | | - // prev | | | - // val | A | C | - let mut entry_moves = Vec::new(); - let callback = |from, to, value: &u8| { - entry_moves.push(EntryMove { - from, - to, - value: *value, - }); - }; - assert_eq!(stash.defrag(None, callback), 4); - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 2); - assert_eq!(stash.get(0), Some(&b'A')); - assert_eq!(stash.get(1), Some(&b'C')); - assert_eq!( - &entry_moves, - &[EntryMove { - from: 2, - to: 1, - value: 67 - }] - ); -} - -/// Returns a storage stash that looks internally like this: -/// -/// i | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 -/// ----------|---|---|---|---|---|---|---|--- -/// next | | | | | | | | -/// previous | | | | | | | | -/// val | | | | | E | | | H -fn complex_defrag_setup() -> StorageStash { - let mut stash = [b'A', b'B', b'C', b'D', b'E', b'F', b'G', b'H'] - .iter() - .copied() - .collect::>(); - assert_eq!(stash.len(), 8); - assert_eq!(stash.len_entries(), 8); - // Remove some of the entries in specific order. - assert_eq!(stash.take(0), Some(b'A')); - assert_eq!(stash.take(6), Some(b'G')); - assert_eq!(stash.take(1), Some(b'B')); - assert_eq!(stash.take(5), Some(b'F')); - assert_eq!(stash.take(2), Some(b'C')); - assert_eq!(stash.take(3), Some(b'D')); - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 8); - stash -} - -/// Returns the expected entry move set for the complex defragmentation test. 
-fn complex_defrag_expected_moves() -> &'static [EntryMove] { - &[ - EntryMove { - from: 7, - to: 0, - value: 72, - }, - EntryMove { - from: 4, - to: 1, - value: 69, - }, - ] -} - -#[test] -fn complex_defrag_works() { - let mut stash = complex_defrag_setup(); - let mut entry_moves = Vec::new(); - let callback = |from, to, value: &u8| { - entry_moves.push(EntryMove { - from, - to, - value: *value, - }); - }; - assert_eq!(stash.defrag(None, callback), 6); - // After defrag the stash should look like this: - // - // i | 0 | 1 | - // next | | | - // prev | | | - // val | H | E | - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 2); - assert_eq!(stash.get(0), Some(&b'H')); - assert_eq!(stash.get(1), Some(&b'E')); - assert_eq!(entry_moves.as_slice(), complex_defrag_expected_moves()); -} - -#[test] -fn incremental_defrag_works() { - // This tests asserts that incremental defragmentation of storage stashes - // yields the same result as immediate defragmentation of the same stash. - let mut stash = complex_defrag_setup(); - let mut entry_moves = Vec::new(); - let mut callback = |from, to, value: &u8| { - entry_moves.push(EntryMove { - from, - to, - value: *value, - }); - }; - let len_entries_before = stash.len_entries(); - for i in 0..stash.len_entries() { - stash.defrag(Some(1), &mut callback); - assert_eq!( - stash.len_entries(), - core::cmp::max(2, len_entries_before - i - 1) - ); - } - // After defrag the stash should look like this: - // - // i | 0 | 1 | - // next | | | - // prev | | | - // val | H | E | - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 2); - assert_eq!(stash.get(0), Some(&b'H')); - assert_eq!(stash.get(1), Some(&b'E')); - assert_eq!(entry_moves.as_slice(), complex_defrag_expected_moves()); -} - -#[derive(Debug, PartialEq, Eq)] -enum Entry { - /// Vacant entry with `prev` and `next` links. - Vacant(u32, u32), - /// Occupied entry with value. - Occupied(u8), -} - -fn entries_of_stash(stash: &StorageStash) -> Vec { - stash - .entries() - .map(|entry| { - use super::Entry as StashEntry; - match entry { - StashEntry::Vacant(entry) => Entry::Vacant(entry.prev, entry.next), - StashEntry::Occupied(value) => Entry::Occupied(*value), - } - }) - .collect::>() -} - -#[test] -fn take_in_order_works() { - let mut stash = [b'A', b'B', b'C', b'D'] - .iter() - .copied() - .collect::>(); - assert_eq!(stash.len(), 4); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), None); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Occupied(b'A'), - Entry::Occupied(b'B'), - Entry::Occupied(b'C'), - Entry::Occupied(b'D') - ] - ); - // Take first. - assert_eq!(stash.take(0), Some(b'A')); - assert_eq!(stash.len(), 3); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(0)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Vacant(0, 0), - Entry::Occupied(b'B'), - Entry::Occupied(b'C'), - Entry::Occupied(b'D') - ] - ); - // Take second. - assert_eq!(stash.take(1), Some(b'B')); - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(0)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Vacant(1, 1), - Entry::Vacant(0, 0), - Entry::Occupied(b'C'), - Entry::Occupied(b'D') - ] - ); - // Take third. 
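The defrag tests above rely on the callback contract: every relocated entry is reported as `(old_index, new_index, &value)` before it is re-inserted, so indices held outside the collection can be patched up. A standalone model of that compaction over `Vec<Option<T>>`; the real `defrag` additionally honours a `max_iterations` budget and operates on lazily loaded storage entries:

```rust
/// Moves trailing occupied entries into vacant slots and truncates the tail,
/// reporting every move so externally held indices can be fixed up.
fn defrag<T>(entries: &mut Vec<Option<T>>, mut on_move: impl FnMut(u32, u32, &T)) -> u32 {
    let mut freed = 0;
    while entries.iter().any(Option::is_none) {
        let old_index = (entries.len() - 1) as u32;
        match entries.pop().expect("loop guard guarantees a last entry") {
            // A vacant tail entry is simply dropped.
            None => freed += 1,
            // An occupied tail entry is relocated into the first vacancy.
            Some(value) => {
                let new_index = entries
                    .iter()
                    .position(Option::is_none)
                    .expect("an occupied tail was popped, so a vacancy remains");
                // The callback fires before the value is re-inserted.
                on_move(old_index, new_index as u32, &value);
                entries[new_index] = Some(value);
                freed += 1;
            }
        }
    }
    freed
}

fn main() {
    // The layout from `simple_defrag_works`: A and C survive,
    // indices 1, 3, 4 and 5 have been vacated.
    let mut entries = vec![Some(b'A'), None, Some(b'C'), None, None, None];
    let mut moves = Vec::new();
    let freed = defrag(&mut entries, |from, to, value: &u8| moves.push((from, to, *value)));
    assert_eq!(freed, 4);
    assert_eq!(entries, vec![Some(b'A'), Some(b'C')]);
    assert_eq!(moves, vec![(2, 1, b'C')]);
}
```

Reporting the move before the reinsertion matches the note in the removed docs: the caller gets to record the relocation before the new index becomes observable.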
- assert_eq!(stash.take(2), Some(b'C')); - assert_eq!(stash.len(), 1); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(0)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Vacant(2, 1), - Entry::Vacant(0, 2), - Entry::Vacant(1, 0), - Entry::Occupied(b'D') - ] - ); - // Take last. - assert_eq!(stash.take(3), Some(b'D')); - assert_eq!(stash.len(), 0); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(0)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Vacant(3, 1), - Entry::Vacant(0, 2), - Entry::Vacant(1, 3), - Entry::Vacant(2, 0), - ] - ); -} - -#[test] -fn take_rev_order_works() { - let mut stash = [b'A', b'B', b'C', b'D'] - .iter() - .copied() - .collect::>(); - assert_eq!(stash.len(), 4); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), None); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Occupied(b'A'), - Entry::Occupied(b'B'), - Entry::Occupied(b'C'), - Entry::Occupied(b'D') - ] - ); - // Take last. - assert_eq!(stash.take(3), Some(b'D')); - assert_eq!(stash.len(), 3); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(3)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Occupied(b'A'), - Entry::Occupied(b'B'), - Entry::Occupied(b'C'), - Entry::Vacant(3, 3) - ] - ); - // Take third. - assert_eq!(stash.take(2), Some(b'C')); - assert_eq!(stash.len(), 2); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(2)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Occupied(b'A'), - Entry::Occupied(b'B'), - Entry::Vacant(3, 3), - Entry::Vacant(2, 2) - ] - ); - // Take second. - assert_eq!(stash.take(1), Some(b'B')); - assert_eq!(stash.len(), 1); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(1)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Occupied(b'A'), - Entry::Vacant(3, 2), - Entry::Vacant(1, 3), - Entry::Vacant(2, 1) - ] - ); - // Take first. - assert_eq!(stash.take(0), Some(b'A')); - assert_eq!(stash.len(), 0); - assert_eq!(stash.len_entries(), 4); - assert_eq!(stash.last_vacant_index(), Some(0)); - assert_eq!( - entries_of_stash(&stash), - vec![ - Entry::Vacant(3, 1), - Entry::Vacant(0, 2), - Entry::Vacant(1, 3), - Entry::Vacant(2, 0) - ] - ); -} - -#[test] -fn spread_layout_push_pull_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let stash1 = create_holey_stash(); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&stash1, &mut KeyPtr::from(root_key)); - // Load the pushed storage vector into another instance and check that - // both instances are equal: - let stash2 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(stash1, stash2); - Ok(()) - }) -} - -#[test] -#[should_panic(expected = "storage entry was empty")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let stash1 = create_holey_stash(); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&stash1, &mut KeyPtr::from(root_key)); - // It has already been asserted that a valid instance can be pulled - // from contract storage after a push to the same storage region. 
- // - // Now clear the associated storage from `stash1` and check whether - // loading another instance from this storage will panic since the - // vector's length property cannot read a value: - SpreadLayout::clear_spread(&stash1, &mut KeyPtr::from(root_key)); - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} - -#[test] -fn storage_is_cleared_completely_after_pull_lazy() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - let lazy_stash = Lazy::new(create_holey_stash()); - SpreadLayout::push_spread(&lazy_stash, &mut KeyPtr::from(root_key)); - let pulled_stash = > as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - - // when - SpreadLayout::clear_spread(&pulled_stash, &mut KeyPtr::from(root_key)); - - // then - let contract_id = ink_env::test::callee::(); - let storage_used = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(storage_used, 0); - - Ok(()) - }) - .unwrap() -} - -#[test] -#[should_panic(expected = "storage entry was empty")] -fn drop_works() { - ink_env::test::run_test::(|_| { - let root_key = Key::from([0x42; 32]); - - // if the setup panics it should not cause the test to pass - let setup_result = std::panic::catch_unwind(|| { - let stash = create_holey_stash(); - SpreadLayout::push_spread(&stash, &mut KeyPtr::from(root_key)); - let _ = as SpreadLayout>::pull_spread(&mut KeyPtr::from( - root_key, - )); - // stash is dropped which should clear the cells - }); - assert!(setup_result.is_ok(), "setup should not panic"); - - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/collections/vec/fuzz_tests.rs b/crates/storage/src/collections/vec/fuzz_tests.rs deleted file mode 100644 index 1418a2d819..0000000000 --- a/crates/storage/src/collections/vec/fuzz_tests.rs +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The fuzz tests are testing complex types. 
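The fuzz tests that follow use `quickcheck` to assert that the storage vector's binary search agrees with `slice::binary_search`. The property below captures the same idea without the removed types: a hand-rolled port of the halving search (the deleted `binary_search_by` further down is itself a port of the `core::slice` version) is compared against the standard library on sorted, deduplicated input. It assumes the `quickcheck` crate is available, as it was for the deleted tests:

```rust
use quickcheck::quickcheck;

/// Plain port of the halving binary search used by the removed `Vec`.
fn binary_search_by<F>(len: u32, mut f: F) -> Result<u32, u32>
where
    F: FnMut(u32) -> std::cmp::Ordering,
{
    use std::cmp::Ordering::*;
    let (mut left, mut right) = (0, len);
    let mut size = len;
    while left < right {
        let mid = left + size / 2;
        match f(mid) {
            Less => left = mid + 1,
            Greater => right = mid,
            Equal => return Ok(mid),
        }
        size = right - left;
    }
    Err(left)
}

/// Property: on sorted, unique input both searches agree for hits and misses.
fn prop_matches_std(mut values: Vec<i32>, needle: i32) -> bool {
    values.sort_unstable();
    values.dedup();
    let ours = binary_search_by(values.len() as u32, |i| values[i as usize].cmp(&needle));
    let expected = values.binary_search(&needle);
    match (ours, expected) {
        (Ok(a), Ok(b)) => a as usize == b,
        (Err(a), Err(b)) => a as usize == b,
        _ => false,
    }
}

fn main() {
    quickcheck(prop_matches_std as fn(Vec<i32>, i32) -> bool);
}
```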
-#![allow(clippy::type_complexity)] - -use super::Vec as StorageVec; -use crate::{ - test_utils::FuzzCollection, - traits::{ - KeyPtr, - PackedLayout, - SpreadLayout, - }, - Pack, -}; -use itertools::Itertools; -use quickcheck::{ - Arbitrary, - Gen, -}; -use std::vec::Vec; - -impl Arbitrary for StorageVec -where - T: Arbitrary + PackedLayout + Send + Clone + 'static, -{ - fn arbitrary(g: &mut Gen) -> StorageVec { - let vec = Vec::::arbitrary(g); - StorageVec::::from_iter(vec) - } -} - -impl Clone for StorageVec -where - T: PackedLayout + Clone, -{ - fn clone(&self) -> Self { - let mut svec = StorageVec::::new(); - self.iter().for_each(|v| svec.push(v.clone())); - svec - } -} - -impl<'a, T> FuzzCollection for &'a mut StorageVec -where - T: Clone + PackedLayout, -{ - type Collection = StorageVec; - type Item = &'a mut T; - - /// Makes `self` equal to `instance2` by executing a series of operations - /// on `self`. - fn equalize(&mut self, instance2: &Self::Collection) { - self.clear(); - instance2.into_iter().for_each(|v| self.push(v.clone())); - } - - /// `val` is a value from the vector. We take an element out - /// of `self` and assign it to `val`. - /// - /// Hence this method only might modify values of `item`, leaving - /// others intact. - fn assign(&mut self, val: Self::Item) { - if let Some(popped_val) = self.pop() { - *val = popped_val; - } - } -} - -crate::fuzz_storage!("vec_1", StorageVec); -crate::fuzz_storage!("vec_2", StorageVec>>>); -crate::fuzz_storage!("vec_3", StorageVec<(bool, (u32, u128))>); -crate::fuzz_storage!("vec_4", StorageVec<(i128, u32, bool, Option<(u32, i128)>)>); - -#[quickcheck] -fn fuzz_binary_search(mut std_vec: Vec) { - // given - if std_vec.is_empty() { - return - } - let original_std_vec = std_vec.clone(); - std_vec.sort_unstable(); - let ink_vec = StorageVec::from_iter(std_vec.clone()); - - for x in original_std_vec { - // when - let index = std_vec - .binary_search(&x) - .expect("`x` must be found in `Vec`") as u32; - let ink_index = ink_vec - .binary_search(&x) - .expect("`x` must be found in `StorageVec`"); - let ink_index_by = ink_vec - .binary_search_by(|by_x| by_x.cmp(&x)) - .expect("`x` must be found in `StorageVec`"); - - // then - assert_eq!(index, ink_index); - assert_eq!(index, ink_index_by); - } -} - -#[quickcheck] -fn fuzz_binary_search_nonexistent(std_vec: Vec) { - // given - if std_vec.is_empty() { - return - } - let mut unique_std_vec: Vec = std_vec.into_iter().unique().collect(); - let removed_el = unique_std_vec - .pop() - .expect("length is non-zero, first element must exist"); - unique_std_vec.sort_unstable(); - let ink_vec = StorageVec::from_iter(unique_std_vec.clone()); - - // when - let std_err_index = unique_std_vec - .binary_search(&removed_el) - .expect_err("element must not be found") as u32; - let ink_err_index = ink_vec - .binary_search(&removed_el) - .expect_err("element must not be found"); - let ink_search_by_err_index = ink_vec - .binary_search_by(|by_x| by_x.cmp(&removed_el)) - .expect_err("element must not be found"); - - // then - assert_eq!(std_err_index, ink_err_index); - assert_eq!(std_err_index, ink_search_by_err_index); -} - -#[quickcheck] -fn fuzz_binary_search_by_key(mut std_vec: Vec<(i32, i32)>) { - // given - if std_vec.is_empty() { - return - } - let original_std_vec = std_vec.clone(); - std_vec.sort_by_key(|&(_a, b)| b); - let ink_vec = StorageVec::from_iter(std_vec.clone()); - - for (_x, y) in original_std_vec { - // when - let std_index = std_vec - .binary_search_by_key(&y, |&(_a, b)| b) - .expect("`y` 
must be found in `Vec`") as u32; - let ink_index = ink_vec - .binary_search_by_key(&y, |&(_a, b)| b) - .expect("`y` must be found in `StorageVec`"); - - // then - assert_eq!(std_index, ink_index); - } -} -#[quickcheck] -fn fuzz_binary_search_by_key_nonexistent(std_vec: Vec<(i32, i32)>) { - // given - if std_vec.is_empty() { - return - } - let mut unique_std_vec: Vec<(i32, i32)> = - std_vec.into_iter().unique_by(|&(_a, b)| b).collect(); - let removed_el = unique_std_vec - .pop() - .expect("length is non-zero, first element must exist"); - unique_std_vec.sort_by_key(|&(_a, b)| b); - let ink_vec = StorageVec::from_iter(unique_std_vec.clone()); - - // when - let std_err_index = unique_std_vec - .binary_search_by_key(&removed_el.1, |&(_a, b)| b) - .expect_err("element must not be found in `Vec`") as u32; - let ink_err_index = ink_vec - .binary_search_by_key(&removed_el.1, |&(_a, b)| b) - .expect_err("element must not be found in `StorageVec`"); - - // then - assert_eq!(std_err_index, ink_err_index); -} diff --git a/crates/storage/src/collections/vec/impls.rs b/crates/storage/src/collections/vec/impls.rs deleted file mode 100644 index 9b2864a44a..0000000000 --- a/crates/storage/src/collections/vec/impls.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of generic traits that are useful for the storage vector. 
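The trait impls below follow the usual layering: `Extend` is written in terms of `push`, and `FromIterator` in terms of `new` plus `extend`, which is what lets the tests build a storage vector with `collect()`. A minimal sketch of that layering for a hypothetical push-only collection named `Bounded`, not the real `StorageVec`:

```rust
/// Push-only stand-in; the real type writes each element to its own storage cell.
struct Bounded {
    items: Vec<u8>,
}

impl Bounded {
    fn new() -> Self {
        Self { items: Vec::new() }
    }

    fn push(&mut self, value: u8) {
        // Mirrors the 2^32-element bound asserted by the removed `push`.
        assert!((self.items.len() as u64) < u64::from(u32::MAX), "cannot push more elements");
        self.items.push(value);
    }
}

impl Extend<u8> for Bounded {
    fn extend<I: IntoIterator<Item = u8>>(&mut self, iter: I) {
        for item in iter {
            self.push(item);
        }
    }
}

impl FromIterator<u8> for Bounded {
    fn from_iter<I: IntoIterator<Item = u8>>(iter: I) -> Self {
        let mut collection = Self::new();
        collection.extend(iter);
        collection
    }
}

fn main() {
    let collected: Bounded = [b'a', b'b', b'c'].into_iter().collect();
    assert_eq!(collected.items, vec![b'a', b'b', b'c']);
}
```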
- -use super::{ - Iter, - IterMut, - Vec as StorageVec, -}; -use crate::traits::PackedLayout; -use core::iter::{ - Extend, - FromIterator, -}; - -impl Drop for StorageVec -where - T: PackedLayout, -{ - fn drop(&mut self) { - self.clear_cells(); - } -} - -impl core::ops::Index for StorageVec -where - T: PackedLayout, -{ - type Output = T; - - fn index(&self, index: u32) -> &Self::Output { - match self.get(index) { - Some(value) => value, - None => { - panic!( - "index out of bounds: the len is {} but the index is {}", - self.len(), - index - ) - } - } - } -} - -impl core::ops::IndexMut for StorageVec -where - T: PackedLayout, -{ - fn index_mut(&mut self, index: u32) -> &mut Self::Output { - let len = self.len(); - match self.get_mut(index) { - Some(value) => value, - None => { - panic!( - "index out of bounds: the len is {} but the index is {}", - len, index - ) - } - } - } -} - -impl<'a, T: 'a> IntoIterator for &'a StorageVec -where - T: PackedLayout, -{ - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, T: 'a> IntoIterator for &'a mut StorageVec -where - T: PackedLayout, -{ - type Item = &'a mut T; - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -impl Extend for StorageVec -where - T: PackedLayout, -{ - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for item in iter { - self.push(item) - } - } -} - -impl FromIterator for StorageVec -where - T: PackedLayout, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut vec = StorageVec::new(); - vec.extend(iter); - vec - } -} - -impl core::cmp::PartialEq for StorageVec -where - T: PartialEq + PackedLayout, -{ - fn eq(&self, other: &Self) -> bool { - if self.len() != other.len() { - return false - } - self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs) - } -} - -impl core::cmp::Eq for StorageVec where T: Eq + PackedLayout {} diff --git a/crates/storage/src/collections/vec/iter.rs b/crates/storage/src/collections/vec/iter.rs deleted file mode 100644 index 481fb0d212..0000000000 --- a/crates/storage/src/collections/vec/iter.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - collections::extend_lifetime, - traits::PackedLayout, - Vec as StorageVec, -}; - -/// An iterator over shared references to the elements of a storage vector. -#[derive(Debug, Clone, Copy)] -pub struct Iter<'a, T> -where - T: PackedLayout, -{ - /// The storage vector to iterate over. - vec: &'a StorageVec, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T> Iter<'a, T> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage vector. 
- pub(crate) fn new(vec: &'a StorageVec) -> Self { - Self { - vec, - begin: 0, - end: vec.len(), - } - } - - /// Returns the amount of remaining elements to yield by the iterator. - fn remaining(&self) -> u32 { - self.end - self.begin - } -} - -impl<'a, T> Iterator for Iter<'a, T> -where - T: PackedLayout, -{ - type Item = &'a T; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } - - fn nth(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin + n >= self.end { - return None - } - let cur = self.begin + n; - self.begin += 1 + n; - self.vec.get(cur).expect("access is within bounds").into() - } -} - -impl<'a, T> ExactSizeIterator for Iter<'a, T> where T: PackedLayout {} - -impl<'a, T> DoubleEndedIterator for Iter<'a, T> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - ::nth_back(self, 0) - } - - fn nth_back(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin >= self.end.saturating_sub(n) { - return None - } - self.end -= 1 + n; - self.vec - .get(self.end) - .expect("access is within bounds") - .into() - } -} - -/// An iterator over exclusive references to the elements of a storage vector. -#[derive(Debug)] -pub struct IterMut<'a, T> -where - T: PackedLayout, -{ - /// The storage vector to iterate over. - vec: &'a mut StorageVec, - /// The current begin of the iteration. - begin: u32, - /// The current end of the iteration. - end: u32, -} - -impl<'a, T> IterMut<'a, T> -where - T: PackedLayout, -{ - /// Creates a new iterator for the given storage vector. - pub(crate) fn new(vec: &'a mut StorageVec) -> Self { - let len = vec.len(); - Self { - vec, - begin: 0, - end: len, - } - } - - /// Returns the amount of remaining elements to yield by the iterator. - fn remaining(&self) -> u32 { - self.end - self.begin - } -} - -impl<'a, T> IterMut<'a, T> -where - T: PackedLayout, -{ - fn get_mut<'b>(&'b mut self, at: u32) -> Option<&'a mut T> { - self.vec.get_mut(at).map(|value| { - // SAFETY: We extend the lifetime of the reference here. - // - // This is safe because the iterator yields an exclusive - // reference to every element in the iterated vector - // just once and also there can be only one such iterator - // for the same vector at the same time which is - // guaranteed by the constructor of the iterator. 
- unsafe { extend_lifetime::<'b, 'a, T>(value) } - }) - } -} - -impl<'a, T> Iterator for IterMut<'a, T> -where - T: PackedLayout, -{ - type Item = &'a mut T; - - fn next(&mut self) -> Option { - ::nth(self, 0) - } - - fn size_hint(&self) -> (usize, Option) { - let remaining = self.remaining() as usize; - (remaining, Some(remaining)) - } - - fn count(self) -> usize { - self.remaining() as usize - } - - fn nth(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin + n >= self.end { - return None - } - let cur = self.begin + n; - self.begin += 1 + n; - self.get_mut(cur).expect("access is within bounds").into() - } -} - -impl<'a, T> ExactSizeIterator for IterMut<'a, T> where T: PackedLayout {} - -impl<'a, T> DoubleEndedIterator for IterMut<'a, T> -where - T: PackedLayout, -{ - fn next_back(&mut self) -> Option { - ::nth_back(self, 0) - } - - fn nth_back(&mut self, n: usize) -> Option { - debug_assert!(self.begin <= self.end); - let n = n as u32; - if self.begin >= self.end.saturating_sub(n) { - return None - } - self.end -= 1 + n; - self.get_mut(self.end) - .expect("access is within bounds") - .into() - } -} diff --git a/crates/storage/src/collections/vec/mod.rs b/crates/storage/src/collections/vec/mod.rs deleted file mode 100644 index 4626a9f3fc..0000000000 --- a/crates/storage/src/collections/vec/mod.rs +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A storage vector used to store elements in a contiguous sequenced order. -//! -//! This is by default the go-to collection for most smart contracts if there -//! are no special requirements to the storage data structure. - -mod impls; -mod iter; -mod storage; - -#[cfg(test)] -mod tests; - -#[cfg(all(test, feature = "ink-fuzz-tests"))] -mod fuzz_tests; - -pub use self::iter::{ - Iter, - IterMut, -}; -use crate::{ - lazy::{ - Lazy, - LazyIndexMap, - }, - traits::PackedLayout, -}; - -/// A contiguous growable array type, written `Vec` but pronounced "vector". -/// -/// # Note -/// -/// Despite the similarity to Rust's `Vec` type this storage `Vec` has many -/// differences in its internal data layout. While it stores its data in contiguous -/// storage slots this does not mean that the data is actually densely stored -/// in memory. -/// -/// Also its technical performance characteristics may be different from Rust's -/// `Vec` due to the differences stated above. -/// -/// Allows to store up to `2^32` elements and is guaranteed to not reallocate -/// upon pushing new elements to it. -#[derive(Debug)] -pub struct Vec -where - T: PackedLayout, -{ - /// The length of the vector. - len: Lazy, - /// The synchronized cells to operate on the contract storage. - elems: LazyIndexMap, -} - -/// The index is out of the bounds of this vector. 
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct IndexOutOfBounds; - -impl Default for Vec -where - T: PackedLayout, -{ - fn default() -> Self { - Self::new() - } -} - -impl Vec -where - T: PackedLayout, -{ - /// Creates a new empty storage vector. - pub fn new() -> Self { - Self { - len: Lazy::new(0), - elems: LazyIndexMap::new(), - } - } - - /// Returns the number of elements in the vector, also referred to as its length. - pub fn len(&self) -> u32 { - *self.len - } - - /// Returns `true` if the vector contains no elements. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -impl Vec -where - T: PackedLayout, -{ - /// Clears the underlying storage cells of the storage vector. - /// - /// # Note - /// - /// This completely invalidates the storage vector's invariants about - /// the contents of its associated storage region. - /// - /// This API is used for the `Drop` implementation of [`Vec`] as well as - /// for the [`SpreadLayout::clear_spread`][`crate::traits::SpreadLayout::clear_spread`] - /// trait implementation. - fn clear_cells(&self) { - if self.elems.key().is_none() { - // We won't clear any storage if we are in lazy state since there - // probably has not been any state written to storage, yet. - return - } - for index in 0..self.len() { - self.elems.clear_packed_at(index); - } - } -} - -impl Vec -where - T: PackedLayout, -{ - /// Returns an iterator yielding shared references to all elements of the vector. - /// - /// # Note - /// - /// Avoid unbounded iteration over big storage vectors. - /// Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter(&self) -> Iter { - Iter::new(self) - } - - /// Returns an iterator yielding exclusive references to all elements of the vector. - /// - /// # Note - /// - /// Avoid unbounded iteration over big storage vectors. - /// Prefer using methods like `Iterator::take` in order to limit the number - /// of yielded elements. - pub fn iter_mut(&mut self) -> IterMut { - IterMut::new(self) - } - - /// Returns the index if it is within bounds or `None` otherwise. - fn within_bounds(&self, index: u32) -> Option { - if index < self.len() { - return Some(index) - } - None - } - - /// Returns a shared reference to the first element if any. - pub fn first(&self) -> Option<&T> { - if self.is_empty() { - return None - } - self.get(0) - } - - /// Returns a shared reference to the last element if any. - pub fn last(&self) -> Option<&T> { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - self.get(last_index) - } - - /// Returns a shared reference to the indexed element. - /// - /// Returns `None` if `index` is out of bounds. - pub fn get(&self, index: u32) -> Option<&T> { - self.within_bounds(index) - .and_then(|index| self.elems.get(index)) - } -} - -impl Vec -where - T: PackedLayout, -{ - /// Appends an element to the back of the vector. - pub fn push(&mut self, value: T) { - assert!( - self.len() < core::u32::MAX, - "cannot push more elements into the storage vector" - ); - let last_index = self.len(); - *self.len += 1; - self.elems.put(last_index, Some(value)); - } - - /// Binary searches this sorted vector for a given element. - /// - /// If the value is found then [`Result::Ok`] is returned, containing the - /// index of the matching element. If there are multiple matches, then any - /// one of the matches could be returned. 
If the value is not found then - /// [`Result::Err`] is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// See also [`binary_search_by`], [`binary_search_by_key`]. - /// - /// [`binary_search_by`]: Vec::binary_search_by - /// [`binary_search_by_key`]: Vec::binary_search_by_key - /// - /// # Examples - /// - /// Looks up a series of four elements. The first is found, with a - /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1, 4]`. - /// - /// ```ignore - /// # // Tracking issue [#1119]: We currently ignore this test since we stopped exposing - /// # // `StorageVec` publicly. - /// use ink_storage::Vec as StorageVec; - /// - /// let s: StorageVec = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] - /// .into_iter() - /// .collect(); - /// - /// assert_eq!(s.binary_search(&13), Ok(9)); - /// assert_eq!(s.binary_search(&4), Err(7)); - /// assert_eq!(s.binary_search(&100), Err(13)); - /// let r = s.binary_search(&1); - /// assert!(match r { Ok(1..=4) => true, _ => false, }); - /// ``` - #[inline] - pub fn binary_search(&self, x: &T) -> Result - where - T: Ord, - { - self.binary_search_by(|p| p.cmp(x)) - } - - /// Binary searches this sorted vector with a comparator function. - /// - /// The comparator function should implement an order consistent - /// with the sort order of the underlying vector, returning an - /// order code that indicates whether its argument is `Less`, - /// `Equal` or `Greater` the desired target. - /// - /// If the value is found then [`Result::Ok`] is returned, containing the - /// index of the matching element. If there are multiple matches, then any - /// one of the matches could be returned. If the value is not found then - /// [`Result::Err`] is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// See also [`binary_search`], [`binary_search_by_key`]. - /// - /// [`binary_search`]: Vec::binary_search - /// [`binary_search_by_key`]: Vec::binary_search_by_key - /// - /// # Examples - /// - /// Looks up a series of four elements. The first is found, with a - /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1, 4]`. - /// - /// ```ignore - /// # // Tracking issue [#1119]: We currently ignore this test since we stopped exposing - /// # // `StorageVec` publicly. - /// use ink_storage::Vec as StorageVec; - /// - /// let s: StorageVec = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] - /// .into_iter() - /// .collect(); - /// - /// let seek = 13; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9)); - /// let seek = 4; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7)); - /// let seek = 100; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13)); - /// let seek = 1; - /// let r = s.binary_search_by(|probe| probe.cmp(&seek)); - /// assert!(match r { Ok(1..=4) => true, _ => false, }); - /// ``` - // The binary_search implementation is ported from - // https://github.com/rust-lang/rust/blob/c5e344f7747dbd7e7d4b209e3c480deb5979a56f/library/core/src/slice/mod.rs#L2191 - // and attempts to remain as close to the source as possible. 
- #[inline] - pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result - where - F: FnMut(&'a T) -> core::cmp::Ordering, - { - use core::cmp::Ordering::*; - - let mut size = self.len(); - let mut left = 0; - let mut right = size; - while left < right { - let mid = left + size / 2; - - // the call is made safe by the following invariants: - // - `mid >= 0` - // - `mid < size`: `mid` is limited by `[left; right)` bound. - let cmp = f(&self[mid]); - - // The reason why we use if/else control flow rather than match - // is because match reorders comparison operations, which is perf sensitive. - if cmp == Less { - left = mid + 1; - } else if cmp == Greater { - right = mid; - } else { - return Ok(mid) - } - - size = right - left; - } - Err(left) - } - - /// Binary searches this sorted vector with a key extraction function. - /// - /// If the value is found then [`Result::Ok`] is returned, containing the - /// index of the matching element. If there are multiple matches, then any - /// one of the matches could be returned. If the value is not found then - /// [`Result::Err`] is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// See also [`binary_search`], [`binary_search_by`]. - /// - /// [`binary_search`]: Vec::binary_search - /// [`binary_search_by`]: Vec::binary_search_by - /// - /// # Examples - /// - /// Looks up a series of four elements in a vector of pairs sorted by - /// their second elements. The first is found, with a uniquely - /// determined position; the second and third are not found; the - /// fourth could match any position in `[1, 4]`. - /// - /// ```ignore - /// # // Tracking issue [#1119]: We currently ignore this test since we stopped exposing - /// # // `StorageVec` publicly. - /// use ink_storage::Vec as StorageVec; - /// - /// let s: StorageVec<(i32, i32)> = [ - /// (0, 0), - /// (2, 1), - /// (4, 1), - /// (5, 1), - /// (3, 1), - /// (1, 2), - /// (2, 3), - /// (4, 5), - /// (5, 8), - /// (3, 13), - /// (1, 21), - /// (2, 34), - /// (4, 55), - /// ] - /// .into_iter() - /// .collect(); - /// - /// assert_eq!(s.binary_search_by_key(&13, |&(a, b)| b), Ok(9)); - /// assert_eq!(s.binary_search_by_key(&4, |&(a, b)| b), Err(7)); - /// assert_eq!(s.binary_search_by_key(&100, |&(a, b)| b), Err(13)); - /// let r = s.binary_search_by_key(&1, |&(a, b)| b); - /// assert!(match r { Ok(1..=4) => true, _ => false, }); - /// ``` - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result - where - F: FnMut(&'a T) -> B, - B: Ord, - { - self.binary_search_by(|k| f(k).cmp(b)) - } -} - -impl Vec -where - T: PackedLayout, -{ - /// Pops the last element from the vector and returns it. - // - /// Returns `None` if the vector is empty. - pub fn pop(&mut self) -> Option { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - *self.len = last_index; - self.elems.put_get(last_index, None) - } - - /// Pops the last element from the vector and immediately drops it. - /// - /// Returns `Some(())` if an element has been removed and `None` otherwise. - /// - /// # Note - /// - /// This operation is a bit more efficient than [`Vec::pop`] - /// since it avoids reading from contract storage in some use cases. - pub fn pop_drop(&mut self) -> Option<()> { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - *self.len = last_index; - self.elems.put(last_index, None); - Some(()) - } - - /// Returns an exclusive reference to the first element if any. 
- pub fn first_mut(&mut self) -> Option<&mut T> { - if self.is_empty() { - return None - } - self.get_mut(0) - } - - /// Returns an exclusive reference to the last element if any. - pub fn last_mut(&mut self) -> Option<&mut T> { - if self.is_empty() { - return None - } - let last_index = self.len() - 1; - self.get_mut(last_index) - } - - /// Returns an exclusive reference to the indexed element. - /// - /// Returns `None` if `index` is out of bounds. - pub fn get_mut(&mut self, index: u32) -> Option<&mut T> { - self.within_bounds(index) - .and_then(move |index| self.elems.get_mut(index)) - } - - /// Swaps the elements at the given indices. - /// - /// # Panics - /// - /// If one or both indices are out of bounds. - pub fn swap(&mut self, a: u32, b: u32) { - assert!( - a < self.len() && b < self.len(), - "indices are out of bounds" - ); - self.elems.swap(a, b) - } - - /// Removes the indexed element from the vector and returns it. - /// - /// The last element of the vector is put into the indexed slot. - /// Returns `None` and does not mutate the vector if the index is out of bounds. - /// - /// # Note - /// - /// This operation does not preserve ordering but is constant time. - pub fn swap_remove(&mut self, n: u32) -> Option { - if self.is_empty() { - return None - } - self.elems.swap(n, self.len() - 1); - self.pop() - } - - /// Removes the indexed element from the vector. - /// - /// The last element of the vector is put into the indexed slot. - /// Returns `Some(())` if an element has been removed and `None` otherwise. - /// - /// # Note - /// - /// This operation should be preferred over [`Vec::swap_remove`] if there is - /// no need to return the removed element since it avoids a contract storage - /// read for some use cases. - pub fn swap_remove_drop(&mut self, n: u32) -> Option<()> { - if self.is_empty() { - return None - } - self.elems.put(n, None); - let last_index = self.len() - 1; - let last = self.elems.put_get(last_index, None); - self.elems.put(n, last); - *self.len = last_index; - Some(()) - } - - /// Sets the elements at the given index to the new value. - /// - /// Won't return the old element back to the caller. - /// Prefer this operation over other method of overriding an element - /// in the storage vector since this is more efficient. - #[inline] - pub fn set(&mut self, index: u32, new_value: T) -> Result<(), IndexOutOfBounds> { - if self.within_bounds(index).is_none() { - return Err(IndexOutOfBounds) - } - self.elems.put(index, Some(new_value)); - Ok(()) - } - - /// Removes all elements from this vector. - /// - /// # Note - /// - /// Use this method to clear the vector instead of e.g. iterative `pop()`. - /// This method performs significantly better and does not actually read - /// any of the elements (whereas `pop()` does). - pub fn clear(&mut self) { - if self.is_empty() { - return - } - for index in 0..self.len() { - self.elems.put(index, None); - } - *self.len = 0; - } -} diff --git a/crates/storage/src/collections/vec/storage.rs b/crates/storage/src/collections/vec/storage.rs deleted file mode 100644 index fb6de91a0b..0000000000 --- a/crates/storage/src/collections/vec/storage.rs +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of ink! storage traits. - -use super::Vec as StorageVec; -use crate::{ - lazy::LazyIndexMap, - traits::{ - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, - }, -}; - -#[cfg(feature = "std")] -const _: () = { - use crate::{ - lazy::Lazy, - traits::StorageLayout, - }; - use ink_metadata::layout::{ - FieldLayout, - Layout, - StructLayout, - }; - use scale_info::TypeInfo; - - impl StorageLayout for StorageVec - where - T: PackedLayout + TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Struct(StructLayout::new([ - FieldLayout::new("len", as StorageLayout>::layout(key_ptr)), - FieldLayout::new( - "elems", - as StorageLayout>::layout(key_ptr), - ), - ])) - } - } -}; - -impl SpreadLayout for StorageVec -where - T: PackedLayout, -{ - const FOOTPRINT: u64 = 1 + as SpreadLayout>::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadLayout::pull_spread(ptr), - elems: SpreadLayout::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.len, ptr); - SpreadLayout::push_spread(&self.elems, ptr); - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - self.clear_cells(); - SpreadLayout::clear_spread(&self.len, ptr); - SpreadLayout::clear_spread(&self.elems, ptr); - } -} - -impl SpreadAllocate for StorageVec -where - T: PackedLayout, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - len: SpreadAllocate::allocate_spread(ptr), - elems: SpreadAllocate::allocate_spread(ptr), - } - } -} diff --git a/crates/storage/src/collections/vec/tests.rs b/crates/storage/src/collections/vec/tests.rs deleted file mode 100644 index a4ccee76d9..0000000000 --- a/crates/storage/src/collections/vec/tests.rs +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -use super::Vec as StorageVec; -use crate::{ - collections::vec::IndexOutOfBounds, - traits::{ - KeyPtr, - PackedLayout, - SpreadLayout, - }, - Lazy, -}; -use core::cmp::Ordering; -use ink_primitives::Key; - -#[test] -fn new_vec_works() { - // `StorageVec::new` - let vec = >::new(); - assert!(vec.is_empty()); - assert_eq!(vec.len(), 0); - assert_eq!(vec.get(0), None); - assert!(vec.iter().next().is_none()); - // `StorageVec::default` - let default = as Default>::default(); - assert!(default.is_empty()); - assert_eq!(default.len(), 0); - assert_eq!(vec.get(0), None); - assert!(default.iter().next().is_none()); - // `StorageVec::new` and `StorageVec::default` should be equal. 
- assert_eq!(vec, default); -} - -#[test] -fn from_iterator_works() { - let some_primes = [1, 2, 3, 5, 7, 11, 13]; - assert_eq!(some_primes.iter().copied().collect::>(), { - let mut vec = StorageVec::new(); - for prime in &some_primes { - vec.push(*prime) - } - vec - }); -} - -#[test] -fn from_empty_iterator_works() { - assert_eq!( - [].iter().copied().collect::>(), - StorageVec::new(), - ); -} - -#[test] -fn first_last_of_empty() { - let mut vec = >::new(); - assert_eq!(vec.first(), None); - assert_eq!(vec.first_mut(), None); - assert_eq!(vec.last(), None); - assert_eq!(vec.last_mut(), None); -} - -#[test] -fn push_pop_first_last_works() { - /// Asserts conditions are met for the given storage vector. - fn assert_vec(vec: &StorageVec, len: u32, first: F, last: L) - where - F: Into>, - L: Into>, - { - assert_eq!(vec.is_empty(), len == 0); - assert_eq!(vec.len(), len); - assert_eq!(vec.first().copied(), first.into()); - assert_eq!(vec.last().copied(), last.into()); - } - - let mut vec = StorageVec::new(); - assert_vec(&vec, 0, None, None); - - // Sequence of `push` - vec.push(b'a'); - assert_vec(&vec, 1, b'a', b'a'); - vec.push(b'b'); - assert_vec(&vec, 2, b'a', b'b'); - vec.push(b'c'); - assert_vec(&vec, 3, b'a', b'c'); - vec.push(b'd'); - assert_vec(&vec, 4, b'a', b'd'); - - // Sequence of `pop` - assert_eq!(vec.pop(), Some(b'd')); - assert_vec(&vec, 3, b'a', b'c'); - assert_eq!(vec.pop(), Some(b'c')); - assert_vec(&vec, 2, b'a', b'b'); - assert_eq!(vec.pop(), Some(b'b')); - assert_vec(&vec, 1, b'a', b'a'); - assert_eq!(vec.pop(), Some(b'a')); - assert_vec(&vec, 0, None, None); - - // Pop from empty vector. - assert_eq!(vec.pop(), None); - assert_vec(&vec, 0, None, None); -} - -#[test] -fn pop_drop_works() { - let elems = [b'a', b'b', b'c', b'd']; - let mut vec = vec_from_slice(&elems); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &elems[0..3]); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &elems[0..2]); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &elems[0..1]); - assert_eq!(vec.pop_drop(), Some(())); - assert_eq_slice(&vec, &[]); - assert_eq!(vec.pop_drop(), None); - assert_eq_slice(&vec, &[]); -} - -#[test] -fn get_works() { - let elems = [b'a', b'b', b'c', b'd']; - let mut vec = vec_from_slice(&elems); - for (n, mut expected) in elems.iter().copied().enumerate() { - let n = n as u32; - assert_eq!(vec.get(n), Some(&expected)); - assert_eq!(vec.get_mut(n), Some(&mut expected)); - assert_eq!(&vec[n], &expected); - assert_eq!(&mut vec[n], &mut expected); - } - let len = vec.len(); - assert_eq!(vec.get(len), None); - assert_eq!(vec.get_mut(len), None); -} - -#[test] -#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] -fn index_out_of_bounds_works() { - let test_values = [b'a', b'b', b'c']; - let vec = vec_from_slice(&test_values); - let _ = &vec[test_values.len() as u32]; -} - -#[test] -#[should_panic(expected = "index out of bounds: the len is 3 but the index is 3")] -fn index_mut_out_of_bounds_works() { - let test_values = [b'a', b'b', b'c']; - let mut vec = vec_from_slice(&test_values); - let _ = &mut vec[test_values.len() as u32]; -} - -#[test] -fn iter_next_works() { - let elems = [b'a', b'b', b'c', b'd']; - let vec = vec_from_slice(&elems); - // Test iterator over `&T`: - let mut iter = vec.iter(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some(&b'a')); - assert_eq!(iter.size_hint(), (3, Some(3))); - assert_eq!(iter.next(), 
Some(&b'b')); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.count(), 2); - assert_eq!(iter.next(), Some(&b'c')); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some(&b'd')); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over `&mut T`: - let mut vec = vec; - let mut iter = vec.iter_mut(); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.next(), Some(&mut b'a')); - assert_eq!(iter.size_hint(), (3, Some(3))); - assert_eq!(iter.next(), Some(&mut b'b')); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.next(), Some(&mut b'c')); - assert_eq!(iter.size_hint(), (1, Some(1))); - assert_eq!(iter.next(), Some(&mut b'd')); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_nth_works() { - let elems = [b'a', b'b', b'c', b'd']; - let vec = vec_from_slice(&elems); - // Test iterator over `&T`: - let mut iter = vec.iter(); - assert_eq!(iter.count(), 4); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.nth(1), Some(&b'b')); - assert_eq!(iter.count(), 2); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.nth(1), Some(&b'd')); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.count(), 0); - assert_eq!(iter.nth(1), None); - // Test iterator over `&mut T`: - let mut vec = vec; - let mut iter = vec.iter_mut(); - assert_eq!(iter.size_hint(), (4, Some(4))); - assert_eq!(iter.nth(1), Some(&mut b'b')); - assert_eq!(iter.size_hint(), (2, Some(2))); - assert_eq!(iter.nth(1), Some(&mut b'd')); - assert_eq!(iter.size_hint(), (0, Some(0))); - assert_eq!(iter.nth(1), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_next_back_works() { - let elems = [b'a', b'b', b'c', b'd']; - let vec = vec_from_slice(&elems); - // Test iterator over `&T`: - let mut iter = vec.iter().rev(); - assert_eq!(iter.clone().count(), 4); - assert_eq!(iter.next(), Some(&b'd')); - assert_eq!(iter.next(), Some(&b'c')); - assert_eq!(iter.clone().count(), 2); - assert_eq!(iter.next(), Some(&b'b')); - assert_eq!(iter.next(), Some(&b'a')); - assert_eq!(iter.clone().count(), 0); - assert_eq!(iter.next(), None); - // Test iterator over `&mut T`: - let mut vec = vec; - let mut iter = vec.iter_mut().rev(); - assert_eq!(iter.next(), Some(&mut b'd')); - assert_eq!(iter.next(), Some(&mut b'c')); - assert_eq!(iter.next(), Some(&mut b'b')); - assert_eq!(iter.next(), Some(&mut b'a')); - assert_eq!(iter.next(), None); - assert_eq!(iter.count(), 0); -} - -#[test] -fn iter_nth_back_works() { - let elems = [b'a', b'b', b'c', b'd']; - let vec = vec_from_slice(&elems); - // Test iterator over `&T`: - let mut iter = vec.iter().rev(); - assert_eq!(iter.clone().count(), 4); - assert_eq!(iter.nth(1), Some(&b'c')); - assert_eq!(iter.clone().count(), 2); - assert_eq!(iter.nth(1), Some(&b'a')); - assert_eq!(iter.clone().count(), 0); - assert_eq!(iter.nth(1), None); - // Test iterator over `&mut T`: - let mut vec = vec; - let mut iter = vec.iter_mut().rev(); - assert_eq!(iter.nth(1), Some(&mut b'c')); - assert_eq!(iter.nth(1), Some(&mut b'a')); - assert_eq!(iter.nth(1), None); - assert_eq!(iter.count(), 0); -} - -/// Asserts that the given ordered storage vector elements are equal to the -/// ordered elements of the given slice. 
-fn assert_eq_slice(vec: &StorageVec, slice: &[u8]) { - assert_eq!(vec.len() as usize, slice.len()); - assert!(vec.iter().zip(slice.iter()).all(|(lhs, rhs)| *lhs == *rhs)) -} - -/// Creates a storage vector from the given slice. -fn vec_from_slice(slice: &[T]) -> StorageVec { - slice.iter().copied().collect::>() -} - -#[test] -fn swap_works() { - let elems = [b'a', b'b', b'c', b'd']; - let mut vec = vec_from_slice(&elems); - - // Swap at same position is a no-op. - for index in 0..elems.len() as u32 { - vec.swap(index, index); - assert_eq_slice(&vec, &elems); - } - - // Swap first and second - vec.swap(0, 1); - assert_eq_slice(&vec, &[b'b', b'a', b'c', b'd']); - // Swap third and last - vec.swap(2, 3); - assert_eq_slice(&vec, &[b'b', b'a', b'd', b'c']); - // Swap first and last - vec.swap(0, 3); - assert_eq_slice(&vec, &[b'c', b'a', b'd', b'b']); -} - -#[test] -#[should_panic] -fn swap_one_invalid_index() { - let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - vec.swap(0, vec.len()); -} - -#[test] -#[should_panic] -fn swap_both_invalid_indices() { - let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - vec.swap(vec.len(), vec.len()); -} - -#[test] -fn swap_remove_works() { - let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - - // Swap remove first element. - assert_eq!(vec.swap_remove(0), Some(b'a')); - assert_eq_slice(&vec, &[b'd', b'b', b'c']); - // Swap remove middle element. - assert_eq!(vec.swap_remove(1), Some(b'b')); - assert_eq_slice(&vec, &[b'd', b'c']); - // Swap remove last element. - assert_eq!(vec.swap_remove(1), Some(b'c')); - assert_eq_slice(&vec, &[b'd']); - // Swap remove only element. - assert_eq!(vec.swap_remove(0), Some(b'd')); - assert_eq_slice(&vec, &[]); - // Swap remove from empty vector. - assert_eq!(vec.swap_remove(0), None); - assert_eq_slice(&vec, &[]); -} - -#[test] -fn swap_remove_drop_works() { - let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - - // Swap remove first element. - assert_eq!(vec.swap_remove_drop(0), Some(())); - assert_eq_slice(&vec, &[b'd', b'b', b'c']); - // Swap remove middle element. - assert_eq!(vec.swap_remove_drop(1), Some(())); - assert_eq_slice(&vec, &[b'd', b'c']); - // Swap remove last element. - assert_eq!(vec.swap_remove_drop(1), Some(())); - assert_eq_slice(&vec, &[b'd']); - // Swap remove only element. - assert_eq!(vec.swap_remove_drop(0), Some(())); - assert_eq_slice(&vec, &[]); - // Swap remove from empty vector. - assert_eq!(vec.swap_remove_drop(0), None); - assert_eq_slice(&vec, &[]); -} - -#[test] -fn spread_layout_push_pull_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key)); - // Load the pushed storage vector into another instance and check that - // both instances are equal: - let vec2 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(vec1, vec2); - Ok(()) - }) -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn spread_layout_clear_works() { - ink_env::test::run_test::(|_| { - let vec1 = vec_from_slice(&[b'a', b'b', b'c', b'd']); - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&vec1, &mut KeyPtr::from(root_key)); - // It has already been asserted that a valid instance can be pulled - // from contract storage after a push to the same storage region. 
- // - // Now clear the associated storage from `vec1` and check whether - // loading another instance from this storage will panic since the - // vector's length property cannot read a value: - SpreadLayout::clear_spread(&vec1, &mut KeyPtr::from(root_key)); - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} - -#[test] -fn set_works() { - ink_env::test::run_test::(|_| { - let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - vec.set(0, b'x').unwrap(); - let expected = vec_from_slice(&[b'x', b'b', b'c', b'd']); - assert_eq!(vec, expected); - Ok(()) - }) - .unwrap() -} - -#[test] -fn set_fails_when_index_oob() { - let mut vec = vec_from_slice(&[b'a']); - let res = vec.set(1, b'x'); - assert_eq!(res, Err(IndexOutOfBounds)); -} - -#[test] -fn clear_works_on_filled_vec() { - let mut vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - vec.clear(); - assert!(vec.is_empty()); -} - -#[test] -fn clear_works_on_empty_vec() { - let mut vec: StorageVec<()> = vec_from_slice(&[]); - vec.clear(); - assert!(vec.is_empty()); -} - -#[test] -fn test_binary_search() { - let b: StorageVec = StorageVec::new(); - assert_eq!(b.binary_search(&5), Err(0)); - - let b = vec_from_slice(&[4]); - assert_eq!(b.binary_search(&3), Err(0)); - assert_eq!(b.binary_search(&4), Ok(0)); - assert_eq!(b.binary_search(&5), Err(1)); - - let b = vec_from_slice(&[1, 2, 4, 6, 8, 9]); - assert_eq!(b.binary_search(&5), Err(3)); - assert_eq!(b.binary_search(&6), Ok(3)); - assert_eq!(b.binary_search(&7), Err(4)); - assert_eq!(b.binary_search(&8), Ok(4)); - - let b = vec_from_slice(&[1, 2, 4, 5, 6, 8]); - assert_eq!(b.binary_search(&9), Err(6)); - - let b = vec_from_slice(&[1, 2, 4, 6, 7, 8, 9]); - assert_eq!(b.binary_search(&6), Ok(3)); - assert_eq!(b.binary_search(&5), Err(3)); - assert_eq!(b.binary_search(&8), Ok(5)); - - let b = vec_from_slice(&[1, 2, 4, 5, 6, 8, 9]); - assert_eq!(b.binary_search(&7), Err(5)); - assert_eq!(b.binary_search(&0), Err(0)); - - let b = vec_from_slice(&[1, 3, 3, 3, 7]); - assert_eq!(b.binary_search(&0), Err(0)); - assert_eq!(b.binary_search(&1), Ok(0)); - assert_eq!(b.binary_search(&2), Err(1)); - matches!(b.binary_search(&3), Ok(1..=3)); - assert_eq!(b.binary_search(&4), Err(4)); - assert_eq!(b.binary_search(&5), Err(4)); - assert_eq!(b.binary_search(&6), Err(4)); - assert_eq!(b.binary_search(&7), Ok(4)); - assert_eq!(b.binary_search(&8), Err(5)); - - let b = vec_from_slice(&[(); u8::MAX as usize]); - assert_eq!(b.binary_search(&()), Ok(u8::MAX as u32 / 2)); -} - -#[test] -fn test_binary_search_by_overflow() { - let b = vec_from_slice(&[(); u8::MAX as usize]); - assert_eq!( - b.binary_search_by(|_| Ordering::Equal), - Ok(u8::MAX as u32 / 2) - ); - assert_eq!(b.binary_search_by(|_| Ordering::Greater), Err(0)); - assert_eq!(b.binary_search_by(|_| Ordering::Less), Err(u8::MAX as u32)); -} - -#[test] -// Test implementation specific behavior when finding equivalent elements. 
-fn test_binary_search_implementation_details() { - let b = vec_from_slice(&[1, 1, 2, 2, 3, 3, 3]); - assert_eq!(b.binary_search(&1), Ok(1)); - assert_eq!(b.binary_search(&2), Ok(3)); - assert_eq!(b.binary_search(&3), Ok(5)); - let b = vec_from_slice(&[1, 1, 1, 1, 1, 3, 3, 3, 3]); - assert_eq!(b.binary_search(&1), Ok(4)); - assert_eq!(b.binary_search(&3), Ok(7)); - let b = vec_from_slice(&[1, 1, 1, 1, 3, 3, 3, 3, 3]); - assert_eq!(b.binary_search(&1), Ok(2)); - assert_eq!(b.binary_search(&3), Ok(4)); -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn storage_is_cleared_completely_after_pull_lazy() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - let mut lazy_vec: Lazy> = Lazy::new(StorageVec::new()); - lazy_vec.push(13u32); - lazy_vec.push(13u32); - SpreadLayout::push_spread(&lazy_vec, &mut KeyPtr::from(root_key)); - let pulled_vec = > as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - - // when - SpreadLayout::clear_spread(&pulled_vec, &mut KeyPtr::from(root_key)); - - // then - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - let _ = - *> as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - - Ok(()) - }) - .unwrap() -} - -#[test] -#[should_panic(expected = "encountered empty storage cell")] -fn drop_works() { - ink_env::test::run_test::(|_| { - let root_key = Key::from([0x42; 32]); - - // if the setup panics it should not cause the test to pass - let setup_result = std::panic::catch_unwind(|| { - let vec = vec_from_slice(&[b'a', b'b', b'c', b'd']); - SpreadLayout::push_spread(&vec, &mut KeyPtr::from(root_key)); - let _ = as SpreadLayout>::pull_spread(&mut KeyPtr::from( - root_key, - )); - // vec is dropped which should clear the cells - }); - assert!(setup_result.is_ok(), "setup should not panic"); - - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - - let _ = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() -} diff --git a/crates/storage/src/hashmap_entry_api_tests.rs b/crates/storage/src/hashmap_entry_api_tests.rs deleted file mode 100644 index 2e4776629a..0000000000 --- a/crates/storage/src/hashmap_entry_api_tests.rs +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::traits::{ - KeyPtr, - SpreadLayout, -}; -use ink_primitives::Key; - -#[cfg(test)] -macro_rules! gen_tests_for_backend { - ( $backend:ty ) => { - /// Returns some test values. - fn test_values() -> [(u8, i32); 2] { - [(b'A', 13), (b'B', 23)] - } - - /// Returns a prefilled hashmap with `[('A', 13), ['B', 23])`. 
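// The entry-API tests generated below exercise the same surface as the familiar
// `std::collections::HashMap` entry API; a runnable std-only sketch of the
// pattern, with values matching `test_values` above:
use std::collections::HashMap;

fn main() {
    let mut map: HashMap<u8, i32> = [(b'A', 13), (b'B', 23)].into_iter().collect();
    // `or_insert` leaves an occupied entry untouched ...
    assert_eq!(*map.entry(b'A').or_insert(77), 13);
    // ... and inserts into a vacant one.
    assert_eq!(*map.entry(b'C').or_insert(42), 42);
    // `and_modify` only runs for occupied entries.
    map.entry(b'B').and_modify(|v| *v += 1).or_insert(7);
    assert_eq!(map[&b'B'], 24);
}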
- fn prefilled_hmap() -> $backend { - test_values().iter().copied().collect::<$backend>() - } - - /// Returns always the same `KeyPtr`. - fn key_ptr() -> KeyPtr { - let root_key = Key::from([0x42; 32]); - KeyPtr::from(root_key) - } - - /// Pushes a `HashMap` instance into the contract storage. - fn push_hmap(hmap: &$backend) { - SpreadLayout::push_spread(hmap, &mut key_ptr()); - } - - /// Pulls a `HashMap` instance from the contract storage. - fn pull_hmap() -> $backend { - <$backend as SpreadLayout>::pull_spread(&mut key_ptr()) - } - - fn push_pull_prefilled_hmap() -> $backend { - let hmap1 = prefilled_hmap(); - assert_eq!(hmap1.get(&b'A'), Some(&13)); - push_hmap(&hmap1); - pull_hmap() - } - - #[test] - fn insert_inexistent_works_with_empty() { - // given - let mut hmap = <$backend>::new(); - assert!(matches!(hmap.entry(b'A'), Vacant(_))); - assert!(hmap.get(&b'A').is_none()); - - // when - assert_eq!(*hmap.entry(b'A').or_insert(77), 77); - - // then - assert_eq!(hmap.get(&b'A'), Some(&77)); - assert_eq!(hmap.len_cached_entries(), 1); - } - - #[test] - fn insert_existent_works() { - // given - let mut hmap = prefilled_hmap(); - match hmap.entry(b'A') { - Vacant(_) => panic!(), - Occupied(o) => assert_eq!(o.get(), &13), - } - - // when - hmap.entry(b'A').or_insert(77); - - // then - assert_eq!(hmap.get(&b'A'), Some(&13)); - assert_eq!(hmap.len_cached_entries(), 2); - } - - #[test] - fn mutations_work_with_push_pull() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let hmap1 = prefilled_hmap(); - assert_eq!(hmap1.get(&b'A'), Some(&13)); - push_hmap(&hmap1); - - let mut hmap2 = pull_hmap(); - assert_eq!(hmap2.get(&b'A'), Some(&13)); - - // when - let v = hmap2.entry(b'A').or_insert(42); - *v += 1; - assert_eq!(hmap2.get(&b'A'), Some(&14)); - push_hmap(&hmap2); - - // then - let hmap3 = pull_hmap(); - assert_eq!(hmap3.get(&b'A'), Some(&14)); - Ok(()) - }) - } - - #[test] - fn simple_insert_with_works() { - // given - let mut hmap = prefilled_hmap(); - - // when - assert!(hmap.get(&b'C').is_none()); - let v = hmap.entry(b'C').or_insert_with(|| 42); - - // then - assert_eq!(*v, 42); - assert_eq!(hmap.get(&b'C'), Some(&42)); - assert_eq!(hmap.len_cached_entries(), 3); - } - - #[test] - fn simple_default_insert_works() { - // given - let mut hmap = <$backend>::new(); - - // when - let v = hmap.entry(b'A').or_default(); - - // then - assert_eq!(*v, 0); - assert_eq!(hmap.get(&b'A'), Some(&0)); - } - - #[test] - fn insert_with_works_with_mutations() { - // given - let mut hmap = <$backend>::new(); - let v = hmap.entry(b'A').or_insert_with(|| 42); - assert_eq!(*v, 42); - - // when - *v += 1; - - // then - assert_eq!(hmap.get(&b'A'), Some(&43)); - assert_eq!(hmap.len_cached_entries(), 1); - } - - #[test] - fn insert_with_works_with_push_pull() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap1 = <$backend>::new(); - let value = hmap1.entry(b'A').or_insert_with(|| 42); - - // when - *value = 43; - push_hmap(&hmap1); - - // then - let hmap2 = pull_hmap(); - assert_eq!(hmap2.get(&b'A'), Some(&43)); - Ok(()) - }) - } - - #[test] - fn simple_insert_with_key_works() { - // given - let mut hmap = <$backend>::new(); - - // when - let _ = hmap.entry(b'A').or_insert_with_key(|key| (key * 2).into()); - - // then - assert_eq!(hmap.get(&b'A'), Some(&130)); - } - - #[test] - fn key_get_works_with_nonexistent() { - let mut hmap = <$backend>::new(); - assert_eq!(hmap.entry(b'A').key(), &b'A'); - } - - #[test] - fn key_get_works_with_existent() { - let mut 
hmap = prefilled_hmap(); - assert_eq!(hmap.entry(b'A').key(), &b'A'); - assert_eq!(hmap.entry(b'B').key(), &b'B'); - } - - #[test] - fn and_modify_has_no_effect_for_nonexistent() { - // given - let mut hmap = <$backend>::new(); - - // when - hmap.entry(b'B').and_modify(|e| *e += 1).or_insert(42); - - // then - assert_eq!(hmap.get(&b'B'), Some(&42)); - } - - #[test] - fn and_modify_works_for_existent() { - // given - let mut hmap = prefilled_hmap(); - - // when - assert_eq!(hmap.get(&b'B'), Some(&23)); - hmap.entry(b'B').and_modify(|e| *e += 1).or_insert(7); - - // then - assert_eq!(hmap.get(&b'B'), Some(&24)); - } - - #[test] - fn occupied_entry_api_works_with_push_pull() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap1 = prefilled_hmap(); - assert_eq!(hmap1.get(&b'A'), Some(&13)); - match hmap1.entry(b'A') { - Entry::Occupied(mut o) => { - assert_eq!(o.key(), &b'A'); - assert_eq!(o.insert(15), 13); - } - Entry::Vacant(_) => panic!(), - } - push_hmap(&hmap1); - - // when - let mut hmap2 = pull_hmap(); - assert_eq!(hmap2.get(&b'A'), Some(&15)); - match hmap2.entry(b'A') { - Entry::Occupied(o) => { - assert_eq!(o.remove_entry(), (b'A', 15)); - } - Entry::Vacant(_) => panic!(), - } - push_hmap(&hmap2); - - // then - let hmap3 = pull_hmap(); - assert_eq!(hmap3.get(&b'A'), None); - - Ok(()) - }) - } - - #[test] - fn vacant_api_works() { - let mut hmap = <$backend>::new(); - match hmap.entry(b'A') { - Entry::Occupied(_) => panic!(), - Entry::Vacant(v) => { - assert_eq!(v.key(), &b'A'); - assert_eq!(v.into_key(), b'A'); - } - } - } - - #[test] - fn vacant_api_works_with_push_pull() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap1 = <$backend>::new(); - match hmap1.entry(b'A') { - Entry::Occupied(_) => panic!(), - Entry::Vacant(v) => { - let val = v.insert(42); - *val += 1; - } - } - push_hmap(&hmap1); - - // when - let hmap2 = pull_hmap(); - - // then - assert_eq!(hmap2.get(&b'A'), Some(&43)); - Ok(()) - }) - } - - #[test] - fn pulling_occupied_entry_must_succeed() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let hmap1 = prefilled_hmap(); - push_hmap(&hmap1); - - // when - let mut hmap2 = pull_hmap(); - - // then - for (k, _v) in test_values().iter() { - match hmap2.entry(*k) { - Entry::Occupied(_) => (), - Entry::Vacant(_) => panic!("the entry must be occupied"), - } - } - Ok(()) - }) - } - - #[test] - fn value_not_in_cache_but_in_storage_get_and_get_mut() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap2 = push_pull_prefilled_hmap(); - - // then - match hmap2.entry(b'A') { - Entry::Occupied(mut o) => { - assert_eq!(o.get(), &13); - assert_eq!(o.get_mut(), &mut 13); - } - Entry::Vacant(_) => panic!(), - } - - Ok(()) - }) - } - - #[test] - fn value_not_in_cache_but_in_storage_insert() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap2 = push_pull_prefilled_hmap(); - - // then - match hmap2.entry(b'A') { - Entry::Occupied(mut o) => { - assert_eq!(o.insert(999), 13); - } - Entry::Vacant(_) => panic!(), - } - assert_eq!(hmap2.get(&b'A'), Some(&999)); - - Ok(()) - }) - } - - #[test] - fn value_not_in_cache_but_in_storage_remove_entry() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap2 = push_pull_prefilled_hmap(); - - // then - match hmap2.entry(b'A') { - Entry::Occupied(o) => { - assert_eq!(o.remove_entry(), (b'A', 13)); - assert_eq!(hmap2.get(&b'A'), None); - push_hmap(&hmap2); - } - 
Entry::Vacant(_) => panic!(), - } - - let hmap3 = pull_hmap(); - assert_eq!(hmap3.get(&b'A'), None); - Ok(()) - }) - } - - #[test] - fn value_not_in_cache_is_properly_flushed_after_insert() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap2 = push_pull_prefilled_hmap(); - - // when - match hmap2.entry(b'A') { - Entry::Occupied(mut o) => { - assert_eq!(o.insert(999), 13); - } - Entry::Vacant(_) => panic!(), - } - assert_eq!(hmap2.get(&b'A'), Some(&999)); - push_hmap(&hmap2); - - // then - // the value must have been flushed, which implies that after the - // insert is was marked as `Mutated`. - let hmap3 = pull_hmap(); - assert_eq!(hmap3.get(&b'A'), Some(&999)); - - Ok(()) - }) - } - - #[test] - fn value_not_in_cache_but_in_storage_into_mut() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - // given - let mut hmap2 = push_pull_prefilled_hmap(); - - // then - match hmap2.entry(b'A') { - Entry::Occupied(o) => assert_eq!(o.into_mut(), &mut 13), - Entry::Vacant(_) => panic!(), - } - - Ok(()) - }) - } - }; -} - -mod lazyhmap_backend { - use super::*; - use crate::lazy::lazy_hmap::{ - Entry, - Entry::{ - Occupied, - Vacant, - }, - LazyHashMap, - }; - use ink_env::hash::Blake2x256; - - gen_tests_for_backend!(LazyHashMap); -} - -mod hashmap_backend { - use super::*; - use crate::collections::hashmap::{ - Entry, - Entry::{ - Occupied, - Vacant, - }, - HashMap as StorageHashMap, - }; - use ink_env::hash::Blake2x256; - - gen_tests_for_backend!(StorageHashMap); -} diff --git a/crates/storage/src/lazy/cache_cell.rs b/crates/storage/src/lazy/cache_cell.rs deleted file mode 100644 index 50688bca05..0000000000 --- a/crates/storage/src/lazy/cache_cell.rs +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use core::{ - cell::UnsafeCell, - fmt, - fmt::Debug, - ptr::NonNull, -}; - -/// A cache for a `T` that allow to mutate the inner `T` through `&self`. -/// -/// Internally this is a thin wrapper around an `UnsafeCell`. -/// The main difference to `UnsafeCell` is that this type provides an out-of-the-box -/// API to safely access the inner `T` as well for single threaded contexts. -pub struct CacheCell { - /// The inner value that is allowed to be mutated in shared contexts. - inner: UnsafeCell, -} - -impl CacheCell { - /// Creates a new cache cell from the given value. - #[inline] - pub fn new(value: T) -> Self { - Self { - inner: UnsafeCell::new(value), - } - } - - /// Returns the inner value. 
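// `CacheCell` is documented above as a thin wrapper around `UnsafeCell` for
// single-threaded use; a std-only analogue of that idea (the type and method
// names here are illustrative, not the removed API):
use core::{cell::UnsafeCell, ptr::NonNull};

struct SimpleCache<T> {
    inner: UnsafeCell<T>,
}

impl<T> SimpleCache<T> {
    fn new(value: T) -> Self {
        Self { inner: UnsafeCell::new(value) }
    }

    fn get(&self) -> &T {
        // SAFETY: only a shared reference is created here; any mutation through
        // `get_ptr` must not overlap with its lifetime.
        unsafe { &*self.inner.get() }
    }

    fn get_ptr(&self) -> NonNull<T> {
        // SAFETY: `UnsafeCell::get` points at the inner value and is never null.
        unsafe { NonNull::new_unchecked(self.inner.get()) }
    }
}

fn main() {
    let cache = SimpleCache::new(1u32);
    assert_eq!(*cache.get(), 1);
    // Mutation through `&self` is what the pointer accessor enables; the caller
    // must ensure no other reference to the inner value is alive at this point.
    unsafe { *cache.get_ptr().as_mut() += 1 };
    assert_eq!(*cache.get(), 2);
}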
- #[allow(dead_code)] - pub fn into_inner(self) -> T { - self.inner.into_inner() - } -} - -impl Debug for CacheCell -where - T: ?Sized + Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - ::fmt(self.as_inner(), f) - } -} - -impl From for CacheCell { - #[inline] - fn from(value: T) -> Self { - Self::new(value) - } -} - -impl Default for CacheCell -where - T: Default, -{ - #[inline] - fn default() -> Self { - Self::new(::default()) - } -} - -impl CacheCell -where - T: ?Sized, -{ - /// Returns a shared reference to the inner value. - #[inline] - pub fn as_inner(&self) -> &T { - // SAFETY: This is safe since we are returning a shared reference back - // to the caller while this method itself accesses `self` as - // shared reference. - unsafe { &*self.inner.get() } - } - - /// Returns an exclusive reference to the inner value. - #[inline] - pub fn as_inner_mut(&mut self) -> &mut T { - // SAFETY: This is safe since we are returning the exclusive reference - // of the inner value through the `get_mut` API which itself - // requires exclusive reference access to the wrapping `self` - // disallowing aliasing exclusive references. - unsafe { &mut *self.inner.get() } - } - - /// Returns a mutable pointer to the inner value. - #[inline] - pub fn get_ptr(&self) -> NonNull { - // SAFETY: The inner `T` of the internal `UnsafeCell` exists and thus - // the pointer that we get returned to it via `UnsafeCell::get` - // is never going to be `null`. - unsafe { NonNull::new_unchecked(self.inner.get()) } - } -} diff --git a/crates/storage/src/lazy/entry.rs b/crates/storage/src/lazy/entry.rs deleted file mode 100644 index d7ad53b91c..0000000000 --- a/crates/storage/src/lazy/entry.rs +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::traits::{ - clear_packed_root, - clear_spread_root_opt, - pull_packed_root_opt, - pull_spread_root_opt, - push_packed_root_opt, - push_spread_root_opt, - ExtKeyPtr, - KeyPtr, - PackedAllocate, - PackedLayout, - SpreadAllocate, - SpreadLayout, -}; -use core::{ - cell::Cell, - fmt, - fmt::Debug, -}; -use ink_prelude::vec::Vec; -use ink_primitives::Key; - -/// The entry of a single cached value of a lazy storage data structure. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct StorageEntry { - /// The value or `None` if the value has been removed. - value: Option, - /// This is [`EntryState::Mutated`] if the value has been mutated and is in - /// need to be synchronized with the contract storage. If it is - /// [`EntryState::Preserved`] the value from the contract storage has been - /// preserved and does not need to be synchronized. 
- state: Cell, -} - -impl Debug for StorageEntry -where - T: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Entry") - .field("value", &self.value) - .field("state", &self.state.get()) - .finish() - } -} - -#[test] -fn debug_impl_works() { - let e1 = >::new(None, EntryState::Preserved); - assert_eq!( - format!("{:?}", &e1), - "Entry { value: None, state: Preserved }", - ); - let e2 = StorageEntry::new(Some(42), EntryState::Mutated); - assert_eq!( - format!("{:?}", &e2), - "Entry { value: Some(42), state: Mutated }", - ); -} - -/// The state of the entry. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub enum EntryState { - /// The entry's value must be synchronized with the contract storage. - Mutated, - /// The entry's value preserved the value from the contract storage. - Preserved, -} - -impl EntryState { - /// Returns `true` if the entry state is mutated. - pub fn is_mutated(self) -> bool { - match self { - EntryState::Mutated => true, - EntryState::Preserved => false, - } - } - - /// Returns `true` if the entry state is preserved. - pub fn is_preserved(self) -> bool { - !self.is_mutated() - } -} - -impl SpreadLayout for StorageEntry -where - T: SpreadLayout, -{ - const FOOTPRINT: u64 = ::FOOTPRINT; - - #[inline] - fn pull_spread(ptr: &mut KeyPtr) -> Self { - let root_key = ExtKeyPtr::next_for::(ptr); - Self::pull_spread_root(root_key) - } - - #[inline] - fn push_spread(&self, ptr: &mut KeyPtr) { - let root_key = ExtKeyPtr::next_for::(ptr); - self.push_spread_root(root_key) - } - - #[inline] - fn clear_spread(&self, ptr: &mut KeyPtr) { - let root_key = ExtKeyPtr::next_for::(ptr); - self.clear_spread_root(root_key) - } -} - -impl SpreadAllocate for StorageEntry -where - T: SpreadLayout, -{ - #[inline] - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - let root_key = ExtKeyPtr::next_for::(ptr); - Self::pull_spread_root(root_key) - } -} - -impl scale::Encode for StorageEntry -where - T: scale::Encode, -{ - #[inline] - fn size_hint(&self) -> usize { - as scale::Encode>::size_hint(&self.value) - } - - #[inline] - fn encode_to(&self, dest: &mut O) { - as scale::Encode>::encode_to(&self.value, dest) - } - - #[inline] - fn encode(&self) -> Vec { - as scale::Encode>::encode(&self.value) - } - - #[inline] - fn using_encoded R>(&self, f: F) -> R { - as scale::Encode>::using_encoded(&self.value, f) - } -} - -impl scale::Decode for StorageEntry -where - T: scale::Decode, -{ - fn decode(input: &mut I) -> Result { - Ok(Self::new( - as scale::Decode>::decode(input)?, - EntryState::Preserved, - )) - } -} - -impl PackedLayout for StorageEntry -where - T: PackedLayout, -{ - #[inline] - fn pull_packed(&mut self, at: &Key) { - PackedLayout::pull_packed(&mut self.value, at) - } - - #[inline] - fn push_packed(&self, at: &Key) { - PackedLayout::push_packed(&self.value, at) - } - - #[inline] - fn clear_packed(&self, at: &Key) { - PackedLayout::clear_packed(&self.value, at) - } -} - -impl PackedAllocate for StorageEntry -where - T: PackedAllocate, -{ - #[inline] - fn allocate_packed(&mut self, at: &Key) { - PackedAllocate::allocate_packed(&mut self.value, at) - } -} - -impl StorageEntry -where - T: SpreadLayout, -{ - /// Pulls the entity from the underlying associated storage as a `SpreadLayout` - /// storage layout representation. - /// - /// # Note - /// - /// Mainly used by lazy storage abstractions that only allow operating on - /// packed storage entities such as [`LazyCell`][`crate::lazy::LazyCell`]. 
- pub fn pull_spread_root(root_key: &Key) -> Self { - Self::new(pull_spread_root_opt::(root_key), EntryState::Preserved) - } - - /// Pushes the underlying associated data to the contract storage using - /// the `SpreadLayout` storage layout. - /// - /// # Note - /// - /// Mainly used by lazy storage abstractions that only allow operating on - /// packed storage entities such as [`LazyCell`][`crate::lazy::LazyCell`]. - pub fn push_spread_root(&self, root_key: &Key) { - let old_state = self.replace_state(EntryState::Preserved); - if old_state.is_mutated() { - push_spread_root_opt::(self.value().into(), root_key); - } - } - - /// Clears the underlying associated storage as `SpreadLayout` storage layout representation. - /// - /// # Note - /// - /// Mainly used by lazy storage abstractions that only allow operating on - /// packed storage entities such as [`LazyCell`][`crate::lazy::LazyCell`]. - pub fn clear_spread_root(&self, root_key: &Key) { - clear_spread_root_opt::(root_key, || self.value().into()); - } -} - -impl StorageEntry -where - T: PackedLayout, -{ - /// Pulls the entity from the underlying associated storage as packed representation. - /// - /// # Note - /// - /// Mainly used by lazy storage abstractions that only allow operating on - /// packed storage entities such as [`LazyIndexMap`][`crate::lazy::LazyIndexMap`] or - /// [`LazyArray`][`crate::lazy::LazyArray`]. - pub fn pull_packed_root(root_key: &Key) -> Self { - Self::new(pull_packed_root_opt::(root_key), EntryState::Preserved) - } - - /// Pushes the underlying associated storage as packed representation. - /// - /// # Note - /// - /// Mainly used by lazy storage abstractions that only allow operating on - /// packed storage entities such as [`LazyIndexMap`][`crate::lazy::LazyIndexMap`] - /// or [`LazyArray`][`crate::lazy::LazyArray`]. - pub fn push_packed_root(&self, root_key: &Key) { - let old_state = self.replace_state(EntryState::Preserved); - if old_state.is_mutated() { - push_packed_root_opt::(self.value().into(), root_key); - } - } - - /// Clears the underlying associated storage as packed representation. - /// - /// # Note - /// - /// Mainly used by lazy storage abstractions that only allow operating on - /// packed storage entities such as [`LazyIndexMap`][`crate::lazy::LazyIndexMap`] - /// or [`LazyArray`][`crate::lazy::LazyArray`]. - pub fn clear_packed_root(&self, root_key: &Key) { - clear_packed_root::>(self.value(), root_key); - } -} - -impl StorageEntry { - /// Creates a new entry with the value and state. - pub fn new(value: Option, state: EntryState) -> Self { - Self { - value, - state: Cell::new(state), - } - } - - /// Replaces the current entry state with the new state and returns it. - pub fn replace_state(&self, new_state: EntryState) -> EntryState { - // The implementation of `Cell::set` uses `Cell::replace` so instead - // of offering both APIs we simply opted to offer just the more general - // replace API for `Entry`. - self.state.replace(new_state) - } - - /// Returns a shared reference to the value of the entry. - pub fn value(&self) -> &Option { - &self.value - } - - /// Returns an exclusive reference to the entry value. - /// - /// # Note - /// - /// This changes the `mutate` state of the entry if the entry was occupied - /// since the caller could potentially change the returned value. - pub fn value_mut(&mut self) -> &mut Option { - if self.value.is_some() { - self.state.set(EntryState::Mutated); - } - &mut self.value - } - - /// Converts the entry into its value. 
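// The write-back rule implemented above amounts to a dirty flag: an entry is
// only pushed to storage if it was mutated after being loaded. A std-only
// sketch of that rule (the names and the `flush` callback are illustrative,
// not the removed API):
use core::cell::Cell;

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum State {
    Preserved,
    Mutated,
}

struct Entry<T> {
    value: Option<T>,
    state: Cell<State>,
}

impl<T> Entry<T> {
    fn new(value: Option<T>) -> Self {
        Self { value, state: Cell::new(State::Preserved) }
    }

    /// Replaces the value and marks the entry dirty if anything changed.
    fn put(&mut self, new_value: Option<T>) -> Option<T> {
        let new_is_some = new_value.is_some();
        let old = core::mem::replace(&mut self.value, new_value);
        if old.is_some() || new_is_some {
            self.state.set(State::Mutated);
        }
        old
    }

    /// Writes back only if the entry is dirty, then resets the flag.
    fn flush(&self, write: impl FnOnce(&Option<T>)) {
        if self.state.replace(State::Preserved) == State::Mutated {
            write(&self.value);
        }
    }
}

fn main() {
    let mut entry = Entry::new(Some(1u8));
    entry.flush(|_| unreachable!("a freshly loaded entry is not written back"));
    entry.put(Some(2));
    let mut written = false;
    entry.flush(|value| {
        written = true;
        assert_eq!(value, &Some(2));
    });
    assert!(written);
}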
- pub fn into_value(self) -> Option { - self.value - } - - /// Puts the new value into the entry and returns the old value. - /// - /// # Note - /// - /// This changes the `mutate` state of the entry to `true` as long as at - /// least one of `old_value` and `new_value` is `Some`. - pub fn put(&mut self, new_value: Option) -> Option { - let new_value_is_some = new_value.is_some(); - let old_value = core::mem::replace(&mut self.value, new_value); - if old_value.is_some() || new_value_is_some { - self.state.set(EntryState::Mutated); - } - old_value - } -} diff --git a/crates/storage/src/lazy/lazy_array.rs b/crates/storage/src/lazy/lazy_array.rs deleted file mode 100644 index 6e1974969a..0000000000 --- a/crates/storage/src/lazy/lazy_array.rs +++ /dev/null @@ -1,881 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - CacheCell, - EntryState, - StorageEntry, -}; -use crate::traits::{ - clear_packed_root, - pull_packed_root_opt, - ExtKeyPtr, - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, -}; -use core::{ - fmt, - fmt::Debug, - mem, - ptr::NonNull, -}; -use ink_primitives::Key; - -/// The index type used in the lazy storage chunk. -pub type Index = u32; - -/// A lazy storage array that spans over N storage cells. -/// -/// Storage data structure to emulate storage arrays: `[T; N]`. -/// -/// # Note -/// -/// Computes operations on the underlying N storage cells in a lazy fashion. -/// Due to the size constraints the `LazyArray` is generally more efficient -/// than the [`LazyMap`](`super::LazyIndexMap`) for most use cases with limited elements. -/// -/// This is mainly used as low-level storage primitives by other high-level -/// storage primitives in order to manage the contract storage for a whole -/// chunk of storage cells. -pub struct LazyArray { - /// The offset key for the N cells. - /// - /// If the lazy chunk has been initialized during contract initialization - /// the key will be `None` since there won't be a storage region associated - /// to the lazy chunk which prevents it from lazily loading elements. This, - /// however, is only checked at contract runtime. We might incorporate - /// compile-time checks for this particular use case later on. - key: Option, - /// The subset of currently cached entries of the lazy storage chunk. - /// - /// An entry is cached as soon as it is loaded or written. 
- cached_entries: EntryArray, -} - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::{ - ArrayLayout, - CellLayout, - Layout, - LayoutKey, - }; - use scale_info::TypeInfo; - - impl StorageLayout for LazyArray - where - T: TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - let capacity = N as u32; - Layout::Array(ArrayLayout::new( - LayoutKey::from(key_ptr.advance_by(capacity as u64)), - capacity, - 1, - Layout::Cell(CellLayout::new::(LayoutKey::from( - key_ptr.advance_by(0), - ))), - )) - } - } -}; - -struct DebugEntryArray<'a, T, const N: usize>(&'a EntryArray) -where - T: Debug; - -impl<'a, T, const N: usize> Debug for DebugEntryArray<'a, T, N> -where - T: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_map() - .entries( - self.0 - .iter() - .enumerate() - .filter_map(|(key, entry)| entry.as_ref().map(|entry| (key, entry))), - ) - .finish() - } -} - -impl Debug for LazyArray -where - T: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LazyArray") - .field("key", &self.key) - .field("cached_entries", &DebugEntryArray(&self.cached_entries)) - .finish() - } -} - -#[test] -fn debug_impl_works() { - let mut larray = >::new(); - // Empty imap. - assert_eq!( - format!("{:?}", &larray), - "LazyArray { key: None, cached_entries: {} }", - ); - // Filled imap. - larray.put(0, Some(1)); - larray.put(2, Some(2)); - larray.put(3, None); - assert_eq!( - format!("{:?}", &larray), - "LazyArray { \ - key: None, \ - cached_entries: {\ - 0: Entry { \ - value: Some(1), \ - state: Mutated \ - }, \ - 2: Entry { \ - value: Some(2), \ - state: Mutated \ - }, \ - 3: Entry { \ - value: None, \ - state: Mutated \ - }\ - } \ - }", - ); -} - -/// Returns the capacity for an array with the given array length. -fn array_capacity() -> u32 { - N as u32 -} - -/// The underlying array cache for the [`LazyArray`]. -#[derive(Debug)] -pub struct EntryArray { - /// The cache entries of the entry array. - entries: [CacheCell>>; N], -} - -#[derive(Debug)] -pub struct EntriesIter<'a, T> { - iter: core::slice::Iter<'a, CacheCell>>>, -} - -impl<'a, T> EntriesIter<'a, T> { - pub fn new(entry_array: &'a EntryArray) -> Self { - Self { - iter: entry_array.entries.iter(), - } - } -} - -impl<'a, T> Iterator for EntriesIter<'a, T> { - type Item = &'a Option>; - - fn next(&mut self) -> Option { - self.iter.next().map(|cell| cell.as_inner()) - } - - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn count(self) -> usize - where - Self: Sized, - { - self.iter.count() - } -} - -impl<'a, T> DoubleEndedIterator for EntriesIter<'a, T> { - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|cell| cell.as_inner()) - } -} - -impl<'a, T> ExactSizeIterator for EntriesIter<'a, T> {} - -impl EntryArray { - /// Creates a new entry array cache. - pub fn new() -> Self { - Self { - entries: array_init::array_init(|_| Default::default()), - } - } -} - -impl Default for EntryArray { - fn default() -> Self { - Self::new() - } -} - -impl EntryArray { - /// Returns the constant capacity of the lazy array. - #[inline] - pub fn capacity() -> u32 { - array_capacity::() - } - - /// Puts the new value into the indexed slot and - /// returns the old value if any. 
- fn put(&self, at: Index, new_value: Option) -> Option { - mem::replace( - unsafe { self.entries[at as usize].get_ptr().as_mut() }, - Some(StorageEntry::new(new_value, EntryState::Mutated)), - ) - .and_then(StorageEntry::into_value) - } - - /// Inserts a new entry into the cache and returns an exclusive reference to it. - unsafe fn insert_entry( - &self, - at: Index, - new_entry: StorageEntry, - ) -> NonNull> { - let entry: &mut Option> = - &mut *CacheCell::get_ptr(&self.entries[at as usize]).as_ptr(); - *entry = Some(new_entry); - entry - .as_mut() - .map(NonNull::from) - .expect("just inserted the entry") - } - - /// Returns an exclusive reference to the entry at the given index if any. - unsafe fn get_entry_mut(&self, at: Index) -> Option<&mut StorageEntry> { - if at >= Self::capacity() { - return None - } - (*CacheCell::get_ptr(&self.entries[at as usize]).as_ptr()).as_mut() - } - - /// Returns an iterator that yields shared references to all cached entries. - pub fn iter(&self) -> EntriesIter { - EntriesIter::new(self) - } -} - -impl LazyArray -where - T: PackedLayout, -{ - /// Clears the underlying storage of the entry at the given index. - /// - /// # Safety - /// - /// For performance reasons this does not synchronize the lazy array's - /// memory-side cache which invalidates future accesses the cleared entry. - /// Care should be taken when using this API. - /// - /// The general use of this API is to streamline `Drop` implementations of - /// high-level abstractions that build upon this low-level data structure. - pub fn clear_packed_at(&self, index: Index) { - let root_key = self.key_at(index).expect("cannot clear in lazy state"); - if ::REQUIRES_DEEP_CLEAN_UP { - // We need to load the entity before we remove its associated contract storage - // because it requires a deep clean-up which propagates clearing to its fields, - // for example in the case of `T` being a `storage::Box`. - let entity = self.get(index).expect("cannot clear a non existing entity"); - clear_packed_root::(entity, &root_key); - } else { - // The type does not require deep clean-up so we can simply clean-up - // its associated storage cell and be done without having to load it first. - ink_env::clear_contract_storage(&root_key); - } - } -} - -impl Default for LazyArray { - fn default() -> Self { - Self::new() - } -} - -impl LazyArray { - /// Creates a new empty lazy array. - /// - /// # Note - /// - /// A lazy array created this way cannot be used to load from the contract storage. - /// All operations that directly or indirectly load from storage will panic. - pub fn new() -> Self { - Self { - key: None, - cached_entries: Default::default(), - } - } - - /// Creates a new empty lazy array positioned at the given key. - /// - /// # Note - /// - /// This constructor is private and should never need to be called from - /// outside this module. It is used to construct a lazy array from a - /// key that is only useful upon a contract call. - /// Use [`LazyArray::new`] for construction during contract initialization. - fn lazy(key: Key) -> Self { - Self { - key: Some(key), - cached_entries: Default::default(), - } - } - - /// Returns the constant capacity of the lazy array. - #[inline] - pub fn capacity(&self) -> u32 { - array_capacity::() - } - - /// Returns the offset key of the lazy array if any. - pub fn key(&self) -> Option<&Key> { - self.key.as_ref() - } - - /// Returns a shared reference to the underlying cached entries. 
- /// - /// # Safety - /// - /// This operation is safe since it returns a shared reference from - /// a `&self` which is viable in safe Rust. - fn cached_entries(&self) -> &EntryArray { - &self.cached_entries - } - - /// Puts a new value into the given indexed slot. - /// - /// # Note - /// - /// Use [`LazyArray::put_get`]`(None)` to remove an element. - pub fn put(&mut self, at: Index, new_value: Option) { - self.cached_entries().put(at, new_value); - } -} - -impl SpreadLayout for LazyArray -where - T: PackedLayout, -{ - const FOOTPRINT: u64 = N as u64; - - #[inline] - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - let offset_key = ExtKeyPtr::next_for::(ptr); - let mut root_key = Key::default(); - for (index, entry) in self.cached_entries().iter().enumerate() { - if let Some(entry) = entry { - offset_key.add_assign_using(index as u64, &mut root_key); - entry.push_packed_root(&root_key); - } - } - } - - #[inline] - fn clear_spread(&self, _ptr: &mut KeyPtr) { - // Low-level lazy abstractions won't perform automated clean-up since - // they generally are not aware of their entire set of associated - // elements. The high-level abstractions that build upon them are - // responsible for cleaning up. - } -} - -impl SpreadAllocate for LazyArray -where - T: PackedLayout, -{ - #[inline] - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } -} - -impl LazyArray { - /// Returns the offset key for the given index if not out of bounds. - pub fn key_at(&self, at: Index) -> Option { - if at >= self.capacity() { - return None - } - self.key.map(|mut key| { - key += at as u64; - key - }) - } -} - -impl LazyArray -where - T: PackedLayout, -{ - /// Loads the entry at the given index. - /// - /// Tries to load the entry from cache and falls back to lazily load the - /// entry from the contract storage. - fn load_through_cache(&self, at: Index) -> NonNull> { - assert!(at < self.capacity(), "index is out of bounds"); - match unsafe { self.cached_entries.get_entry_mut(at) } { - Some(entry) => { - // Load value from cache. - NonNull::from(entry) - } - None => { - // Load value from storage and put into cache. - // Then load value from cache. - let value = self - .key_at(at) - .map(|key| pull_packed_root_opt::(&key)) - .unwrap_or(None); - let entry = StorageEntry::new(value, EntryState::Preserved); - unsafe { self.cached_entries.insert_entry(at, entry) } - } - } - } - - /// Loads the entry at the given index. - /// - /// Tries to load the entry from cache and falls back to lazily load the - /// entry from the contract storage. - /// - /// # Panics - /// - /// - If the lazy array is in a state that forbids lazy loading. - /// - If the given index is out of bounds. - fn load_through_cache_mut(&mut self, index: Index) -> &mut StorageEntry { - // SAFETY: - // Returning a `&mut Entry` from within a `&mut self` function - // won't allow creating aliasing between exclusive references. - unsafe { &mut *self.load_through_cache(index).as_ptr() } - } - - /// Returns a shared reference to the element at the given index if any. - /// - /// # Note - /// - /// This operation eventually loads from contract storage. - /// - /// # Panics - /// - /// If the given index is out of bounds. - pub fn get(&self, at: Index) -> Option<&T> { - unsafe { &*self.load_through_cache(at).as_ptr() } - .value() - .into() - } - - /// Returns an exclusive reference to the element at the given index if any. 
- /// - /// # Note - /// - /// This operation eventually loads from contract storage. - /// - /// # Panics - /// - /// If the given index is out of bounds. - pub fn get_mut(&mut self, at: Index) -> Option<&mut T> { - self.load_through_cache_mut(at).value_mut().into() - } - - /// Puts the new value into the indexed slot and returns the old value if any. - /// - /// # Note - /// - /// - This operation eventually loads from contract storage. - /// - Prefer [`LazyArray::put`] if you are not interested in the old value. - /// - Use [`LazyArray::put_get`]`(None)` to remove an element. - /// - /// # Panics - /// - /// If the given index is out of bounds. - pub fn put_get(&mut self, at: Index, new_value: Option) -> Option { - self.load_through_cache_mut(at).put(new_value) - } - - /// Swaps the values at indices x and y. - /// - /// # Note - /// - /// This operation eventually loads from contract storage. - /// - /// # Panics - /// - /// If any of the given indices is out of bounds. - pub fn swap(&mut self, a: Index, b: Index) { - assert!(a < self.capacity(), "a is out of bounds"); - assert!(b < self.capacity(), "b is out of bounds"); - if a == b { - // Bail out early if both indices are the same. - return - } - let (loaded_a, loaded_b) = - // SAFETY: The loaded `x` and `y` entries are distinct from each - // other guaranteed by the previous checks so they cannot - // alias. - unsafe { ( - &mut *self.load_through_cache(a).as_ptr(), - &mut *self.load_through_cache(b).as_ptr(), - ) }; - if loaded_a.value().is_none() && loaded_b.value().is_none() { - // Bail out since nothing has to be swapped if both values are `None`. - return - } - // At this point at least one of the values is `Some` so we have to - // perform the swap and set both entry states to mutated. - loaded_a.replace_state(EntryState::Mutated); - loaded_b.replace_state(EntryState::Mutated); - core::mem::swap(loaded_a.value_mut(), loaded_b.value_mut()); - } -} - -#[cfg(test)] -mod tests { - use super::{ - super::{ - EntryState, - StorageEntry, - }, - Index, - LazyArray, - }; - use crate::traits::{ - KeyPtr, - SpreadLayout, - }; - use ink_primitives::Key; - - /// Asserts that the cached entries of the given `imap` is equal to the `expected` slice. - fn assert_cached_entries( - larray: &LazyArray, - expected: &[(Index, StorageEntry)], - ) { - let mut len = 0; - for (given, expected) in larray - .cached_entries() - .iter() - .enumerate() - .filter_map(|(index, entry)| { - entry.as_ref().map(|entry| (index as u32, entry)) - }) - .zip(expected.iter().map(|(index, entry)| (*index, entry))) - { - assert_eq!(given, expected); - len += 1; - } - assert_eq!(len, expected.len()); - } - - #[test] - fn new_works() { - let larray = >::new(); - // Key must be none. - assert_eq!(larray.key(), None); - assert_eq!(larray.key_at(0), None); - assert_eq!(larray.capacity(), 4); - // Cached elements must be empty. - assert_cached_entries(&larray, &[]); - // Same as default: - let default_larray = >::default(); - assert_eq!(default_larray.key(), larray.key()); - assert_eq!(default_larray.key_at(0), larray.key_at(0)); - assert_eq!(larray.capacity(), 4); - assert_cached_entries(&default_larray, &[]); - } - - fn add_key(key: &Key, offset: u64) -> Key { - let mut result = *key; - result += offset; - result - } - - #[test] - fn lazy_works() { - let key = Key::from([0x42; 32]); - let larray = >::lazy(key); - // Key must be Some. 
- assert_eq!(larray.key(), Some(&key)); - assert_eq!(larray.key_at(0), Some(key)); - assert_eq!(larray.key_at(1), Some(add_key(&key, 1))); - assert_eq!(larray.capacity(), 4); - // Cached elements must be empty. - assert_cached_entries(&larray, &[]); - } - - #[test] - fn get_works() { - let mut larray = >::new(); - let nothing_changed = &[ - (0, StorageEntry::new(None, EntryState::Preserved)), - (1, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(Some(b'D'), EntryState::Mutated)), - ]; - // Put some values. - assert_eq!(larray.put_get(0, None), None); - assert_eq!(larray.put_get(1, Some(b'B')), None); - assert_eq!(larray.put_get(2, None), None); - assert_eq!(larray.put_get(3, Some(b'D')), None); - assert_cached_entries(&larray, nothing_changed); - // `get` works: - assert_eq!(larray.get(0), None); - assert_eq!(larray.get(1), Some(&b'B')); - assert_eq!(larray.get(2), None); - assert_eq!(larray.get(3), Some(&b'D')); - assert_cached_entries(&larray, nothing_changed); - // `get_mut` works: - assert_eq!(larray.get_mut(0), None); - assert_eq!(larray.get_mut(1), Some(&mut b'B')); - assert_eq!(larray.get_mut(2), None); - assert_eq!(larray.get_mut(3), Some(&mut b'D')); - assert_cached_entries(&larray, nothing_changed); - } - - #[test] - #[should_panic(expected = "index is out of bounds")] - fn get_out_of_bounds_works() { - let larray = >::new(); - let _ = larray.get(4); - } - - #[test] - fn put_get_works() { - let mut larray = >::new(); - // Assert that the array cache is empty at first. - assert_cached_entries(&larray, &[]); - // Put none values. - assert_eq!(larray.put_get(0, None), None); - assert_eq!(larray.put_get(1, None), None); - assert_eq!(larray.put_get(3, None), None); - assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(None, EntryState::Preserved)), - (1, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Override with some values. - assert_eq!(larray.put_get(0, Some(b'A')), None); - assert_eq!(larray.put_get(1, Some(b'B')), None); - assert_eq!(larray.put_get(3, None), None); - assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (1, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Override some values with none. - assert_eq!(larray.put_get(1, None), Some(b'B')); - assert_eq!(larray.put_get(3, None), None); - assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (1, StorageEntry::new(None, EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - } - - #[test] - #[should_panic(expected = "index is out of bounds")] - fn put_get_out_of_bounds_works() { - let mut larray = >::new(); - let _ = larray.put_get(4, Some(b'A')); - } - - #[test] - fn put_works() { - let mut larray = >::new(); - // Put some values. - larray.put(0, None); - larray.put(1, Some(b'B')); - larray.put(3, None); - // The main difference between `put` and `put_get` is that `put` never - // loads from storage which also has one drawback: Putting a `None` - // value always ends-up in `Mutated` state for the entry even if the - // entry is already `None`. 
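The comment above pins down the difference between `put` and `put_get`: `put` never reads storage, so its cache entry is always flagged `Mutated`, while `put_get` loads the slot first and leaves a `None`-to-`None` write in the `Preserved` state. A standalone sketch of that rule using plain std types instead of the removed `StorageEntry` (names and the `loaded` parameter are illustrative only):

use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum EntryState { Preserved, Mutated }

#[derive(Default)]
struct Cache { entries: BTreeMap<u32, (Option<u8>, EntryState)> }

impl Cache {
    /// `put`: cache the new value blindly; no storage read, always `Mutated`.
    fn put(&mut self, at: u32, new_value: Option<u8>) {
        self.entries.insert(at, (new_value, EntryState::Mutated));
    }

    /// `put_get`: `loaded` stands in for the lazily pulled storage value.
    /// The entry stays `Preserved` only if nothing changed (old and new are both `None`).
    fn put_get(&mut self, at: u32, new_value: Option<u8>, loaded: Option<u8>) -> Option<u8> {
        let entry = self.entries.entry(at).or_insert((loaded, EntryState::Preserved));
        let old = std::mem::replace(&mut entry.0, new_value);
        if old.is_some() || entry.0.is_some() {
            entry.1 = EntryState::Mutated;
        }
        old
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.put(0, None);
    assert_eq!(cache.entries[&0], (None, EntryState::Mutated));
    let mut cache = Cache::default();
    assert_eq!(cache.put_get(0, None, None), None);
    assert_eq!(cache.entries[&0], (None, EntryState::Preserved));
}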
- assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(None, EntryState::Mutated)), - (1, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Mutated)), - ], - ); - // Overwrite entries: - larray.put(0, Some(b'A')); - larray.put(1, None); - larray.put(2, Some(b'C')); - larray.put(3, None); - assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'C'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Mutated)), - ], - ); - } - - #[test] - #[should_panic(expected = "index out of bounds: the len is 4 but the index is 4")] - fn put_out_of_bounds_works() { - let mut larray = >::new(); - larray.put(4, Some(b'A')); - } - - #[test] - fn swap_works() { - let mut larray = >::new(); - let nothing_changed = &[ - (0, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (1, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ]; - // Put some values. - assert_eq!(larray.put_get(0, Some(b'A')), None); - assert_eq!(larray.put_get(1, Some(b'B')), None); - assert_eq!(larray.put_get(2, None), None); - assert_eq!(larray.put_get(3, None), None); - assert_cached_entries(&larray, nothing_changed); - // Swap same indices: Check that nothing has changed. - for i in 0..4 { - larray.swap(i, i); - } - assert_cached_entries(&larray, nothing_changed); - // Swap `None` values: Check that nothing has changed. - larray.swap(2, 3); - larray.swap(3, 2); - assert_cached_entries(&larray, nothing_changed); - // Swap `Some` and `None`: - larray.swap(0, 2); - assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(None, EntryState::Mutated)), - (1, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap `Some` and `Some`: - larray.swap(1, 2); - assert_cached_entries( - &larray, - &[ - (0, StorageEntry::new(None, EntryState::Mutated)), - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - } - - #[test] - #[should_panic(expected = "b is out of bounds")] - fn swap_rhs_out_of_bounds() { - let mut larray = >::new(); - larray.swap(0, 4); - } - - #[test] - #[should_panic(expected = "a is out of bounds")] - fn swap_both_out_of_bounds() { - let mut larray = >::new(); - larray.swap(4, 4); - } - - #[test] - fn spread_layout_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let mut larray = >::new(); - let nothing_changed = &[ - (0, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (1, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ]; - // Put some values. - assert_eq!(larray.put_get(0, Some(b'A')), None); - assert_eq!(larray.put_get(1, Some(b'B')), None); - assert_eq!(larray.put_get(2, None), None); - assert_eq!(larray.put_get(3, None), None); - assert_cached_entries(&larray, nothing_changed); - // Push the lazy index map onto the contract storage and then load - // another instance of it from the contract stoarge. - // Then: Compare both instances to be equal. 
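The test that continues below pushes the array under a root key and pulls a fresh instance back. Stripped of the caching machinery, that round trip amounts to writing each occupied slot at `root + index` and reading it back lazily; a sketch under that simplification, with `u64` keys and a `BTreeMap` standing in for contract storage:

use std::collections::BTreeMap;

type Storage = BTreeMap<u64, u8>;

fn push_spread(storage: &mut Storage, root: u64, cached: &[(u32, Option<u8>)]) {
    for (index, value) in cached {
        if let Some(v) = value {
            // Only occupied slots are written; empty slots stay absent.
            storage.insert(root + u64::from(*index), *v);
        }
    }
}

fn pull_at(storage: &Storage, root: u64, index: u32) -> Option<u8> {
    storage.get(&(root + u64::from(index))).copied()
}

fn main() {
    let mut storage = Storage::new();
    push_spread(&mut storage, 0x42, &[(0, Some(b'A')), (1, Some(b'B')), (2, None)]);
    assert_eq!(pull_at(&storage, 0x42, 0), Some(b'A'));
    assert_eq!(pull_at(&storage, 0x42, 2), None);
}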
- let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&larray, &mut KeyPtr::from(root_key)); - let larray2 = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - assert_cached_entries(&larray2, &[]); - assert_eq!(larray2.get(0), Some(&b'A')); - assert_eq!(larray2.get(1), Some(&b'B')); - assert_eq!(larray2.get(2), None); - assert_eq!(larray2.get(3), None); - assert_cached_entries( - &larray2, - &[ - (0, StorageEntry::new(Some(b'A'), EntryState::Preserved)), - (1, StorageEntry::new(Some(b'B'), EntryState::Preserved)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Clear the first lazy index map instance and reload another instance - // to check whether the associated storage has actually been freed - // again: - SpreadLayout::clear_spread(&larray2, &mut KeyPtr::from(root_key)); - // The above `clear_spread` call is a no-op since lazy index map is - // generally not aware of its associated elements. So we have to - // manually clear them from the contract storage which is what the - // high-level data structures like `storage::Vec` would command: - larray2.clear_packed_at(0); - larray2.clear_packed_at(1); - larray2.clear_packed_at(2); // Not really needed here. - larray2.clear_packed_at(3); // Not really needed here. - let larray3 = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - assert_cached_entries(&larray3, &[]); - assert_eq!(larray3.get(0), None); - assert_eq!(larray3.get(1), None); - assert_eq!(larray3.get(2), None); - assert_eq!(larray3.get(3), None); - assert_cached_entries( - &larray3, - &[ - (0, StorageEntry::new(None, EntryState::Preserved)), - (1, StorageEntry::new(None, EntryState::Preserved)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - Ok(()) - }) - } -} diff --git a/crates/storage/src/lazy/lazy_cell.rs b/crates/storage/src/lazy/lazy_cell.rs deleted file mode 100644 index a4eb48cd12..0000000000 --- a/crates/storage/src/lazy/lazy_cell.rs +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - CacheCell, - EntryState, - StorageEntry, -}; -use crate::traits::{ - clear_spread_root_opt, - pull_spread_root_opt, - ExtKeyPtr, - KeyPtr, - SpreadAllocate, - SpreadLayout, -}; -use core::{ - fmt, - fmt::Debug, - ptr::NonNull, -}; -use ink_primitives::Key; - -/// A lazy storage entity. -/// -/// This loads its value from storage upon first use. -/// -/// # Note -/// -/// Use this if the storage field does not need to be loaded in some or most cases. -pub struct LazyCell -where - T: SpreadLayout, -{ - /// The key to lazily load the value from. - /// - /// # Note - /// - /// This can be `None` on contract initialization where a `LazyCell` is - /// normally initialized given a concrete value. - key: Option, - /// The low-level cache for the lazily loaded storage value. 
- /// - /// # Developer Note: Safety - /// - /// We use `UnsafeCell` instead of `RefCell` because - /// the intended use-case is to hand out references (`&` and `&mut`) - /// to the callers of `Lazy`. This cannot be done without `unsafe` - /// code even with `RefCell`. Also `RefCell` has a larger memory footprint - /// and has additional overhead that we can avoid by the interface - /// and the fact that ink! code is always run single-threaded. - /// Being efficient is important here because this is intended to be - /// a low-level primitive with lots of dependencies. - cache: CacheCell>>, -} - -impl Debug for LazyCell -where - T: Debug + SpreadLayout, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LazyCell") - .field("key", &self.key) - .field("cache", self.cache.as_inner()) - .finish() - } -} - -#[test] -fn debug_impl_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let c1 = >::new(None); - assert_eq!( - format!("{:?}", &c1), - "LazyCell { key: None, cache: Some(Entry { value: None, state: Mutated }) }", - ); - let c2 = >::new(Some(42)); - assert_eq!( - format!("{:?}", &c2), - "LazyCell { key: None, cache: Some(Entry { value: Some(42), state: Mutated }) }", - ); - let c3 = >::lazy(Key::from([0x00; 32])); - assert_eq!( - format!("{:?}", &c3), - "LazyCell { \ - key: Some(Key(0x_\ - 00000000_00000000_\ - 00000000_00000000_\ - 00000000_00000000_\ - 00000000_00000000)\ - ), \ - cache: None \ - }", - ); - Ok(()) - }) -} - -impl Drop for LazyCell -where - T: SpreadLayout, -{ - fn drop(&mut self) { - if let Some(root_key) = self.key() { - match self.entry() { - Some(entry) => { - // The inner cell needs to be cleared, no matter if it has - // been loaded or not. Otherwise there might be leftovers. - // Load from storage and then clear: - clear_spread_root_opt::(root_key, || entry.value().into()) - } - None => { - // The value is not yet in the cache. we need it in there - // though in order to properly clean up. - if ::REQUIRES_DEEP_CLEAN_UP { - // The inner cell needs to be cleared, no matter if it has - // been loaded or not. Otherwise there might be leftovers. - // Load from storage and then clear: - clear_spread_root_opt::(root_key, || self.get()) - } else { - // Clear without loading from storage: - let footprint = ::FOOTPRINT; - assert_footprint_threshold(footprint); - let mut key_ptr = KeyPtr::from(*root_key); - for _ in 0..footprint { - ink_env::clear_contract_storage(key_ptr.advance_by(1)); - } - } - } - } - } - } -} - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::Layout; - - impl StorageLayout for LazyCell - where - T: StorageLayout + SpreadLayout, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - ::layout(key_ptr) - } - } -}; - -impl SpreadLayout for LazyCell -where - T: SpreadLayout, -{ - const FOOTPRINT: u64 = ::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - let root_key = ExtKeyPtr::next_for::(ptr); - if let Some(entry) = self.entry() { - entry.push_spread_root(root_key) - } - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - let root_key = ExtKeyPtr::next_for::(ptr); - match ::REQUIRES_DEEP_CLEAN_UP { - true => { - // The inner cell needs to be cleared, no matter if it has - // been loaded or not. Otherwise there might be leftovers. 
- // Load from storage and then clear: - clear_spread_root_opt::(root_key, || self.get()) - } - false => { - // Clear without loading from storage: - let footprint = ::FOOTPRINT; - assert_footprint_threshold(footprint); - let mut key_ptr = KeyPtr::from(*root_key); - for _ in 0..footprint { - ink_env::clear_contract_storage(key_ptr.advance_by(1)); - } - } - } - } -} - -impl SpreadAllocate for LazyCell -where - T: SpreadLayout, -{ - #[inline] - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } -} - -// # Developer Note -// -// Implementing PackedLayout for LazyCell is not useful since that would -// potentially allow overlapping distinct LazyCell instances by pulling -// from the same underlying storage cell. -// -// If a user wants a packed LazyCell they can instead pack its inner type. - -impl From for LazyCell -where - T: SpreadLayout, -{ - fn from(value: T) -> Self { - Self::new(Some(value)) - } -} - -impl Default for LazyCell -where - T: Default + SpreadLayout, -{ - fn default() -> Self { - Self::new(Some(Default::default())) - } -} - -impl LazyCell -where - T: SpreadLayout, -{ - /// Creates an already populated lazy storage cell. - /// - /// # Note - /// - /// Since this already has a value it will never actually load from - /// the contract storage. - #[must_use] - pub fn new(value: Option) -> Self { - Self { - key: None, - cache: CacheCell::new(Some(StorageEntry::new(value, EntryState::Mutated))), - } - } - - /// Creates a lazy storage cell for the given key. - /// - /// # Note - /// - /// This will actually lazily load from the associated storage cell - /// upon access. - #[must_use] - pub fn lazy(key: Key) -> Self { - Self { - key: Some(key), - cache: CacheCell::new(None), - } - } - - /// Returns the lazy key if any. - /// - /// # Note - /// - /// The key is `None` if the `LazyCell` has been initialized as a value. - /// This generally only happens in ink! constructors. - fn key(&self) -> Option<&Key> { - self.key.as_ref() - } - - /// Returns the cached entry. - fn entry(&self) -> Option<&StorageEntry> { - self.cache.as_inner().as_ref() - } -} - -impl LazyCell -where - T: SpreadLayout, -{ - /// Loads the storage entry. - /// - /// Tries to load the entry from cache and falls back to lazily load the - /// entry from the contract storage. - unsafe fn load_through_cache(&self) -> NonNull> { - // SAFETY: This is critical because we mutably access the entry. - // However, we mutate the entry only if it is vacant. - // If the entry is occupied by a value we return early. - // This way we do not invalidate pointers to this value. - let cache = &mut *self.cache.get_ptr().as_ptr(); - if cache.is_none() { - // Load value from storage and then return the cached entry. - let value = self - .key - .map(|key| pull_spread_root_opt::(&key)) - .unwrap_or(None); - *cache = Some(StorageEntry::new(value, EntryState::Preserved)); - } - debug_assert!(cache.is_some()); - NonNull::from(cache.as_mut().expect("unpopulated cache entry")) - } - - /// Returns a shared reference to the entry. - fn load_entry(&self) -> &StorageEntry { - // SAFETY: We load the entry either from cache of from contract storage. - // - // This is safe because we are just returning a shared reference - // from within a `&self` method. This also cannot change the - // loaded value and thus cannot change the `mutate` flag of the - // entry. Aliases using this method are safe since ink! is - // single-threaded. 
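The `load_through_cache` helper and the `load_entry`/`load_entry_mut` accessors around this point implement load-on-first-use with a per-cell cache. The same access pattern can be sketched with safe std types: `OnceCell` replaces the `UnsafeCell`-based `CacheCell`, a `BTreeMap` plays the role of contract storage, and a `u64` stands in for `Key` (all of which are simplifying assumptions):

use std::cell::OnceCell;
use std::collections::BTreeMap;

struct LazyCellSketch {
    key: Option<u64>,
    cache: OnceCell<Option<u32>>,
}

impl LazyCellSketch {
    fn lazy(key: u64) -> Self {
        Self { key: Some(key), cache: OnceCell::new() }
    }

    /// Loads from "storage" on the first access only; later calls hit the cache.
    fn get<'a>(&'a self, storage: &BTreeMap<u64, u32>) -> Option<&'a u32> {
        self.cache
            .get_or_init(|| self.key.and_then(|k| storage.get(&k).copied()))
            .as_ref()
    }
}

fn main() {
    let mut storage = BTreeMap::new();
    storage.insert(0x42u64, 13u32);
    let cell = LazyCellSketch::lazy(0x42);
    assert_eq!(cell.get(&storage), Some(&13));
    storage.remove(&0x42);                     // later storage changes...
    assert_eq!(cell.get(&storage), Some(&13)); // ...are not observed: the cache wins
}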
- unsafe { &*self.load_through_cache().as_ptr() } - } - - /// Returns an exclusive reference to the entry. - fn load_entry_mut(&mut self) -> &mut StorageEntry { - // SAFETY: We load the entry either from cache of from contract storage. - // - // This is safe because we are just returning an exclusive reference - // from within a `&mut self` method. This may change the - // loaded value and thus the `mutate` flag of the entry is set. - // Aliases cannot happen through this method since ink! is - // single-threaded. - let entry = unsafe { &mut *self.load_through_cache().as_ptr() }; - entry.replace_state(EntryState::Mutated); - entry - } - - /// Returns a shared reference to the value. - /// - /// # Note - /// - /// This eventually lazily loads the value from the contract storage. - /// - /// # Panics - /// - /// If decoding the loaded value to `T` failed. - #[must_use] - pub fn get(&self) -> Option<&T> { - self.load_entry().value().into() - } - - /// Returns an exclusive reference to the value. - /// - /// # Note - /// - /// This eventually lazily loads the value from the contract storage. - /// - /// # Panics - /// - /// If decoding the loaded value to `T` failed. - #[must_use] - pub fn get_mut(&mut self) -> Option<&mut T> { - self.load_entry_mut().value_mut().into() - } - - /// Sets the value in this cell to `value`, without executing any reads. - /// - /// # Note - /// - /// No reads from contract storage will be executed. - /// - /// This method should be preferred over dereferencing or `get_mut` - /// in case the returned value is of no interest to the caller. - /// - /// # Panics - /// - /// If accessing the inner value fails. - #[inline] - pub fn set(&mut self, new_value: T) { - // SAFETY: This is critical because we mutably access the entry. - let cache = unsafe { &mut *self.cache.get_ptr().as_ptr() }; - if let Some(cache) = cache.as_mut() { - // Cache is already populated we simply overwrite its already existing value. - cache.put(Some(new_value)); - } else { - // Cache is empty, so we simply set the cache to the value. - // The key does not need to exist for this to work, we only need to - // write the value into the cache and are done. Writing to contract - // storage happens during setup/teardown of a contract. - *cache = Some(StorageEntry::new(Some(new_value), EntryState::Mutated)); - } - debug_assert!(cache.is_some()); - } -} - -/// Asserts that the given `footprint` is below `FOOTPRINT_CLEANUP_THRESHOLD`. -fn assert_footprint_threshold(footprint: u64) { - let footprint_threshold = crate::traits::FOOTPRINT_CLEANUP_THRESHOLD; - assert!( - footprint <= footprint_threshold, - "cannot clean-up a storage entity with a footprint of {}. 
maximum threshold for clean-up is {}.", - footprint, - footprint_threshold, - ); -} - -#[cfg(test)] -mod tests { - use super::{ - EntryState, - LazyCell, - StorageEntry, - }; - use crate::{ - traits::{ - KeyPtr, - SpreadLayout, - }, - Lazy, - }; - use ink_env::test::run_test; - use ink_primitives::Key; - - #[test] - fn new_works() { - // Initialized via some value: - let mut a = >::new(Some(b'A')); - assert_eq!(a.key(), None); - assert_eq!( - a.entry(), - Some(&StorageEntry::new(Some(b'A'), EntryState::Mutated)) - ); - assert_eq!(a.get(), Some(&b'A')); - assert_eq!(a.get_mut(), Some(&mut b'A')); - // Initialized as none: - let mut b = >::new(None); - assert_eq!(b.key(), None); - assert_eq!( - b.entry(), - Some(&StorageEntry::new(None, EntryState::Mutated)) - ); - assert_eq!(b.get(), None); - assert_eq!(b.get_mut(), None); - // Same as default or from: - let default_lc = >::default(); - let from_lc = LazyCell::from(u8::default()); - let new_lc = LazyCell::new(Some(u8::default())); - assert_eq!(default_lc.get(), from_lc.get()); - assert_eq!(from_lc.get(), new_lc.get()); - assert_eq!(new_lc.get(), Some(&u8::default())); - } - - #[test] - fn lazy_works() -> ink_env::Result<()> { - run_test::(|_| { - let root_key = Key::from([0x42; 32]); - let cell = >::lazy(root_key); - assert_eq!(cell.key(), Some(&root_key)); - Ok(()) - }) - } - - #[test] - fn lazy_get_works() -> ink_env::Result<()> { - run_test::(|_| { - let cell = >::lazy(Key::from([0x42; 32])); - let value = cell.get(); - // We do the normally unreachable check in order to have an easier - // time finding the issue if the above execution did not panic. - assert_eq!(value, None); - Ok(()) - }) - } - - #[test] - fn get_mut_works() { - let mut cell = >::new(Some(1)); - assert_eq!(cell.get(), Some(&1)); - *cell.get_mut().unwrap() += 1; - assert_eq!(cell.get(), Some(&2)); - } - - #[test] - fn spread_layout_works() -> ink_env::Result<()> { - run_test::(|_| { - let cell_a0 = >::new(Some(b'A')); - assert_eq!(cell_a0.get(), Some(&b'A')); - // Push `cell_a0` to the contract storage. - // Then, pull `cell_a1` from the contract storage and check if it is - // equal to `cell_a0`. 
- let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&cell_a0, &mut KeyPtr::from(root_key)); - let cell_a1 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(cell_a1.get(), cell_a0.get()); - assert_eq!(cell_a1.get(), Some(&b'A')); - assert_eq!( - cell_a1.entry(), - Some(&StorageEntry::new(Some(b'A'), EntryState::Preserved)) - ); - // Also test if a lazily instantiated cell works: - let cell_a2 = >::lazy(root_key); - assert_eq!(cell_a2.get(), cell_a0.get()); - assert_eq!(cell_a2.get(), Some(&b'A')); - assert_eq!( - cell_a2.entry(), - Some(&StorageEntry::new(Some(b'A'), EntryState::Preserved)) - ); - // Test if clearing works: - SpreadLayout::clear_spread(&cell_a1, &mut KeyPtr::from(root_key)); - let cell_a3 = >::lazy(root_key); - assert_eq!(cell_a3.get(), None); - assert_eq!( - cell_a3.entry(), - Some(&StorageEntry::new(None, EntryState::Preserved)) - ); - Ok(()) - }) - } - - #[test] - fn set_works() { - let mut cell = >::new(Some(1)); - cell.set(23); - assert_eq!(cell.get(), Some(&23)); - } - - #[test] - fn lazy_set_works() -> ink_env::Result<()> { - run_test::(|_| { - let mut cell = >::lazy(Key::from([0x42; 32])); - let value = cell.get(); - assert_eq!(value, None); - - cell.set(13); - assert_eq!(cell.get(), Some(&13)); - Ok(()) - }) - } - - #[test] - fn lazy_set_works_with_spread_layout_push_pull() -> ink_env::Result<()> { - run_test::(|_| { - type MaybeValue = Option; - - // Initialize a LazyCell with None and push it to `k` - let k = Key::from([0x00; 32]); - let val: MaybeValue = None; - SpreadLayout::push_spread(&Lazy::new(val), &mut KeyPtr::from(k)); - - // Pull another instance `v` from `k`, check that it is `None` - let mut v = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(k)); - assert_eq!(*v, None); - - // Set `v` using `set` to an actual value - let actual_value: MaybeValue = Some(13); - Lazy::set(&mut v, actual_value); - - // Push `v` to `k` - SpreadLayout::push_spread(&v, &mut KeyPtr::from(k)); - - // Load `v2` from `k` - let v2 = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(k)); - - // Check that V2 is the set value - assert_eq!(*v2, Some(13)); - - Ok(()) - }) - } - - #[test] - fn regression_test_for_issue_528() -> ink_env::Result<()> { - run_test::(|_| { - let root_key = Key::from([0x00; 32]); - { - // Step 1: Push a valid pair onto the contract storage. - let pair = (LazyCell::new(Some(1i32)), 2i32); - SpreadLayout::push_spread(&pair, &mut KeyPtr::from(root_key)); - } - { - // Step 2: Pull the pair from the step before. - // - // 1. Change the second `i32` value of the pair. - // 2. Push the pair again to contract storage. - // - // We prevent the intermediate instance from clearing the storage preemtively by wrapping - // it inside `ManuallyDrop`. The third step will clean up the same storage region afterwards. - // - // We explicitly do not touch or assert the value of `pulled_pair.0` in order to trigger - // the bug. - let pulled_pair: (LazyCell, i32) = - SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); - let mut pulled_pair = core::mem::ManuallyDrop::new(pulled_pair); - assert_eq!(pulled_pair.1, 2i32); - pulled_pair.1 = 3i32; - SpreadLayout::push_spread(&*pulled_pair, &mut KeyPtr::from(root_key)); - } - { - // Step 3: Pull the pair again from the storage. 
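The regression tests here and below wrap the intermediate pulled instances in `core::mem::ManuallyDrop` so that dropping them does not clear the storage region a later step still reads. That behaviour is plain std `ManuallyDrop`, shown here outside any storage context:

use core::mem::ManuallyDrop;

struct ClearsStorageOnDrop;

impl Drop for ClearsStorageOnDrop {
    fn drop(&mut self) {
        // In the real tests this is where the storage cells would be cleared.
        println!("storage cleared");
    }
}

fn main() {
    let _wrapped = ManuallyDrop::new(ClearsStorageOnDrop);
    // `drop` never runs for `_wrapped`, so nothing is printed here.
}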
- // - // If the bug with `Lazy` that has been fixed in PR #528 has been fixed we should be - // able to inspect the correct values for both pair entries which is: `(Some(1), 3)` - let pulled_pair: (LazyCell, i32) = - SpreadLayout::pull_spread(&mut KeyPtr::from(root_key)); - assert_eq!(pulled_pair.0.get(), Some(&1i32)); - assert_eq!(pulled_pair.1, 3i32); - } - Ok(()) - }) - } - - #[test] - fn regression_test_for_issue_570() -> ink_env::Result<()> { - run_test::(|_| { - let root_key = Key::from([0x00; 32]); - { - // Step 1: Push two valid values one after the other to contract storage. - // The first value needs to be an `Option::None` value, since the bug was - // then messing up following pointers. - let v1: Option = None; - let v2: u32 = 13; - let mut ptr = KeyPtr::from(root_key); - - SpreadLayout::push_spread(&v1, &mut ptr); - SpreadLayout::push_spread(&v2, &mut ptr); - } - { - // Step 2: Pull the values from the step before. - // - // 1. Change the first values `None` to `Some(...)`. - // 2. Push the first value again to contract storage. - // - // We prevent the intermediate instance from clearing the storage preemptively - // by wrapping it inside `ManuallyDrop`. The third step will clean up the same - // storage region afterwards. - let mut ptr = KeyPtr::from(root_key); - let pulled_v1: Option = SpreadLayout::pull_spread(&mut ptr); - let mut pulled_v1 = core::mem::ManuallyDrop::new(pulled_v1); - - let pulled_v2: u32 = SpreadLayout::pull_spread(&mut ptr); - let pulled_v2 = core::mem::ManuallyDrop::new(pulled_v2); - - assert_eq!(*pulled_v1, None); - assert_eq!(*pulled_v2, 13); - - *pulled_v1 = Some(99u32); - SpreadLayout::push_spread(&*pulled_v1, &mut KeyPtr::from(root_key)); - } - { - // Step 3: Pull the values again from the storage. - // - // If the bug with `Option` has been fixed in PR #520 we must be able to inspect - // the correct values for both entries. - let mut ptr = KeyPtr::from(root_key); - let pulled_v1: Option = SpreadLayout::pull_spread(&mut ptr); - let pulled_v2: u32 = SpreadLayout::pull_spread(&mut ptr); - - assert_eq!(pulled_v1, Some(99)); - assert_eq!(pulled_v2, 13); - } - Ok(()) - }) - } - - #[test] - fn second_regression_test_for_issue_570() -> ink_env::Result<()> { - run_test::(|_| { - // given - let root_key = Key::from([0x00; 32]); - let none: Option = None; - let some: Option = Some(13); - - // when - let mut ptr_push_none = KeyPtr::from(root_key); - SpreadLayout::push_spread(&none, &mut ptr_push_none); - let mut ptr_pull_none = KeyPtr::from(root_key); - let v1: Option = SpreadLayout::pull_spread(&mut ptr_pull_none); - assert!(v1.is_none()); - let mut ptr_clear_none = KeyPtr::from(root_key); - SpreadLayout::clear_spread(&none, &mut ptr_clear_none); - - let mut ptr_push_some = KeyPtr::from(root_key); - SpreadLayout::push_spread(&some, &mut ptr_push_some); - let mut ptr_pull_some = KeyPtr::from(root_key); - let v2: Option = SpreadLayout::pull_spread(&mut ptr_pull_some); - assert!(v2.is_some()); - let mut ptr_clear_some = KeyPtr::from(root_key); - SpreadLayout::clear_spread(&some, &mut ptr_clear_some); - - // then - // the bug which we observed was that the pointer after push/pull/clear - // was set so a different value if the `Option` was `None` vs. if it was - // `Some`. - // - // if the bug has been fixed the pointer must be the same for `None` - // and `Some` after push/pull/clear. otherwise subsequent operations using - // the pointer will break as soon as the `Option` is changed to it's - // opposite (`None` -> `Some`, `Some` -> `None`). 
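The assertions that follow encode the invariant the fix restored: after push, pull, or clear of an `Option<u32>`, the key pointer must sit exactly two cells past the root key, whether the value was `None` or `Some`. A toy model of that advancement rule, with a plain `u64` counter standing in for `KeyPtr`:

fn post_op_ptr(root: u64, value: Option<u32>) -> u64 {
    let mut ptr = root;
    ptr += 1; // the cell holding `value.is_some() as u8`
    match value {
        Some(_) => ptr += 1, // the cell holding the inner value is read or written
        None => ptr += 1,    // the cell is skipped, but the pointer still advances
    }
    ptr
}

fn main() {
    assert_eq!(post_op_ptr(0, None), post_op_ptr(0, Some(13)));
}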
- let mut expected_post_op_ptr = KeyPtr::from(root_key); - // advance one time after the cell containing `self.is_some() as u8` has been read - expected_post_op_ptr.advance_by(1); - // advance another time after the cell containing the inner `Option` value - // has either been skipped (in case of the previous cell being `None`) or - // read (in case of `Some`). - expected_post_op_ptr.advance_by(1); - - assert_eq!(expected_post_op_ptr, ptr_push_none); - assert_eq!(ptr_push_none, ptr_push_some); - - assert_eq!(expected_post_op_ptr, ptr_pull_none); - assert_eq!(ptr_pull_none, ptr_pull_some); - - assert_eq!(expected_post_op_ptr, ptr_clear_none); - assert_eq!(ptr_clear_none, ptr_clear_some); - - Ok(()) - }) - } - - #[test] - #[should_panic(expected = "encountered empty storage cell")] - fn nested_lazies_are_cleared_completely_after_pull() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - let nested_lazy: Lazy> = Lazy::new(Lazy::new(13u32)); - SpreadLayout::push_spread(&nested_lazy, &mut KeyPtr::from(root_key)); - let pulled_lazy = > as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - - // when - SpreadLayout::clear_spread(&pulled_lazy, &mut KeyPtr::from(root_key)); - - // then - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - let _ = *> as SpreadLayout>::pull_spread(&mut KeyPtr::from( - root_key, - )); - Ok(()) - }) - .unwrap() - } - - #[test] - #[should_panic(expected = "encountered empty storage cell")] - fn lazy_drop_works() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - - // when - let setup_result = std::panic::catch_unwind(|| { - let lazy: Lazy = Lazy::new(13u32); - SpreadLayout::push_spread(&lazy, &mut KeyPtr::from(root_key)); - let _pulled_lazy = - as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - // lazy is dropped which should clear the cells - }); - assert!(setup_result.is_ok(), "setup should not panic"); - - // then - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - let _ = - * as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() - } - - #[test] - #[should_panic(expected = "encountered empty storage cell")] - fn lazy_drop_works_with_greater_footprint() { - ink_env::test::run_test::(|_| { - // given - let root_key = Key::from([0x42; 32]); - - // when - let setup_result = std::panic::catch_unwind(|| { - let lazy: Lazy<[u32; 5]> = Lazy::new([13, 14, 15, 16, 17]); - SpreadLayout::push_spread(&lazy, &mut KeyPtr::from(root_key)); - let _pulled_lazy = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - // lazy is dropped which should clear the cells - }); - assert!(setup_result.is_ok(), "setup should not panic"); - - // then - let contract_id = ink_env::test::callee::(); - let used_cells = ink_env::test::count_used_storage_cells::< - ink_env::DefaultEnvironment, - >(&contract_id) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); - let _ = - * as SpreadLayout>::pull_spread(&mut KeyPtr::from(root_key)); - Ok(()) - }) - .unwrap() - } -} diff --git a/crates/storage/src/lazy/lazy_hmap.rs b/crates/storage/src/lazy/lazy_hmap.rs deleted file mode 100644 index 
7691a34c36..0000000000 --- a/crates/storage/src/lazy/lazy_hmap.rs +++ /dev/null @@ -1,1360 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A lazy storage mapping that stores entries under their SCALE encoded key hashes. - -use super::{ - CacheCell, - EntryState, - StorageEntry, -}; -use crate::traits::{ - clear_packed_root, - pull_packed_root_opt, - ExtKeyPtr, - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, -}; -use core::{ - borrow::Borrow, - cmp::{ - Eq, - Ord, - }, - fmt, - fmt::Debug, - marker::PhantomData, - ptr::NonNull, -}; -use ink_env::hash::{ - CryptoHash, - HashOutput, -}; -use ink_prelude::{ - borrow::ToOwned, - boxed::Box, - collections::btree_map::{ - BTreeMap, - Entry as BTreeMapEntry, - OccupiedEntry as BTreeMapOccupiedEntry, - }, -}; -use ink_primitives::Key; - -/// The map for the contract storage entries. -/// -/// # Note -/// -/// We keep the whole entry in a `Box` in order to prevent pointer -/// invalidation upon updating the cache through `&self` methods as in -/// [`LazyHashMap::get`]. -pub type EntryMap = BTreeMap>>; - -/// A lazy storage mapping that stores entries under their SCALE encoded key hashes. -/// -/// # Note -/// -/// This is mainly used as low-level storage primitives by other high-level -/// storage primitives in order to manage the contract storage for a whole -/// mapping of storage cells. -/// -/// This storage data structure might store its entries anywhere in the contract -/// storage. It is the users responsibility to keep track of the entries if it -/// is necessary to do so. -pub struct LazyHashMap { - /// The offset key for the storage mapping. - /// - /// This offsets the mapping for the entries stored in the contract storage - /// so that all lazy hash map instances store equal entries at different - /// locations of the contract storage and avoid collisions. - key: Option, - /// The currently cached entries of the lazy storage mapping. - /// - /// This normally only represents a subset of the total set of elements. - /// An entry is cached as soon as it is loaded or written. - cached_entries: CacheCell>, - /// The used hash builder. - hash_builder: PhantomData, -} - -/// When querying `entry()` there is a case which needs special treatment: -/// In `entry()` we first do a look-up in the cache. If the requested key is -/// in the cache we return the found object. -/// If it is not in the cache we query the storage. If we find the element -/// in storage we insert it into the cache. -/// -/// The problem now is that in this case we only have the `Vacant` object -/// which we got from searching in the cache, but we need to return -/// `Occupied` here, since the object is now in the cache. We could do this -/// by querying the cache another time -- but this would be an additional -/// search. So what we do instead is to save a reference to the inserted -/// cache value in the `Occupied`. 
As a consequence all Entry API operations -/// (`get`, `remove`, …) need to distinguish both cases. -enum EntryOrMutableValue { - /// An occupied `EntryMap` entry that holds a value. - /// This represents the case where the key was in the cache. - EntryElementWasInCache(E), - /// A reference to the mutable value behind a cache entry. - /// This represents the case where the key was not in the cache, but in storage. - MutableValueElementWasNotInCache(V), -} - -/// An occupied `EntryMap` entry that holds a value. -type OccupiedCache<'a, K, V> = BTreeMapOccupiedEntry<'a, K, Box>>; - -/// An occupied entry that holds the value. -pub struct OccupiedEntry<'a, K, V> -where - K: Clone, -{ - /// The key stored in this entry. - key: K, - /// Either the occupied `EntryMap` entry that holds the value or a mutable reference - /// to the value behind a cache entry. - entry: EntryOrMutableValue, &'a mut Box>>, -} - -/// A vacant entry with previous and next vacant indices. -pub struct VacantEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// The key stored in this entry. - key: K, - /// The entry within the `LazyHashMap`. This entry can be either occupied or vacant. - /// In an `BTreeMapEntry::Occupied` state the entry has been marked to - /// be removed (with `None`), but we still want to expose the `VacantEntry` API - /// to the use. - /// In an `BTreeMapEntry::Vacant` state the entry is vacant, and we want to expose - /// the `VacantEntry` API. - entry: BTreeMapEntry<'a, K, Box>>, -} - -/// An entry within the `LazyHashMap`. -pub enum Entry<'a, K: 'a, V: 'a> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// A vacant entry that holds the index to the next and previous vacant entry. - Vacant(VacantEntry<'a, K, V>), - /// An occupied entry that holds the value. - Occupied(OccupiedEntry<'a, K, V>), -} - -struct DebugEntryMap<'a, K, V>(&'a CacheCell>); - -impl<'a, K, V> Debug for DebugEntryMap<'a, K, V> -where - K: Debug, - V: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_map().entries(self.0.as_inner().iter()).finish() - } -} - -impl Debug for LazyHashMap -where - K: Debug, - V: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // The `hash_builder` field is not really required or needed for debugging purposes. - f.debug_struct("LazyHashMap") - .field("key", &self.key) - .field("cached_entries", &DebugEntryMap(&self.cached_entries)) - .finish() - } -} - -#[test] -fn debug_impl_works() { - use ink_env::hash::Blake2x256; - let mut hmap = >::new(); - // Empty hmap. - assert_eq!( - format!("{:?}", &hmap), - "LazyHashMap { key: None, cached_entries: {} }", - ); - // Filled hmap. 
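The `EntryOrMutableValue` split introduced above exists because an `entry()` lookup that misses the cache but finds the key in storage must still surface as `Occupied`. A standalone sketch of that case split, with two `BTreeMap`s standing in for the cache and the contract storage:

use std::collections::BTreeMap;

enum Lookup<'a> {
    Occupied(&'a mut u32),
    Vacant,
}

/// A cache miss followed by a storage hit populates the cache and still reports `Occupied`.
fn entry<'a>(
    cache: &'a mut BTreeMap<u8, u32>,
    storage: &BTreeMap<u8, u32>,
    key: u8,
) -> Lookup<'a> {
    if !cache.contains_key(&key) {
        match storage.get(&key) {
            Some(value) => {
                cache.insert(key, *value);
            }
            None => return Lookup::Vacant,
        }
    }
    Lookup::Occupied(cache.get_mut(&key).expect("present: just checked or inserted"))
}

fn main() {
    let mut cache = BTreeMap::new();
    let mut storage = BTreeMap::new();
    storage.insert(1u8, 13u32);
    assert!(matches!(entry(&mut cache, &storage, 1), Lookup::Occupied(_)));
    assert!(matches!(entry(&mut cache, &storage, 2), Lookup::Vacant));
    assert_eq!(cache.get(&1), Some(&13)); // the storage hit is now cached
}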
- hmap.put('A', Some(1)); - hmap.put('B', Some(2)); - hmap.put('C', None); - assert_eq!( - format!("{:?}", &hmap), - "LazyHashMap { \ - key: None, \ - cached_entries: {\ - 'A': Entry { \ - value: Some(1), \ - state: Mutated \ - }, \ - 'B': Entry { \ - value: Some(2), \ - state: Mutated \ - }, \ - 'C': Entry { \ - value: None, \ - state: Mutated \ - }\ - } \ - }", - ); -} - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::{ - LayoutCryptoHasher, - StorageLayout, - }; - use ink_metadata::layout::{ - CellLayout, - HashLayout, - HashingStrategy, - Layout, - LayoutKey, - }; - use scale_info::TypeInfo; - - impl StorageLayout for LazyHashMap - where - K: Ord + scale::Encode, - V: TypeInfo + 'static, - H: CryptoHash + LayoutCryptoHasher, - Key: From<::Type>, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - Layout::Hash(HashLayout::new( - LayoutKey::from(key_ptr.advance_by(1)), - HashingStrategy::new( - ::crypto_hasher(), - b"ink hashmap".to_vec(), - Vec::new(), - ), - Layout::Cell(CellLayout::new::(LayoutKey::from( - key_ptr.advance_by(0), - ))), - )) - } - } -}; - -impl SpreadLayout for LazyHashMap -where - K: Ord + scale::Encode, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - const FOOTPRINT: u64 = 1; - - #[inline] - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - let offset_key = ExtKeyPtr::next_for::(ptr); - for (index, entry) in self.entries().iter() { - let root_key = self.to_offset_key(offset_key, index); - entry.push_packed_root(&root_key); - } - } - - #[inline] - fn clear_spread(&self, _ptr: &mut KeyPtr) { - // Low-level lazy abstractions won't perform automated clean-up since - // they generally are not aware of their entire set of associated - // elements. The high-level abstractions that build upon them are - // responsible for cleaning up. - } -} - -impl SpreadAllocate for LazyHashMap -where - K: Ord + scale::Encode, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - #[inline] - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } -} - -// # Developer Note -// -// Even thought `LazyHashMap` would require storing just a single key a thus -// be a packable storage entity we cannot really make it one since this could -// allow for overlapping lazy hash map instances. -// An example for this would be a `Pack<(LazyHashMap, LazyHashMap)>` where -// both lazy hash maps would use the same underlying key and thus would apply -// the same underlying key mapping. - -impl Default for LazyHashMap -where - K: Ord, -{ - fn default() -> Self { - Self::new() - } -} - -impl FromIterator<(K, V)> for LazyHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut hmap = LazyHashMap::new(); - hmap.extend(iter); - hmap - } -} - -impl Extend<(K, V)> for LazyHashMap -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for (key, value) in iter { - self.put(key, Some(value)); - } - } -} - -impl LazyHashMap -where - K: Ord, -{ - /// Creates a new empty lazy hash map. - /// - /// # Note - /// - /// A lazy map created this way cannot be used to load from the contract storage. - /// All operations that directly or indirectly load from storage will panic. 
- pub fn new() -> Self { - Self { - key: None, - cached_entries: CacheCell::new(EntryMap::new()), - hash_builder: Default::default(), - } - } - - /// Creates a new empty lazy hash map positioned at the given key. - /// - /// # Note - /// - /// This constructor is private and should never need to be called from - /// outside this module. It is used to construct a lazy index map from a - /// key that is only useful upon a contract call. Use - /// [`LazyIndexMap::new`][`crate::lazy::LazyIndexMap::new`] - /// for construction during contract initialization. - fn lazy(key: Key) -> Self { - Self { - key: Some(key), - cached_entries: CacheCell::new(EntryMap::new()), - hash_builder: Default::default(), - } - } - - /// Returns the offset key of the lazy map if any. - pub fn key(&self) -> Option<&Key> { - self.key.as_ref() - } - - /// Returns the length of the cached entries. - #[cfg(test)] - pub(crate) fn len_cached_entries(&self) -> usize { - self.entries().len() - } - - /// Returns a shared reference to the underlying entries. - fn entries(&self) -> &EntryMap { - self.cached_entries.as_inner() - } - - /// Returns an exclusive reference to the underlying entries. - fn entries_mut(&mut self) -> &mut EntryMap { - self.cached_entries.as_inner_mut() - } - - /// Puts the new value under the given key. - /// - /// # Note - /// - /// - Use [`LazyHashMap::put`]`(None)` in order to remove an element. - /// - Prefer this method over [`LazyHashMap::put_get`] if you are not interested - /// in the old value of the same cell index. - /// - /// # Panics - /// - /// - If the lazy hash map is in an invalid state that forbids interaction - /// with the underlying contract storage. - /// - If the decoding of the old element at the given index failed. - pub fn put(&mut self, key: K, new_value: Option) { - self.entries_mut().insert( - key, - Box::new(StorageEntry::new(new_value, EntryState::Mutated)), - ); - } -} - -impl LazyHashMap -where - K: Clone + Ord + PackedLayout, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// Gets the given key's corresponding entry in the map for in-place manipulation. - pub fn entry(&mut self, key: K) -> Entry { - // SAFETY: We have put the whole `cached_entries` mapping into an - // `UnsafeCell` because of this caching functionality. The - // trick here is that due to using `Box` internally - // we are able to return references to the cached entries - // while maintaining the invariant that mutating the caching - // `BTreeMap` will never invalidate those references. - // By returning a raw pointer we enforce an `unsafe` block at - // the caller site to underline that guarantees are given by the - // caller. - let cached_entries = unsafe { &mut *self.cached_entries.get_ptr().as_ptr() }; - // We have to clone the key here because we do not have access to the unsafe - // raw entry API for Rust hash maps, yet since it is unstable. We can remove - // the constraints on `K: Clone` once we have access to this API. 
- // Read more about the issue here: https://github.com/rust-lang/rust/issues/56167 - match cached_entries.entry(key.to_owned()) { - BTreeMapEntry::Occupied(entry) => { - match entry.get().value() { - Some(_) => { - Entry::Occupied(OccupiedEntry { - key, - entry: EntryOrMutableValue::EntryElementWasInCache(entry), - }) - } - None => { - // value is already marked as to be removed - Entry::Vacant(VacantEntry { - key, - entry: BTreeMapEntry::Occupied(entry), - }) - } - } - } - BTreeMapEntry::Vacant(entry) => { - let value = self - .key_at(&key) - .map(|key| pull_packed_root_opt::(&key)) - .unwrap_or(None); - match value.is_some() { - true => { - // The entry was not in the cache, but in the storage. This results in - // a problem: We only have `Vacant` here, but need to return `Occupied`, - // to reflect this. - let v_mut = entry.insert(Box::new(StorageEntry::new( - value, - EntryState::Preserved, - ))); - Entry::Occupied(OccupiedEntry { - key, - entry: EntryOrMutableValue::MutableValueElementWasNotInCache( - v_mut, - ), - }) - } - false => { - Entry::Vacant(VacantEntry { - key, - entry: BTreeMapEntry::Vacant(entry), - }) - } - } - } - } - } -} - -impl LazyHashMap -where - K: Ord + scale::Encode, - H: CryptoHash, - Key: From<::Type>, -{ - /// Returns an offset key for the given key pair. - fn to_offset_key(&self, storage_key: &Key, key: &Q) -> Key - where - K: Borrow, - Q: scale::Encode, - { - #[derive(scale::Encode)] - struct KeyPair<'a, Q> { - prefix: [u8; 11], - storage_key: &'a Key, - value_key: &'a Q, - } - let key_pair = KeyPair { - prefix: [ - b'i', b'n', b'k', b' ', b'h', b'a', b's', b'h', b'm', b'a', b'p', - ], - storage_key, - value_key: key, - }; - let mut output = ::Type::default(); - ink_env::hash_encoded::>(&key_pair, &mut output); - output.into() - } - - /// Returns an offset key for the given key. - fn key_at(&self, key: &Q) -> Option - where - K: Borrow, - Q: scale::Encode, - { - self.key - .map(|storage_key| self.to_offset_key(&storage_key, key)) - } -} - -impl LazyHashMap -where - K: Ord + Eq + scale::Encode, - V: PackedLayout, - H: CryptoHash, - Key: From<::Type>, -{ - /// Lazily loads the value at the given index. - /// - /// # Note - /// - /// Only loads a value if `key` is set and if the value has not been loaded yet. - /// Returns the freshly loaded or already loaded entry of the value. - /// - /// # Safety - /// - /// This function has a `&self` receiver while returning an `Option<*mut T>` - /// which is unsafe in isolation. The caller has to determine how to forward - /// the returned `*mut T`. - /// - /// # Safety - /// - /// This is an `unsafe` operation because it has a `&self` receiver but returns - /// a `*mut Entry` pointer that allows for exclusive access. This is safe - /// within internal use only and should never be given outside the lazy entity - /// for public `&self` methods. - unsafe fn lazily_load(&self, key: &Q) -> NonNull> - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - // SAFETY: We have put the whole `cached_entries` mapping into an - // `UnsafeCell` because of this caching functionality. The - // trick here is that due to using `Box` internally - // we are able to return references to the cached entries - // while maintaining the invariant that mutating the caching - // `BTreeMap` will never invalidate those references. - // By returning a raw pointer we enforce an `unsafe` block at - // the caller site to underline that guarantees are given by the - // caller. 
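As `to_offset_key` above shows, each value's storage key is a cryptographic hash over a fixed `b"ink hashmap"` prefix, the map's own storage key, and the SCALE-encoded element key. The shape of that derivation can be sketched with std's `DefaultHasher` standing in for the 256-bit BLAKE2/SHA-2 hashers and raw little-endian bytes standing in for SCALE encoding (both substitutions are assumptions for brevity):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn offset_key(storage_key: u64, encoded_element_key: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    b"ink hashmap".hash(&mut hasher); // fixed domain-separation prefix
    storage_key.hash(&mut hasher);    // the map's own storage key
    encoded_element_key.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let k0 = offset_key(0x42, &0i32.to_le_bytes());
    // Same parameters must always yield the same key (cf. the regression noted
    // further below about the hash accumulator not being reset).
    assert_eq!(k0, offset_key(0x42, &0i32.to_le_bytes()));
    assert_ne!(k0, offset_key(0x42, &1i32.to_le_bytes()));
}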
- let cached_entries = &mut *self.cached_entries.get_ptr().as_ptr(); - // We have to clone the key here because we do not have access to the unsafe - // raw entry API for Rust hash maps, yet since it is unstable. We can remove - // the contraints on `K: Clone` once we have access to this API. - // Read more about the issue here: https://github.com/rust-lang/rust/issues/56167 - match cached_entries.entry(key.to_owned()) { - BTreeMapEntry::Occupied(occupied) => { - NonNull::from(&mut **occupied.into_mut()) - } - BTreeMapEntry::Vacant(vacant) => { - let value = self - .key_at(key) - .map(|key| pull_packed_root_opt::(&key)) - .unwrap_or(None); - NonNull::from( - &mut **vacant.insert(Box::new(StorageEntry::new( - value, - EntryState::Preserved, - ))), - ) - } - } - } - - /// Lazily loads the value associated with the given key. - /// - /// # Note - /// - /// Only loads a value if `key` is set and if the value has not been loaded yet. - /// Returns a pointer to the freshly loaded or already loaded entry of the value. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the lazy chunk is not in a state that allows lazy loading. - fn lazily_load_mut(&mut self, index: &Q) -> &mut StorageEntry - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - // SAFETY: - // - Returning a `&mut Entry` is safe because entities inside the - // cache are stored within a `Box` to not invalidate references into - // them upon operating on the outer cache. - unsafe { &mut *self.lazily_load(index).as_ptr() } - } - - /// Clears the underlying storage of the entry at the given index. - /// - /// # Safety - /// - /// For performance reasons this does not synchronize the lazy index map's - /// memory-side cache which invalidates future accesses the cleared entry. - /// Care should be taken when using this API. - /// - /// The general use of this API is to streamline `Drop` implementations of - /// high-level abstractions that build upon this low-level data structure. - pub fn clear_packed_at(&self, index: &Q) - where - K: Borrow, - V: PackedLayout, - Q: Ord + scale::Encode + ToOwned, - { - let root_key = self.key_at(index).expect("cannot clear in lazy state"); - if ::REQUIRES_DEEP_CLEAN_UP { - // We need to load the entity before we remove its associated contract storage - // because it requires a deep clean-up which propagates clearing to its fields, - // for example in the case of `T` being a `storage::Box`. - let entity = self.get(index).expect("cannot clear a non existing entity"); - clear_packed_root::(entity, &root_key); - } else { - // The type does not require deep clean-up so we can simply clean-up - // its associated storage cell and be done without having to load it first. - ink_env::clear_contract_storage(&root_key); - } - } - - /// Returns a shared reference to the value associated with the given key if any. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of the element at the given index failed. - pub fn get(&self, index: &Q) -> Option<&V> - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - // SAFETY: Dereferencing the `*mut T` pointer into a `&T` is safe - // since this method's receiver is `&self` so we do not - // leak non-shared references to the outside. - unsafe { &*self.lazily_load(index).as_ptr() }.value().into() - } - - /// Returns an exclusive reference to the value associated with the given key if any. 
- /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of the element at the given index failed. - pub fn get_mut(&mut self, index: &Q) -> Option<&mut V> - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - self.lazily_load_mut(index).value_mut().into() - } - - /// Puts the new value under the given key and returns the old value if any. - /// - /// # Note - /// - /// - Use [`LazyHashMap::put_get`]`(None)` in order to remove an element - /// and retrieve the old element back. - /// - /// # Panics - /// - /// - If the lazy hashmap is in an invalid state that forbids interaction. - /// - If the decoding of the old element at the given index failed. - pub fn put_get(&mut self, key: &Q, new_value: Option) -> Option - where - K: Borrow, - Q: Ord + scale::Encode + ToOwned, - { - self.lazily_load_mut(key).put(new_value) - } - - /// Swaps the values at entries with associated keys `x` and `y`. - /// - /// This operation tries to be as efficient as possible and reuse allocations. - /// - /// # Panics - /// - /// - If the lazy hashmap is in an invalid state that forbids interaction. - /// - If the decoding of one of the elements failed. - pub fn swap(&mut self, x: &Q1, y: &Q2) - where - K: Borrow + Borrow, - Q1: Ord + PartialEq + scale::Encode + ToOwned, - Q2: Ord + PartialEq + scale::Encode + ToOwned, - { - if x == y { - // Bail out early if both indices are the same. - return - } - let (loaded_x, loaded_y) = - // SAFETY: The loaded `x` and `y` entries are distinct from each - // other guaranteed by the previous check. Also `lazily_load` - // guarantees to return a pointer to a pinned entity - // so that the returned references do not conflict with - // each other. - unsafe { ( - &mut *self.lazily_load(x).as_ptr(), - &mut *self.lazily_load(y).as_ptr(), - ) }; - if loaded_x.value().is_none() && loaded_y.value().is_none() { - // Bail out since nothing has to be swapped if both values are `None`. - return - } - // Set the `mutate` flag since at this point at least one of the loaded - // values is guaranteed to be `Some`. - loaded_x.replace_state(EntryState::Mutated); - loaded_y.replace_state(EntryState::Mutated); - core::mem::swap(loaded_x.value_mut(), loaded_y.value_mut()); - } -} - -impl<'a, K, V> Entry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout + core::fmt::Debug + core::cmp::Eq + Default, -{ - /// Returns a reference to this entry's key. - pub fn key(&self) -> &K { - match self { - Entry::Occupied(entry) => &entry.key, - Entry::Vacant(entry) => &entry.key, - } - } - - /// Ensures a value is in the entry by inserting the default value if empty, and returns - /// a reference to the value in the entry. - pub fn or_default(self) -> &'a V { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(V::default()), - } - } - - /// Ensures a value is in the entry by inserting the default if empty, and returns - /// a mutable reference to the value in the entry. - pub fn or_insert(self, default: V) -> &'a mut V { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default), - } - } - - /// Ensures a value is in the entry by inserting the result of the default function if empty, - /// and returns mutable references to the key and value in the entry. 
- pub fn or_insert_with(self, default: F) -> &'a mut V - where - F: FnOnce() -> V, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default()), - } - } - - /// Ensures a value is in the entry by inserting, if empty, the result of the default - /// function, which takes the key as its argument, and returns a mutable reference to - /// the value in the entry. - pub fn or_insert_with_key(self, default: F) -> &'a mut V - where - F: FnOnce(&K) -> V, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => { - let value = default(&entry.key); - entry.insert(value) - } - } - } - - /// Provides in-place mutable access to an occupied entry before any - /// potential inserts into the map. - #[must_use] - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - Entry::Occupied(mut entry) => { - { - let v = entry.get_mut(); - f(v); - } - Entry::Occupied(entry) - } - Entry::Vacant(entry) => Entry::Vacant(entry), - } - } -} - -impl<'a, K, V> VacantEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// Gets a reference to the key that would be used when inserting a value through the `VacantEntry`. - pub fn key(&self) -> &K { - &self.key - } - - /// Take ownership of the key. - pub fn into_key(self) -> K { - self.key - } - - /// Sets the value of the entry with the `VacantEntry`s key, and returns a mutable reference to it. - pub fn insert(self, value: V) -> &'a mut V { - let new = Box::new(StorageEntry::new(Some(value), EntryState::Mutated)); - match self.entry { - BTreeMapEntry::Vacant(vacant) => { - vacant - .insert(new) - .value_mut() - .as_mut() - .expect("insert was just executed; qed") - } - BTreeMapEntry::Occupied(mut occupied) => { - occupied.insert(new); - occupied - .into_mut() - .value_mut() - .as_mut() - .expect("insert was just executed; qed") - } - } - } -} - -impl<'a, K, V> OccupiedEntry<'a, K, V> -where - K: Ord + Clone + PackedLayout, - V: PackedLayout, -{ - /// Gets a reference to the key in the entry. - pub fn key(&self) -> &K { - &self.key - } - - /// Take the ownership of the key and value from the map. - pub fn remove_entry(self) -> (K, V) { - let old = match self.entry { - EntryOrMutableValue::EntryElementWasInCache(mut entry) => { - entry - .get_mut() - .value_mut() - .take() - .expect("entry behind `OccupiedEntry` must always exist") - } - EntryOrMutableValue::MutableValueElementWasNotInCache(v_mut) => { - v_mut - .value_mut() - .take() - .expect("entry behind `MutableValue` must always exist") - } - }; - (self.key, old) - } - - /// Gets a reference to the value in the entry. - pub fn get(&self) -> &V { - match &self.entry { - EntryOrMutableValue::EntryElementWasInCache(entry) => { - entry - .get() - .value() - .as_ref() - .expect("entry behind `OccupiedEntry` must always exist") - } - EntryOrMutableValue::MutableValueElementWasNotInCache(v_mut) => { - v_mut - .value() - .as_ref() - .expect("entry behind `MutableValue` must always exist") - } - } - } - - /// Gets a mutable reference to the value in the entry. - /// - /// If you need a reference to the `OccupiedEntry` which may outlive the destruction of the - /// `Entry` value, see `into_mut`. 
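The `or_insert`/`or_insert_with`/`or_insert_with_key`/`or_default`/`and_modify` family documented above mirrors the entry API of std's `BTreeMap`, so the intended call patterns can be shown with std types alone:

use std::collections::BTreeMap;

fn main() {
    let mut map: BTreeMap<char, u32> = BTreeMap::new();
    *map.entry('a').or_insert(1) += 10;          // vacant: insert 1, then mutate in place
    map.entry('a').and_modify(|v| *v += 1);      // occupied: in-place update, no insert
    let b = map.entry('b').or_insert_with(|| 2); // default computed only when vacant
    assert_eq!(*b, 2);
    assert_eq!(map[&'a'], 12);
    assert_eq!(*map.entry('c').or_default(), 0); // `Default::default()` when vacant
}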
- pub fn get_mut(&mut self) -> &mut V { - match &mut self.entry { - EntryOrMutableValue::EntryElementWasInCache(entry) => { - entry - .get_mut() - .value_mut() - .as_mut() - .expect("entry behind `OccupiedEntry` must always exist") - } - EntryOrMutableValue::MutableValueElementWasNotInCache(v_mut) => { - v_mut - .value_mut() - .as_mut() - .expect("entry behind `MutableValue` must always exist") - } - } - } - - /// Sets the value of the entry, and returns the entry's old value. - pub fn insert(&mut self, new_value: V) -> V { - match &mut self.entry { - EntryOrMutableValue::EntryElementWasInCache(entry) => { - let new_value = - Box::new(StorageEntry::new(Some(new_value), EntryState::Mutated)); - entry - .insert(new_value) - .into_value() - .expect("entry behind `OccupiedEntry` must always exist") - } - EntryOrMutableValue::MutableValueElementWasNotInCache(v_mut) => { - core::mem::replace(v_mut.value_mut(), Some(new_value)) - .expect("entry behind `MutableValue` must always exist") - } - } - } - - /// Takes the value out of the entry, and returns it. - pub fn remove(self) -> V { - self.remove_entry().1 - } - - /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry - /// with a lifetime bound to the map itself. - pub fn into_mut(self) -> &'a mut V { - match self.entry { - EntryOrMutableValue::EntryElementWasInCache(entry) => { - entry - .into_mut() - .value_mut() - .as_mut() - .expect("entry behind `OccupiedEntry` must always exist") - } - EntryOrMutableValue::MutableValueElementWasNotInCache(v_mut) => { - v_mut - .value_mut() - .as_mut() - .expect("entry behind `MutableValue` must always exist") - } - } - } -} - -#[cfg(test)] -mod tests { - use super::{ - EntryState, - LazyHashMap, - StorageEntry, - }; - use crate::traits::{ - KeyPtr, - SpreadLayout, - }; - use ink_env::hash::{ - Blake2x256, - Sha2x256, - }; - use ink_primitives::Key; - - /// Asserts that the cached entries of the given `imap` is equal to the `expected` slice. - fn assert_cached_entries( - hmap: &LazyHashMap, - expected: &[(i32, StorageEntry)], - ) { - assert_eq!(hmap.len_cached_entries(), expected.len()); - for (given, expected) in hmap - .entries() - .iter() - .map(|(index, boxed_entry)| (*index, &**boxed_entry)) - .zip(expected.iter().map(|(index, entry)| (*index, entry))) - { - assert_eq!(given, expected); - } - } - - fn new_hmap() -> LazyHashMap { - >::new() - } - - #[test] - fn new_works() { - let hmap = new_hmap(); - // Key must be none. - assert_eq!(hmap.key(), None); - assert_eq!(hmap.key_at(&0), None); - // Cached elements must be empty. - assert_cached_entries(&hmap, &[]); - // Same as default: - let default_hmap = >::default(); - assert_eq!(hmap.key(), default_hmap.key()); - assert_eq!(hmap.entries(), default_hmap.entries()); - } - - #[test] - fn key_at_works() { - let key = Key::from([0x42; 32]); - - // BLAKE-2 256-bit hasher: - let hmap1 = >::lazy(key); - // Key must be some. - assert_eq!(hmap1.key(), Some(&key)); - // Cached elements must be empty. - assert_cached_entries(&hmap1, &[]); - let hmap1_at_0 = b"\ - \x67\x7E\xD3\xA4\x72\x2A\x83\x60\ - \x96\x65\x0E\xCD\x1F\x2C\xE8\x5D\ - \xBF\x7E\xC0\xFF\x16\x40\x8A\xD8\ - \x75\x88\xDE\x52\xF5\x8B\x99\xAF"; - assert_eq!(hmap1.key_at(&0), Some(Key::from(*hmap1_at_0))); - // Same parameters must yield the same key: - // - // This tests an actual regression that happened because the - // hash accumulator was not reset after a hash finalization. 
- assert_cached_entries(&hmap1, &[]); - assert_eq!(hmap1.key_at(&0), Some(Key::from(*hmap1_at_0))); - assert_eq!( - hmap1.key_at(&1), - Some(Key::from( - *b"\ - \x9A\x46\x1F\xB3\xA1\xC4\x20\xF8\ - \xA0\xD9\xA7\x79\x2F\x07\xFB\x7D\ - \x49\xDD\xAB\x08\x67\x90\x96\x15\ - \xFB\x85\x36\x3B\x82\x94\x85\x3F" - )) - ); - // SHA-2 256-bit hasher: - let hmap2 = >::lazy(key); - // Key must be some. - assert_eq!(hmap2.key(), Some(&key)); - // Cached elements must be empty. - assert_cached_entries(&hmap2, &[]); - assert_eq!( - hmap1.key_at(&0), - Some(Key::from( - *b"\ - \x67\x7E\xD3\xA4\x72\x2A\x83\x60\ - \x96\x65\x0E\xCD\x1F\x2C\xE8\x5D\ - \xBF\x7E\xC0\xFF\x16\x40\x8A\xD8\ - \x75\x88\xDE\x52\xF5\x8B\x99\xAF" - )) - ); - assert_eq!( - hmap1.key_at(&1), - Some(Key::from( - *b"\ - \x9A\x46\x1F\xB3\xA1\xC4\x20\xF8\ - \xA0\xD9\xA7\x79\x2F\x07\xFB\x7D\ - \x49\xDD\xAB\x08\x67\x90\x96\x15\ - \xFB\x85\x36\x3B\x82\x94\x85\x3F" - )) - ); - } - - #[test] - fn put_get_works() { - let mut hmap = new_hmap(); - // Put some values. - assert_eq!(hmap.put_get(&1, Some(b'A')), None); - assert_eq!(hmap.put_get(&2, Some(b'B')), None); - assert_eq!(hmap.put_get(&4, Some(b'C')), None); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(Some(b'C'), EntryState::Mutated)), - ], - ); - // Put none values. - assert_eq!(hmap.put_get(&3, None), None); - assert_eq!(hmap.put_get(&5, None), None); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(Some(b'C'), EntryState::Mutated)), - (5, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Override some values with none. - assert_eq!(hmap.put_get(&2, None), Some(b'B')); - assert_eq!(hmap.put_get(&4, None), Some(b'C')); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Mutated)), - (5, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Override none values with some. - assert_eq!(hmap.put_get(&3, Some(b'X')), None); - assert_eq!(hmap.put_get(&5, Some(b'Y')), None); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Mutated)), - (3, StorageEntry::new(Some(b'X'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Mutated)), - (5, StorageEntry::new(Some(b'Y'), EntryState::Mutated)), - ], - ); - } - - #[test] - fn get_works() { - let mut hmap = new_hmap(); - let nothing_changed = &[ - (1, StorageEntry::new(None, EntryState::Preserved)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(Some(b'D'), EntryState::Mutated)), - ]; - // Put some values. 
- assert_eq!(hmap.put_get(&1, None), None); - assert_eq!(hmap.put_get(&2, Some(b'B')), None); - assert_eq!(hmap.put_get(&3, None), None); - assert_eq!(hmap.put_get(&4, Some(b'D')), None); - assert_cached_entries(&hmap, nothing_changed); - // `get` works: - assert_eq!(hmap.get(&1), None); - assert_eq!(hmap.get(&2), Some(&b'B')); - assert_eq!(hmap.get(&3), None); - assert_eq!(hmap.get(&4), Some(&b'D')); - assert_cached_entries(&hmap, nothing_changed); - // `get_mut` works: - assert_eq!(hmap.get_mut(&1), None); - assert_eq!(hmap.get_mut(&2), Some(&mut b'B')); - assert_eq!(hmap.get_mut(&3), None); - assert_eq!(hmap.get_mut(&4), Some(&mut b'D')); - assert_cached_entries(&hmap, nothing_changed); - // `get` or `get_mut` without cache: - assert_eq!(hmap.get(&5), None); - assert_eq!(hmap.get_mut(&5), None); - } - - #[test] - fn put_works() { - let mut hmap = new_hmap(); - // Put some values. - hmap.put(1, None); - hmap.put(2, Some(b'B')); - hmap.put(4, None); - // The main difference between `put` and `put_get` is that `put` never - // loads from storage which also has one drawback: Putting a `None` - // value always ends-up in `Mutated` state for the entry even if the - // entry is already `None`. - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Mutated)), - ], - ); - // Overwrite entries: - hmap.put(1, Some(b'A')); - hmap.put(2, None); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Mutated)), - ], - ); - } - - #[test] - fn swap_works() { - let mut hmap = new_hmap(); - let nothing_changed = &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ]; - // Put some values. - assert_eq!(hmap.put_get(&1, Some(b'A')), None); - assert_eq!(hmap.put_get(&2, Some(b'B')), None); - assert_eq!(hmap.put_get(&3, None), None); - assert_eq!(hmap.put_get(&4, None), None); - assert_cached_entries(&hmap, nothing_changed); - // Swap same indices: Check that nothing has changed. - for i in 0..4 { - hmap.swap(&i, &i); - } - assert_cached_entries(&hmap, nothing_changed); - // Swap `None` values: Check that nothing has changed. 
- hmap.swap(&3, &4); - hmap.swap(&4, &3); - assert_cached_entries(&hmap, nothing_changed); - // Swap `Some` and `None`: - hmap.swap(&1, &3); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap `Some` and `Some`: - hmap.swap(&2, &3); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap out of bounds: `None` and `None` - hmap.swap(&4, &5); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - (5, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap out of bounds: `Some` and `None` - hmap.swap(&3, &6); - assert_cached_entries( - &hmap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - (5, StorageEntry::new(None, EntryState::Preserved)), - (6, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - ], - ); - } - - #[test] - fn spread_layout_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let mut hmap = new_hmap(); - let nothing_changed = &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ]; - // Put some values. - assert_eq!(hmap.put_get(&1, Some(b'A')), None); - assert_eq!(hmap.put_get(&2, Some(b'B')), None); - assert_eq!(hmap.put_get(&3, None), None); - assert_eq!(hmap.put_get(&4, None), None); - assert_cached_entries(&hmap, nothing_changed); - // Push the lazy index map onto the contract storage and then load - // another instance of it from the contract stoarge. - // Then: Compare both instances to be equal. - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&hmap, &mut KeyPtr::from(root_key)); - let hmap2 = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - assert_cached_entries(&hmap2, &[]); - assert_eq!(hmap2.key(), Some(&Key::from([0x42; 32]))); - assert_eq!(hmap2.get(&1), Some(&b'A')); - assert_eq!(hmap2.get(&2), Some(&b'B')); - assert_eq!(hmap2.get(&3), None); - assert_eq!(hmap2.get(&4), None); - assert_cached_entries( - &hmap2, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Preserved)), - (2, StorageEntry::new(Some(b'B'), EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Clear the first lazy index map instance and reload another instance - // to check whether the associated storage has actually been freed - // again: - SpreadLayout::clear_spread(&hmap2, &mut KeyPtr::from(root_key)); - // The above `clear_spread` call is a no-op since lazy index map is - // generally not aware of its associated elements. 
So we have to - // manually clear them from the contract storage which is what the - // high-level data structures like `storage::Vec` would command: - hmap2.clear_packed_at(&1); - hmap2.clear_packed_at(&2); - hmap2.clear_packed_at(&3); // Not really needed here. - hmap2.clear_packed_at(&4); // Not really needed here. - let hmap3 = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - assert_cached_entries(&hmap3, &[]); - assert_eq!(hmap3.get(&1), None); - assert_eq!(hmap3.get(&2), None); - assert_eq!(hmap3.get(&3), None); - assert_eq!(hmap3.get(&4), None); - assert_cached_entries( - &hmap3, - &[ - (1, StorageEntry::new(None, EntryState::Preserved)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - Ok(()) - }) - } -} diff --git a/crates/storage/src/lazy/lazy_imap.rs b/crates/storage/src/lazy/lazy_imap.rs deleted file mode 100644 index 6905d70497..0000000000 --- a/crates/storage/src/lazy/lazy_imap.rs +++ /dev/null @@ -1,790 +0,0 @@ -// Copyright 2018-2022 Parity Technologies (UK) Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - CacheCell, - EntryState, - StorageEntry, -}; -use crate::traits::{ - clear_packed_root, - pull_packed_root_opt, - ExtKeyPtr, - KeyPtr, - PackedLayout, - SpreadAllocate, - SpreadLayout, -}; -use core::{ - fmt, - fmt::Debug, - ptr::NonNull, -}; -use ink_prelude::{ - boxed::Box, - collections::BTreeMap, -}; -use ink_primitives::Key; - -/// The index type used in the lazy storage chunk. -pub type Index = u32; - -/// A lazy storage chunk that spans over a whole chunk of storage cells. -/// -/// # Note -/// -/// This is mainly used as low-level storage primitives by other high-level -/// storage primitives in order to manage the contract storage for a whole -/// chunk of storage cells. -/// -/// A chunk of storage cells is a contiguous range of `2^32` storage cells. -pub struct LazyIndexMap { - /// The offset key for the chunk of cells. - /// - /// If the lazy chunk has been initialized during contract initialization - /// the key will be `None` since there won't be a storage region associated - /// to the lazy chunk which prevents it from lazily loading elements. This, - /// however, is only checked at contract runtime. We might incorporate - /// compile-time checks for this particular use case later on. - key: Option, - /// The subset of currently cached entries of the lazy storage chunk. - /// - /// An entry is cached as soon as it is loaded or written. 
- cached_entries: CacheCell>, -} - -struct DebugEntryMap<'a, V>(&'a CacheCell>); - -impl<'a, V> Debug for DebugEntryMap<'a, V> -where - V: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_map().entries(self.0.as_inner().iter()).finish() - } -} - -impl Debug for LazyIndexMap -where - V: Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LazyIndexMap") - .field("key", &self.key) - .field("cached_entries", &DebugEntryMap(&self.cached_entries)) - .finish() - } -} - -#[test] -fn debug_impl_works() { - let mut imap = >::new(); - // Empty imap. - assert_eq!( - format!("{:?}", &imap), - "LazyIndexMap { key: None, cached_entries: {} }", - ); - // Filled imap. - imap.put(0, Some(1)); - imap.put(42, Some(2)); - imap.put(999, None); - assert_eq!( - format!("{:?}", &imap), - "LazyIndexMap { \ - key: None, \ - cached_entries: {\ - 0: Entry { \ - value: Some(1), \ - state: Mutated \ - }, \ - 42: Entry { \ - value: Some(2), \ - state: Mutated \ - }, \ - 999: Entry { \ - value: None, \ - state: Mutated \ - }\ - } \ - }", - ); -} - -impl Default for LazyIndexMap { - fn default() -> Self { - Self::new() - } -} - -/// The map for the contract storage entries. -/// -/// # Note -/// -/// We keep the whole entry in a `Box` in order to prevent pointer -/// invalidation upon updating the cache through `&self` methods as in -/// [`LazyIndexMap::get`]. -pub type EntryMap = BTreeMap>>; - -impl LazyIndexMap { - /// Creates a new empty lazy map. - /// - /// # Note - /// - /// A lazy map created this way cannot be used to load from the contract storage. - /// All operations that directly or indirectly load from storage will panic. - pub fn new() -> Self { - Self { - key: None, - cached_entries: CacheCell::new(EntryMap::new()), - } - } - - /// Creates a new empty lazy map positioned at the given key. - /// - /// # Note - /// - /// This constructor is private and should never need to be called from - /// outside this module. It is used to construct a lazy index map from a - /// key that is only useful upon a contract call. Use [`LazyIndexMap::new`] - /// for construction during contract initialization. - fn lazy(key: Key) -> Self { - Self { - key: Some(key), - cached_entries: CacheCell::new(EntryMap::new()), - } - } - - /// Returns the offset key of the lazy map if any. - pub fn key(&self) -> Option<&Key> { - self.key.as_ref() - } - - /// Returns a shared reference to the underlying entries. - fn entries(&self) -> &EntryMap { - self.cached_entries.as_inner() - } - - /// Returns an exclusive reference to the underlying entries. - fn entries_mut(&mut self) -> &mut EntryMap { - self.cached_entries.as_inner_mut() - } - - /// Puts the new value at the given index. - /// - /// # Note - /// - /// - Use [`LazyIndexMap::put`]`(None)` in order to remove an element. - /// - Prefer this method over [`LazyIndexMap::put_get`] if you are not interested - /// in the old value of the same cell index. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of the old element at the given index failed. - pub fn put(&mut self, index: Index, new_value: Option) { - use ink_prelude::collections::btree_map::Entry as BTreeMapEntry; - match self.entries_mut().entry(index) { - BTreeMapEntry::Occupied(mut occupied) => { - // We can re-use the already existing boxed `Entry` and simply - // swap the underlying values. 
- occupied.get_mut().put(new_value); - } - BTreeMapEntry::Vacant(vacant) => { - vacant - .insert(Box::new(StorageEntry::new(new_value, EntryState::Mutated))); - } - } - } -} - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::{ - ArrayLayout, - CellLayout, - Layout, - LayoutKey, - }; - use scale_info::TypeInfo; - - impl StorageLayout for LazyIndexMap - where - T: TypeInfo + 'static, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - let capacity = u32::MAX; - Layout::Array(ArrayLayout::new( - LayoutKey::from(key_ptr.advance_by(capacity as u64)), - capacity, - 1, - Layout::Cell(CellLayout::new::(LayoutKey::from( - key_ptr.advance_by(0), - ))), - )) - } - } -}; - -impl SpreadLayout for LazyIndexMap -where - V: PackedLayout, -{ - const FOOTPRINT: u64 = 1_u64 << 32; - - #[inline] - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - let offset_key = ExtKeyPtr::next_for::(ptr); - let mut root_key = Key::default(); - for (&index, entry) in self.entries().iter() { - offset_key.add_assign_using(index, &mut root_key); - entry.push_packed_root(&root_key); - } - } - - #[inline] - fn clear_spread(&self, _ptr: &mut KeyPtr) { - // Low-level lazy abstractions won't perform automated clean-up since - // they generally are not aware of their entire set of associated - // elements. The high-level abstractions that build upon them are - // responsible for cleaning up. - } -} - -impl SpreadAllocate for LazyIndexMap -where - V: PackedLayout, -{ - #[inline] - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self::lazy(*ExtKeyPtr::next_for::(ptr)) - } -} - -impl LazyIndexMap -where - V: PackedLayout, -{ - /// Clears the underlying storage of the entry at the given index. - /// - /// # Safety - /// - /// For performance reasons this does not synchronize the lazy index map's - /// memory-side cache which invalidates future accesses the cleared entry. - /// Care should be taken when using this API. - /// - /// The general use of this API is to streamline `Drop` implementations of - /// high-level abstractions that build upon this low-level data structure. - pub fn clear_packed_at(&self, index: Index) { - let root_key = self.key_at(index).expect("cannot clear in lazy state"); - if ::REQUIRES_DEEP_CLEAN_UP { - // We need to load the entity before we remove its associated contract storage - // because it requires a deep clean-up which propagates clearing to its fields, - // for example in the case of `T` being a `storage::Box`. - let entity = self.get(index).expect("cannot clear a non existing entity"); - clear_packed_root::(entity, &root_key); - } else { - // The type does not require deep clean-up so we can simply clean-up - // its associated storage cell and be done without having to load it first. - ink_env::clear_contract_storage(&root_key); - } - } -} - -impl LazyIndexMap -where - V: PackedLayout, -{ - /// Returns an offset key for the given index. - pub fn key_at(&self, index: Index) -> Option { - self.key.map(|mut key| { - key += index as u64; - key - }) - } - - /// Lazily loads the value at the given index. - /// - /// # Note - /// - /// Only loads a value if `key` is set and if the value has not been loaded yet. - /// Returns the freshly loaded or already loaded entry of the value. - /// - /// # Safety - /// - /// This function has a `&self` receiver while returning an `Option<*mut T>` - /// which is unsafe in isolation. 
The caller has to determine how to forward - /// the returned `*mut T`. - /// - /// # Safety - /// - /// This is an `unsafe` operation because it has a `&self` receiver but returns - /// a `*mut Entry` pointer that allows for exclusive access. This is safe - /// within internal use only and should never be given outside the lazy entity - /// for public `&self` methods. - unsafe fn lazily_load(&self, index: Index) -> NonNull> { - // SAFETY: We have put the whole `cached_entries` mapping into an - // `UnsafeCell` because of this caching functionality. The - // trick here is that due to using `Box` internally - // we are able to return references to the cached entries - // while maintaining the invariant that mutating the caching - // `BTreeMap` will never invalidate those references. - // By returning a raw pointer we enforce an `unsafe` block at - // the caller site to underline that guarantees are given by the - // caller. - let cached_entries = &mut *self.cached_entries.get_ptr().as_ptr(); - use ink_prelude::collections::btree_map::Entry as BTreeMapEntry; - match cached_entries.entry(index) { - BTreeMapEntry::Occupied(occupied) => { - NonNull::from(&mut **occupied.into_mut()) - } - BTreeMapEntry::Vacant(vacant) => { - let value = self - .key_at(index) - .map(|key| pull_packed_root_opt::(&key)) - .unwrap_or(None); - NonNull::from( - &mut **vacant.insert(Box::new(StorageEntry::new( - value, - EntryState::Preserved, - ))), - ) - } - } - } - - /// Lazily loads the value at the given index. - /// - /// # Note - /// - /// Only loads a value if `key` is set and if the value has not been loaded yet. - /// Returns the freshly loaded or already loaded entry of the value. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the lazy chunk is not in a state that allows lazy loading. - fn lazily_load_mut(&mut self, index: Index) -> &mut StorageEntry { - // SAFETY: - // - Returning a `&mut Entry` is safe because entities inside the - // cache are stored within a `Box` to not invalidate references into - // them upon operating on the outer cache. - unsafe { &mut *self.lazily_load(index).as_ptr() } - } - - /// Returns a shared reference to the element at the given index if any. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of the element at the given index failed. - pub fn get(&self, index: Index) -> Option<&V> { - // SAFETY: Dereferencing the `*mut T` pointer into a `&T` is safe - // since this method's receiver is `&self` so we do not - // leak non-shared references to the outside. - unsafe { &*self.lazily_load(index).as_ptr() }.value().into() - } - - /// Returns an exclusive reference to the element at the given index if any. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of the element at the given index failed. - pub fn get_mut(&mut self, index: Index) -> Option<&mut V> { - self.lazily_load_mut(index).value_mut().into() - } - - /// Puts the new value at the given index and returns the old value if any. - /// - /// # Note - /// - /// - Use [`LazyIndexMap::put_get`]`(None)` in order to remove an element - /// and retrieve the old element back. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of the old element at the given index failed. 
- pub fn put_get(&mut self, index: Index, new_value: Option) -> Option { - self.lazily_load_mut(index).put(new_value) - } - - /// Swaps the values at indices `x` and `y`. - /// - /// This operation tries to be as efficient as possible and reuse allocations. - /// - /// # Panics - /// - /// - If the lazy chunk is in an invalid state that forbids interaction. - /// - If the decoding of one of the elements failed. - pub fn swap(&mut self, x: Index, y: Index) { - if x == y { - // Bail out early if both indices are the same. - return - } - let (loaded_x, loaded_y) = - // SAFETY: The loaded `x` and `y` entries are distinct from each - // other guaranteed by the previous check. Also `lazily_load` - // guarantees to return a pointer to a pinned entity - // so that the returned references do not conflict with - // each other. - unsafe { ( - &mut *self.lazily_load(x).as_ptr(), - &mut *self.lazily_load(y).as_ptr(), - ) }; - if loaded_x.value().is_none() && loaded_y.value().is_none() { - // Bail out since nothing has to be swapped if both values are `None`. - return - } - // Set the `mutate` flag since at this point at least one of the loaded - // values is guaranteed to be `Some`. - loaded_x.replace_state(EntryState::Mutated); - loaded_y.replace_state(EntryState::Mutated); - core::mem::swap(loaded_x.value_mut(), loaded_y.value_mut()); - } -} - -#[cfg(test)] -mod tests { - use super::{ - super::{ - EntryState, - StorageEntry, - }, - Index, - LazyIndexMap, - }; - use crate::traits::{ - KeyPtr, - SpreadLayout, - }; - use ink_primitives::Key; - - /// Asserts that the cached entries of the given `imap` is equal to the `expected` slice. - fn assert_cached_entries( - imap: &LazyIndexMap, - expected: &[(Index, StorageEntry)], - ) { - assert_eq!(imap.entries().len(), expected.len()); - for (given, expected) in imap - .entries() - .iter() - .map(|(index, boxed_entry)| (*index, &**boxed_entry)) - .zip(expected.iter().map(|(index, entry)| (*index, entry))) - { - assert_eq!(given, expected); - } - } - - #[test] - fn new_works() { - let imap = >::new(); - // Key must be none. - assert_eq!(imap.key(), None); - assert_eq!(imap.key_at(0), None); - // Cached elements must be empty. - assert_cached_entries(&imap, &[]); - // Same as default: - let default_imap = >::default(); - assert_eq!(imap.key(), default_imap.key()); - assert_eq!(imap.entries(), default_imap.entries()); - } - - fn add_key(key: &Key, offset: u64) -> Key { - let mut result = *key; - result += offset; - result - } - - #[test] - fn lazy_works() { - let key = Key::from([0x42; 32]); - let imap = >::lazy(key); - // Key must be none. - assert_eq!(imap.key(), Some(&key)); - assert_eq!(imap.key_at(0), Some(key)); - assert_eq!(imap.key_at(1), Some(add_key(&key, 1))); - // Cached elements must be empty. - assert_cached_entries(&imap, &[]); - } - - #[test] - fn put_get_works() { - let mut imap = >::new(); - // Put some values. - assert_eq!(imap.put_get(1, Some(b'A')), None); - assert_eq!(imap.put_get(2, Some(b'B')), None); - assert_eq!(imap.put_get(4, Some(b'C')), None); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(Some(b'C'), EntryState::Mutated)), - ], - ); - // Put none values. 
- assert_eq!(imap.put_get(3, None), None); - assert_eq!(imap.put_get(5, None), None); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(Some(b'C'), EntryState::Mutated)), - (5, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Override some values with none. - assert_eq!(imap.put_get(2, None), Some(b'B')); - assert_eq!(imap.put_get(4, None), Some(b'C')); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Mutated)), - (5, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Override none values with some. - assert_eq!(imap.put_get(3, Some(b'X')), None); - assert_eq!(imap.put_get(5, Some(b'Y')), None); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Mutated)), - (3, StorageEntry::new(Some(b'X'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Mutated)), - (5, StorageEntry::new(Some(b'Y'), EntryState::Mutated)), - ], - ); - } - - #[test] - fn get_works() { - let mut imap = >::new(); - let nothing_changed = &[ - (1, StorageEntry::new(None, EntryState::Preserved)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(Some(b'D'), EntryState::Mutated)), - ]; - // Put some values. - assert_eq!(imap.put_get(1, None), None); - assert_eq!(imap.put_get(2, Some(b'B')), None); - assert_eq!(imap.put_get(3, None), None); - assert_eq!(imap.put_get(4, Some(b'D')), None); - assert_cached_entries(&imap, nothing_changed); - // `get` works: - assert_eq!(imap.get(1), None); - assert_eq!(imap.get(2), Some(&b'B')); - assert_eq!(imap.get(3), None); - assert_eq!(imap.get(4), Some(&b'D')); - assert_cached_entries(&imap, nothing_changed); - // `get_mut` works: - assert_eq!(imap.get_mut(1), None); - assert_eq!(imap.get_mut(2), Some(&mut b'B')); - assert_eq!(imap.get_mut(3), None); - assert_eq!(imap.get_mut(4), Some(&mut b'D')); - assert_cached_entries(&imap, nothing_changed); - // `get` or `get_mut` without cache: - assert_eq!(imap.get(5), None); - assert_eq!(imap.get_mut(5), None); - } - - #[test] - fn put_works() { - let mut imap = >::new(); - // Put some values. - imap.put(1, None); - imap.put(2, Some(b'B')); - imap.put(4, None); - // The main difference between `put` and `put_get` is that `put` never - // loads from storage which also has one drawback: Putting a `None` - // value always ends-up in `Mutated` state for the entry even if the - // entry is already `None`. 
- assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Mutated)), - ], - ); - // Overwrite entries: - imap.put(1, Some(b'A')); - imap.put(2, None); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(None, EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Mutated)), - ], - ); - } - - #[test] - fn swap_works() { - let mut imap = >::new(); - let nothing_changed = &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ]; - // Put some values. - assert_eq!(imap.put_get(1, Some(b'A')), None); - assert_eq!(imap.put_get(2, Some(b'B')), None); - assert_eq!(imap.put_get(3, None), None); - assert_eq!(imap.put_get(4, None), None); - assert_cached_entries(&imap, nothing_changed); - // Swap same indices: Check that nothing has changed. - for i in 0..4 { - imap.swap(i, i); - } - assert_cached_entries(&imap, nothing_changed); - // Swap `None` values: Check that nothing has changed. - imap.swap(3, 4); - imap.swap(4, 3); - assert_cached_entries(&imap, nothing_changed); - // Swap `Some` and `None`: - imap.swap(1, 3); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap `Some` and `Some`: - imap.swap(2, 3); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap out of bounds: `None` and `None` - imap.swap(4, 5); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - (5, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Swap out of bounds: `Some` and `None` - imap.swap(3, 6); - assert_cached_entries( - &imap, - &[ - (1, StorageEntry::new(None, EntryState::Mutated)), - (2, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Mutated)), - (4, StorageEntry::new(None, EntryState::Preserved)), - (5, StorageEntry::new(None, EntryState::Preserved)), - (6, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - ], - ); - } - - #[test] - fn spread_layout_works() -> ink_env::Result<()> { - ink_env::test::run_test::(|_| { - let mut imap = >::new(); - let nothing_changed = &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Mutated)), - (2, StorageEntry::new(Some(b'B'), EntryState::Mutated)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ]; - // Put some values. 
- assert_eq!(imap.put_get(1, Some(b'A')), None); - assert_eq!(imap.put_get(2, Some(b'B')), None); - assert_eq!(imap.put_get(3, None), None); - assert_eq!(imap.put_get(4, None), None); - assert_cached_entries(&imap, nothing_changed); - // Push the lazy index map onto the contract storage and then load - // another instance of it from the contract stoarge. - // Then: Compare both instances to be equal. - let root_key = Key::from([0x42; 32]); - SpreadLayout::push_spread(&imap, &mut KeyPtr::from(root_key)); - let imap2 = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - assert_cached_entries(&imap2, &[]); - assert_eq!(imap2.get(1), Some(&b'A')); - assert_eq!(imap2.get(2), Some(&b'B')); - assert_eq!(imap2.get(3), None); - assert_eq!(imap2.get(4), None); - assert_cached_entries( - &imap2, - &[ - (1, StorageEntry::new(Some(b'A'), EntryState::Preserved)), - (2, StorageEntry::new(Some(b'B'), EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - // Clear the first lazy index map instance and reload another instance - // to check whether the associated storage has actually been freed - // again: - SpreadLayout::clear_spread(&imap2, &mut KeyPtr::from(root_key)); - // The above `clear_spread` call is a no-op since lazy index map is - // generally not aware of its associated elements. So we have to - // manually clear them from the contract storage which is what the - // high-level data structures like `storage::Vec` would command: - imap2.clear_packed_at(1); - imap2.clear_packed_at(2); - imap2.clear_packed_at(3); // Not really needed here. - imap2.clear_packed_at(4); // Not really needed here. - let imap3 = as SpreadLayout>::pull_spread( - &mut KeyPtr::from(root_key), - ); - assert_cached_entries(&imap3, &[]); - assert_eq!(imap3.get(1), None); - assert_eq!(imap3.get(2), None); - assert_eq!(imap3.get(3), None); - assert_eq!(imap3.get(4), None); - assert_cached_entries( - &imap3, - &[ - (1, StorageEntry::new(None, EntryState::Preserved)), - (2, StorageEntry::new(None, EntryState::Preserved)), - (3, StorageEntry::new(None, EntryState::Preserved)), - (4, StorageEntry::new(None, EntryState::Preserved)), - ], - ); - Ok(()) - }) - } -} diff --git a/crates/storage/src/lazy/mapping.rs b/crates/storage/src/lazy/mapping.rs index a704217f0c..c1d5b78ba6 100644 --- a/crates/storage/src/lazy/mapping.rs +++ b/crates/storage/src/lazy/mapping.rs @@ -298,7 +298,7 @@ mod tests { fn can_clear_entries() { ink_env::test::run_test::(|_| { // We use `Pack` here since it `REQUIRES_DEEP_CLEAN_UP` - use crate::Pack; + use crate::pack::Pack; // Given let mut mapping: Mapping = Mapping::new([0u8; 32].into()); @@ -327,7 +327,7 @@ mod tests { fn can_clear_unexistent_entries() { ink_env::test::run_test::(|_| { // We use `Pack` here since it `REQUIRES_DEEP_CLEAN_UP` - use crate::Pack; + use crate::pack::Pack; // Given let mapping: Mapping = Mapping::new([0u8; 32].into()); diff --git a/crates/storage/src/lazy/mod.rs b/crates/storage/src/lazy/mod.rs index bd06983238..d3b84b5f87 100644 --- a/crates/storage/src/lazy/mod.rs +++ b/crates/storage/src/lazy/mod.rs @@ -15,309 +15,7 @@ //! Low-level collections and data structures to manage storage entities in the //! persisted contract storage. //! -//! Users should generally avoid using these collections directly in their -//! contracts and should instead adhere to the high-level collections found -//! in [`collections`][`crate::collections`]. -//! 
The low-level collections are mainly used as building blocks for internals -//! of other higher-level storage collections. -//! //! These low-level collections are not aware of the elements they manage thus //! extra care has to be taken when operating directly on them. -pub mod lazy_hmap; pub mod mapping; - -mod cache_cell; -mod entry; -mod lazy_array; -mod lazy_cell; -mod lazy_imap; - -#[doc(inline)] -pub use self::lazy_array::LazyArray; -use self::{ - cache_cell::CacheCell, - entry::{ - EntryState, - StorageEntry, - }, -}; -#[doc(inline)] -pub use self::{ - lazy_cell::LazyCell, - lazy_hmap::LazyHashMap, - lazy_imap::LazyIndexMap, - mapping::Mapping, -}; -use crate::traits::{ - KeyPtr, - SpreadAllocate, - SpreadLayout, -}; -use ink_primitives::Key; - -/// A lazy storage entity. -/// -/// This loads its value from storage upon first use. -/// -/// # Note -/// -/// Use this if the storage field does not need to be loaded in some or most cases. -#[derive(Debug)] -pub struct Lazy -where - T: SpreadLayout, -{ - cell: LazyCell, -} - -#[cfg(feature = "std")] -const _: () = { - use crate::traits::StorageLayout; - use ink_metadata::layout::Layout; - - impl StorageLayout for Lazy - where - T: StorageLayout + SpreadLayout, - { - fn layout(key_ptr: &mut KeyPtr) -> Layout { - ::layout(key_ptr) - } - } -}; - -impl SpreadLayout for Lazy -where - T: SpreadLayout, -{ - const FOOTPRINT: u64 = ::FOOTPRINT; - - fn pull_spread(ptr: &mut KeyPtr) -> Self { - Self { - cell: as SpreadLayout>::pull_spread(ptr), - } - } - - fn push_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::push_spread(&self.cell, ptr) - } - - fn clear_spread(&self, ptr: &mut KeyPtr) { - SpreadLayout::clear_spread(&self.cell, ptr) - } -} - -impl SpreadAllocate for Lazy -where - T: SpreadLayout, -{ - fn allocate_spread(ptr: &mut KeyPtr) -> Self { - Self { - cell: as SpreadAllocate>::allocate_spread(ptr), - } - } -} - -impl Lazy -where - T: SpreadLayout, -{ - /// Creates an eagerly populated lazy storage value. - #[must_use] - pub fn new(value: T) -> Self { - Self { - cell: LazyCell::new(Some(value)), - } - } - - /// Creates a true lazy storage value for the given key. - #[must_use] - pub(crate) fn from_key(key: Key) -> Self { - Self { - cell: LazyCell::lazy(key), - } - } -} - -impl Lazy -where - T: SpreadLayout, -{ - /// Returns a shared reference to the lazily loaded value. - /// - /// # Note - /// - /// This loads the value from the contract storage if this did not happen before. - /// - /// # Panics - /// - /// If loading from contract storage failed. - #[must_use] - pub fn get(lazy: &Self) -> &T { - lazy.cell.get().expect("encountered empty storage cell") - } - - /// Returns an exclusive reference to the lazily loaded value. - /// - /// # Note - /// - /// This loads the value from the contract storage if this did not happen before. - /// - /// # Panics - /// - /// If loading from contract storage failed. - #[must_use] - pub fn get_mut(lazy: &mut Self) -> &mut T { - lazy.cell.get_mut().expect("encountered empty storage cell") - } - - /// Sets the value to `value`, without executing any reads. - /// - /// # Note - /// - /// No reads from contract storage will be executed. - /// - /// This method should be preferred over dereferencing or `get_mut` - /// in case the returned value is of no interest to the caller. - /// - /// # Panics - /// - /// If accessing the inner value fails. 
- #[inline] - pub fn set(lazy: &mut Self, new_value: T) { - lazy.cell.set(new_value); - } -} - -impl From for Lazy -where - T: SpreadLayout, -{ - fn from(value: T) -> Self { - Self::new(value) - } -} - -impl Default for Lazy -where - T: Default + SpreadLayout, -{ - fn default() -> Self { - Self::new(Default::default()) - } -} - -impl core::cmp::PartialEq for Lazy -where - T: PartialEq + SpreadLayout, -{ - fn eq(&self, other: &Self) -> bool { - PartialEq::eq(Lazy::get(self), Lazy::get(other)) - } -} - -impl core::cmp::Eq for Lazy where T: Eq + SpreadLayout {} - -impl core::cmp::PartialOrd for Lazy -where - T: PartialOrd + SpreadLayout, -{ - fn partial_cmp(&self, other: &Self) -> Option { - PartialOrd::partial_cmp(Lazy::get(self), Lazy::get(other)) - } - fn lt(&self, other: &Self) -> bool { - PartialOrd::lt(Lazy::get(self), Lazy::get(other)) - } - fn le(&self, other: &Self) -> bool { - PartialOrd::le(Lazy::get(self), Lazy::get(other)) - } - fn ge(&self, other: &Self) -> bool { - PartialOrd::ge(Lazy::get(self), Lazy::get(other)) - } - fn gt(&self, other: &Self) -> bool { - PartialOrd::gt(Lazy::get(self), Lazy::get(other)) - } -} - -impl core::cmp::Ord for Lazy -where - T: core::cmp::Ord + SpreadLayout, -{ - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - Ord::cmp(Lazy::get(self), Lazy::get(other)) - } -} - -impl core::fmt::Display for Lazy -where - T: core::fmt::Display + SpreadLayout, -{ - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - core::fmt::Display::fmt(Lazy::get(self), f) - } -} - -impl core::hash::Hash for Lazy -where - T: core::hash::Hash + SpreadLayout, -{ - fn hash(&self, state: &mut H) { - Lazy::get(self).hash(state); - } -} - -impl core::convert::AsRef for Lazy -where - T: SpreadLayout, -{ - fn as_ref(&self) -> &T { - Lazy::get(self) - } -} - -impl core::convert::AsMut for Lazy -where - T: SpreadLayout, -{ - fn as_mut(&mut self) -> &mut T { - Lazy::get_mut(self) - } -} - -impl ink_prelude::borrow::Borrow for Lazy -where - T: SpreadLayout, -{ - fn borrow(&self) -> &T { - Lazy::get(self) - } -} - -impl ink_prelude::borrow::BorrowMut for Lazy -where - T: SpreadLayout, -{ - fn borrow_mut(&mut self) -> &mut T { - Lazy::get_mut(self) - } -} - -impl core::ops::Deref for Lazy -where - T: SpreadLayout, -{ - type Target = T; - - fn deref(&self) -> &Self::Target { - Lazy::get(self) - } -} - -impl core::ops::DerefMut for Lazy -where - T: SpreadLayout, -{ - fn deref_mut(&mut self) -> &mut Self::Target { - Lazy::get_mut(self) - } -} diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 6c13fbbac8..9734b6028e 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -49,27 +49,11 @@ extern crate quickcheck_macros; pub mod traits; -// Tracking issue [#1119]: We allow `dead_code` here since we're purposefully hiding the -// collections and will remove them at a later time. -#[allow(dead_code)] -pub(crate) mod collections; -#[allow(dead_code)] -pub(crate) mod lazy; - +mod lazy; mod pack; -#[cfg(test)] -mod hashmap_entry_api_tests; - #[cfg(test)] mod test_utils; #[doc(inline)] -pub use self::lazy::Mapping; - -#[doc(inline)] -pub(crate) use self::{ - collections::Vec, - lazy::Lazy, - pack::Pack, -}; +pub use self::lazy::mapping::Mapping; diff --git a/crates/storage/src/test_utils.rs b/crates/storage/src/test_utils.rs index af01021bfe..87afc2e6d2 100644 --- a/crates/storage/src/test_utils.rs +++ b/crates/storage/src/test_utils.rs @@ -76,139 +76,3 @@ macro_rules! 
push_pull_works_for_primitive { } }; } - -/// A trait to enable running some fuzz tests on a collection. -pub trait FuzzCollection { - type Collection; - type Item; - - /// Executes a series of operations on `self` in order to make it - /// equal to `template`. - fn equalize(&mut self, template: &Self::Collection); - - /// Takes a value from `self` and puts it into `item`. - fn assign(&mut self, item: Self::Item); -} - -/// Creates two fuzz tests. Both tests have the same flow: -/// - Take two instances of the collection, generated by our fuzzer -/// - Push `instance2` to storage, pull it out and assert that what -/// is pulled out is what was pushed. -/// - Do some mutations on the `pulled` object. Here the two tests -/// behave differently: -/// -/// * `fuzz_ $id _mutate_some` Mutates some entries of the data -/// structure based on the content of `instance2`. -/// -/// * `fuzz_ $id _mutate_all` Mutates the entire data structure, -/// so that it has the same content as `instance2`. -/// -/// - Push the mutated `pulled` object into storage again, pull it -/// out as `pulled2` and assert that both objects are equal. -/// - Clear the object from storage and assert that storage was -/// cleared up properly, without any leftovers. -#[macro_export] -macro_rules! fuzz_storage { - ($id:literal, $collection_type:ty) => { - ::paste::paste! { - /// Does some basic storage interaction tests whilst mutating - /// *some* of the data structure's entries. - #[allow(trivial_casts)] - #[quickcheck] - fn [< fuzz_ $id _mutate_some >] ( - instance1: $collection_type, - mut instance2: $collection_type, - ) { - ink_env::test::run_test::(|_| { - // we push the generated object into storage - let root_key = ink_primitives::Key::from([0x42; 32]); - let ptr = KeyPtr::from(root_key); - $crate::traits::push_spread_root(&instance1, &root_key.clone()); - - // we pull what's in storage and assert that this is what was just pushed - let mut pulled: $collection_type = $crate::traits::pull_spread_root(&root_key.clone()); - assert_eq!(instance1, pulled); - - // we iterate over what was pulled and call `assign` for all entries. - // this function may or may not modify elements of `pulled`. - pulled.iter_mut().for_each(|item| { - // this may leave some entries of `pulled` in `State::Preserved`. - // even though the instance which is supposed to be mutated is - // `pulled`, we still need to call this on a mutable `instance2`, - // since e.g. Vec does a `pop()` in assign, so that we don't always - // execute the same operation. - (&mut instance2).assign(item); - }); - - // we push the `pulled` object, on which we just executed mutations - // back into storage and asserts it can be pulled out intact again. - $crate::traits::push_spread_root(&pulled, &root_key.clone()); - let pulled2: $collection_type = $crate::traits::pull_spread_root(&root_key.clone()); - assert_eq!(pulled, pulled2); - - // we clear the objects from storage and assert that everything was - // removed without any leftovers. - SpreadLayout::clear_spread(&pulled2, &mut ptr.clone()); - SpreadLayout::clear_spread(&pulled, &mut ptr.clone()); - $crate::test_utils::assert_storage_clean(); - - Ok(()) - }) - .unwrap() - } - - /// Does some basic storage interaction tests whilst mutating - /// *all* the data structure's entries. 
- #[allow(trivial_casts)] - #[quickcheck] - fn [< fuzz_ $id _mutate_all >] ( - instance1: $collection_type, - instance2: $collection_type, - ) { - ink_env::test::run_test::(|_| { - // we push the generated object into storage - let root_key = ink_primitives::Key::from([0x42; 32]); - let ptr = KeyPtr::from(root_key); - $crate::traits::push_spread_root(&instance1, &root_key.clone()); - - // we pull what's in storage and assert that this is what was just pushed - let mut pulled: $collection_type = $crate::traits::pull_spread_root(&root_key.clone()); - assert_eq!(instance1, pulled); - - // `pulled` is going to be equalized to ` - (&mut pulled).equalize(&instance2); - - // we push the `pulled` object, on which we just executed mutations - // back into storage and assert it can be pulled out intact again and - // is equal to `instance2`. - $crate::traits::push_spread_root(&pulled, &root_key.clone()); - let pulled2: $collection_type = $crate::traits::pull_spread_root(&root_key.clone()); - assert_eq!(pulled, pulled2); - assert_eq!(pulled2, instance2); - - // we clear the objects from storage and assert that everything was - // removed without any leftovers. - SpreadLayout::clear_spread(&pulled2, &mut ptr.clone()); - SpreadLayout::clear_spread(&pulled, &mut ptr.clone()); - $crate::test_utils::assert_storage_clean(); - - Ok(()) - - }) - .unwrap() - } - } - }; -} - -/// Asserts that the storage is empty, without any leftovers. -#[cfg(all(test, feature = "ink-fuzz-tests"))] -pub fn assert_storage_clean() { - let contract_id = ink_env::test::callee::(); - let used_cells = - ink_env::test::count_used_storage_cells::( - &contract_id, - ) - .expect("used cells must be returned"); - assert_eq!(used_cells, 0); -} diff --git a/crates/storage/src/traits/mod.rs b/crates/storage/src/traits/mod.rs index 06f9e3b19c..f89c1f580b 100644 --- a/crates/storage/src/traits/mod.rs +++ b/crates/storage/src/traits/mod.rs @@ -37,13 +37,7 @@ pub use self::layout::{ LayoutCryptoHasher, StorageLayout, }; -pub(crate) use self::optspec::{ - clear_spread_root_opt, - pull_packed_root_opt, - pull_spread_root_opt, - push_packed_root_opt, - push_spread_root_opt, -}; +pub(crate) use self::optspec::pull_packed_root_opt; pub use self::{ impls::{ forward_allocate_packed, diff --git a/crates/storage/src/traits/optspec.rs b/crates/storage/src/traits/optspec.rs index 88d706cf53..1c0965c39d 100644 --- a/crates/storage/src/traits/optspec.rs +++ b/crates/storage/src/traits/optspec.rs @@ -18,86 +18,9 @@ //! The specializations make use of the storage entry state (occupied or vacant) //! in order to store the option's state thus using less storage in total. -use super::{ - KeyPtr, - PackedLayout, - SpreadLayout, -}; +use super::PackedLayout; use ink_primitives::Key; -pub fn pull_spread_root_opt(root_key: &Key) -> Option -where - T: SpreadLayout, -{ - // In case the contract storage is occupied we handle - // the Option as if it was a T. - ink_env::get_contract_storage::<()>(root_key) - .ok() - .flatten() - .map(|_| super::pull_spread_root::(root_key)) -} - -pub fn push_spread_root_opt(entity: Option<&T>, root_key: &Key) -where - T: SpreadLayout, -{ - match entity { - Some(value) => { - // Handle the Option as if it was a T. - // - // Sadly this does not not work well with `Option>`. - // For this we'd need specialization in Rust or similar. 
-            super::push_spread_root(value, root_key)
-        }
-        None => clear_spread_root_opt::<T, _>(root_key, || entity),
-    }
-}
-
-pub fn clear_spread_root_opt<'a, T: 'a, F>(root_key: &Key, f: F)
-where
-    T: SpreadLayout,
-    F: FnOnce() -> Option<&'a T>,
-{
-    // We can clean up some storage entity using its `SpreadLayout::clear_spread`
-    // implementation or its defined storage footprint.
-    //
-    // While using its `SpreadLayout::clear_spread` implementation is more precise
-    // and will only clean-up what is necessary it requires an actual instance.
-    // Loading such an instance if it is not already in the memory cache of some
-    // lazy abstraction will incur significant overhead.
-    // Using its defined storage footprint this procedure can eagerly clean-up
-    // the associated contract storage region, however, this might clean-up more
-    // cells than needed.
-    //
-    // There are types that need a so-called "deep" clean-up. An example for this
-    // is `storage::Box<storage::Box<T>>` where the outer storage box definitely
-    // needs to propagate clearing signals onto its inner `storage::Box` in order
-    // to properly clean-up the whole associate contract storage region.
-    // This is when we cannot avoid loading the entity for the clean-up procedure.
-    //
-    // If the entity that shall be cleaned-up does not require deep clean-up we
-    // check if its storage footprint exceeds a certain threshold and only then
-    // we will still load it first in order to not clean-up too many unneeded
-    // storage cells.
-    let footprint = <T as SpreadLayout>::FOOTPRINT;
-    if footprint >= super::FOOTPRINT_CLEANUP_THRESHOLD
-        || <T as SpreadLayout>::REQUIRES_DEEP_CLEAN_UP
-    {
-        // We need to load the entity before we remove its associated contract storage
-        // because it requires a deep clean-up which propagates clearing to its fields,
-        // for example in the case of `T` being a `storage::Box`.
-        if let Some(value) = f() {
-            super::clear_spread_root(value, root_key);
-            return
-        }
-    }
-    // Clean-up eagerly without potentially loading the entity from storage:
-    let mut ptr = KeyPtr::from(*root_key);
-    for _ in 0..footprint {
-        ink_env::clear_contract_storage(ptr.advance_by(1));
-    }
-}
-
 pub fn pull_packed_root_opt<T>(root_key: &Key) -> Option<T>
 where
     T: PackedLayout,
@@ -116,22 +39,3 @@ where
         value
     })
 }
-
-pub fn push_packed_root_opt<T>(entity: Option<&T>, root_key: &Key)
-where
-    T: PackedLayout,
-{
-    match entity {
-        Some(value) => {
-            // Handle the Option as if it was a T.
-            //
-            // Sadly this does not work well with `Option<Option<T>>`.
-            // For this we'd need specialization in Rust or similar.
-            super::push_packed_root(value, root_key);
-        }
-        None => {
-            // Clear the associated storage cell since the entity is `None`.
-            ink_env::clear_contract_storage(root_key);
-        }
-    }
-}
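
After this change, `Mapping` (re-exported via `pub use self::lazy::mapping::Mapping`) is the only high-level storage type the crate still exposes, with `pull_packed_root_opt` kept only as its crate-internal helper. Below is a minimal sketch, not part of the patch, of an additional test in the style of the retained `mapping.rs` tests (`can_clear_entries` above); it assumes `Mapping::insert`/`Mapping::get` behave as exercised by those tests and that it lives inside the same `mod tests` where `Mapping` is already in scope.

```rust
// Hypothetical companion test for `crates/storage/src/lazy/mapping.rs`,
// mirroring the `run_test` + `Mapping::new([0u8; 32].into())` pattern used
// by the retained tests in this patch. `insert`/`get` are assumed APIs.
#[test]
fn insert_and_get_roundtrip() {
    ink_env::test::run_test::<ink_env::DefaultEnvironment, _>(|_| {
        // Given: a mapping rooted at an explicit storage key.
        let mut mapping: Mapping<u8, u8> = Mapping::new([0u8; 32].into());

        // When: a value is written under a key.
        mapping.insert(&1, &42);

        // Then: the value can be lazily loaded back from contract storage,
        // while a missing key yields `None`.
        assert_eq!(mapping.get(&1), Some(42));
        assert_eq!(mapping.get(&2), None);

        Ok(())
    })
    .unwrap()
}
```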