From 0ec6653d40741f4755e84398391eed8aaf5f9d84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jose=CC=81=20Molina?= Date: Tue, 25 Mar 2025 13:30:07 +0100 Subject: [PATCH 1/3] Improve unbounded storage iteration --- src/critical/Avoid_Unbounded_Iteration.md | 30 ++++++++++++++--------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/critical/Avoid_Unbounded_Iteration.md b/src/critical/Avoid_Unbounded_Iteration.md index c7db5c5..7db885d 100644 --- a/src/critical/Avoid_Unbounded_Iteration.md +++ b/src/critical/Avoid_Unbounded_Iteration.md @@ -12,11 +12,13 @@ The following example illustrates an iteration over all items in `big_data_set` ```rust #[pallet::storage] -pub type UnboundedData = StorageValue<_, Vec>; +pub type UnboundedData = StorageMap<_, Blake2_128Concat, T::AccountId, u32>; -let big_data_set = UnboundedData::::get(); -for item in big_data_set { - // Process each item with no limit +fn iterate_over_data() { + let big_data_set = UnboundedData::::iter(); + for (acc, data) in big_data_set { + // Process each item with no limit + } } ``` @@ -30,11 +32,13 @@ Use a bounded iterator or limit the number of items processed in each iteration. 
const MAX_ITEMS: usize = 20; #[pallet::storage] -pub type UnboundedData = StorageValue<_, Vec>; +pub type UnboundedData = StorageMap<_, Blake2_128Concat, T::AccountId, u32>; -let big_data = UnboundedData::::get(); -for item in big_data.iter().take(MAX_ITEMS) { - // Process a limited number of items safely +fn iterate_over_data() { + let big_data = UnboundedData::::iter(); + for (acc, data) in big_data.take(MAX_ITEMS) { + // Process a limited number of items safely + } } ``` @@ -46,11 +50,13 @@ Alternatively, use bounded data structures to enforce strict size limits at the ```rust #[pallet::storage] -pub type BoundedData = StorageValue<_, BoundedVec>; +pub type BoundedData = StorageValue<_, BoundedBTreeMap>; -let bounded_data = BoundedData::::get(); -for item in bounded_data { - // Iterates over a data structure with bounded size. +fn iterate_over_data() { + let bounded_data = BoundedData::::get(); + for (acc, data) in bounded_data { + // Iterates over a data structure with bounded size. + } } ``` From e05f54d217ba21f850078a997360735c9862c8b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jose=CC=81=20Molina?= Date: Tue, 25 Mar 2025 13:53:14 +0100 Subject: [PATCH 2/3] Improve storage growth --- src/high/Be_Careful_With_Storage_Growth.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/high/Be_Careful_With_Storage_Growth.md b/src/high/Be_Careful_With_Storage_Growth.md index badde4f..f427b69 100644 --- a/src/high/Be_Careful_With_Storage_Growth.md +++ b/src/high/Be_Careful_With_Storage_Growth.md @@ -12,6 +12,7 @@ The following code allows adding entries without any limit, leading to uncontrol ```rust #[pallet::storage] +#[pallet::unbounded] pub type Entries = StorageValue<_, Vec>; fn add_entry(entry: u32) { @@ -32,15 +33,13 @@ pub type Entries = StorageValue<_, BoundedVec>; #[pallet::error] pub enum Error { - /// MaxEntries limit reached + /// MaxEntries limit reached. 
TooManyEntries, } fn add_entry_limited(entry: u32) -> Result<(), Error> { - Entries::::try_mutate(|entries| { - entries.try_push(entry).map_err(|_| Error::::TooManyEntries)?; - Ok(()) - }) + Entries::::try_append(entry).map_err(|_| Error::::TooManyEntries)?; + Ok(()) } ``` From ee76e2c326253dbeef89f0dc95edd07e9ccb3c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jose=CC=81=20Molina?= Date: Tue, 25 Mar 2025 14:05:47 +0100 Subject: [PATCH 3/3] Add extra section about try_append --- src/SUMMARY.md | 1 + src/TABLE.md | 3 +- src/high/Be_Careful_With_Storage_Growth.md | 2 +- src/medium/Append_Entries_Efficiently.md | 48 ++++++++++++++++++++++ 4 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 src/medium/Append_Entries_Efficiently.md diff --git a/src/SUMMARY.md b/src/SUMMARY.md index 28ef1fc..0538485 100644 --- a/src/SUMMARY.md +++ b/src/SUMMARY.md @@ -29,6 +29,7 @@ - [Medium Severity](medium/README.md) + - [Append entries efficiently](medium/Append_Entries_Efficiently.md) - [Remove deprecated storage getters](medium/Remove_Deprecated_Storage_Getters.md) - [Avoid hardcoded parameters and values](medium/Avoid_Hardcoded_Parameters_and_Values.md) - [Include tests for edge cases](medium/Include_Tests_for_Edge_Cases.md) diff --git a/src/TABLE.md b/src/TABLE.md index 1d2c767..ad8f5a7 100644 --- a/src/TABLE.md +++ b/src/TABLE.md @@ -4,7 +4,7 @@ | Critical | Use appropriate origin checks | Open access on extrinsics without checks may allow unauthorized actions that can compromise security | Add access control checks to limit access to specific users or roles | | Critical | Avoid unbounded iteration | Unbounded iterations over large data structures can lead to resource exhaustion and potential denial of service | Implement limits or use a bounded storage map for these iterations | | Critical | Unchecked input data | Lack of input validation can lead to unexpected behaviors and potential vulnerabilities | Validate input data before processing to ensure safe and 
predictable behavior | -| Critical | Avoid unwrap usage inside runtime | Using `unwrap()` or `expect()` without proper error handling can lead to runtime panics and crashes | Handle errors gracefully with `Result` or `Option` types to prevent panics | +| Critical | Avoid unwrap usage inside runtime | Using `unwrap()` or `expect()` without proper error handling can lead to runtime panics and crashes | Handle errors gracefully with `Result` or `Option` types to prevent panics | | Critical | Use benchmarking for accurate dynamic weights | Using hardcoded weights for extrinsics can lead to inaccurate resource estimations and performance issues. | Implement benchmarking to dynamically assess the weights of functions, ensuring they accurately reflect execution costs | | High | Make proper usage of XCM `Junctions` | Misuse of junction types (especially GeneralIndex) for purposes beyond their intended entity representation can lead to incorrect path routing | Use junctions strictly for their intended purpose of representing entities in Location paths; propose RFCs for new needs | | High | Properly setup XCM `Barrier` | Improperly configured XCM executor barriers can allow unauthorized free execution from any origin | Implement restrictive barriers with explicit authorization for unpaid execution and clear documentation of intended uses | @@ -19,6 +19,7 @@ | High | Avoid redundant storage access in mutations | Using both try_mutate and insert leads to unnecessary storage accesses | Use `try_mutate` or `try_mutate_exists` to read, modify, and write in a single step | | High | Prevent unnecessary reads and writes in storage access | Frequent reads and writes to storage without optimization can degrade performance | Use efficient storage access methods such as `try_mutate` to combine reads and writes | | High | Implement `try-state` Hook | The absence of `try-state` hooks prevents runtime sanity checks, making it harder to ensure that the storage state is sensible after 
upgrades | Implement the `try-state` hook to perform thorough state checks without altering storage | +| Medium | Append entries efficiently | Using `try_mutate` has a severe penalty when appending elements to a `StorageValue` | Use `try_append` instead of `try_mutate` whenever possible | | Medium | Implement proper XCM fee management | Using the FeeManager unit type without consideration leads to unintended fee burning rather than proper fee handling | Implement proper FeeManager that either deposits or distributes fees, with clear handling of fee-exempt locations | | Medium | Remove deprecated storage getters | Using deprecated storage getters may lead to compatibility issues in future versions | Replace deprecated getters with the recommended methods in updated frameworks | | Medium | Avoid hardcoded parameters and values | Hardcoding parameters can reduce flexibility and adaptability to different environments | Use configurable parameters to enhance adaptability | diff --git a/src/high/Be_Careful_With_Storage_Growth.md b/src/high/Be_Careful_With_Storage_Growth.md index f427b69..2011c15 100644 --- a/src/high/Be_Careful_With_Storage_Growth.md +++ b/src/high/Be_Careful_With_Storage_Growth.md @@ -37,7 +37,7 @@ pub enum Error { TooManyEntries, } -fn add_entry_limited(entry: u32) -> Result<(), Error> { +fn add_entry_limited(entry: u32) -> Result<(), DispatchError> { Entries::::try_append(entry).map_err(|_| Error::::TooManyEntries)?; Ok(()) } diff --git a/src/medium/Append_Entries_Efficiently.md b/src/medium/Append_Entries_Efficiently.md new file mode 100644 index 0000000..8b54207 --- /dev/null +++ b/src/medium/Append_Entries_Efficiently.md @@ -0,0 +1,48 @@ +# Append Entries Efficiently + +**Severity**: Medium + +## Description + +Using `try_append` instead of `try_mutate` for a Substrate `StorageValue` has several advantages. 
Specifically,
+`try_append` is more efficient because it avoids reading and rewriting the entire storage value, resulting in lower
+computation and storage costs when appending a single item. This makes it particularly useful for operations where the
+goal is simply to append an entry without other modifications. By contrast, `try_mutate` reads the entire storage value
+into memory, modifies it, and writes it back, which can be both computationally and storage-intensive for large
+collections.
+
+## What should be avoided
+
+Using `try_mutate` has a severe penalty when compared to `try_append` and should be avoided whenever possible.
+
+```rust
+#[pallet::storage]
+pub type Entries<T: Config> = StorageValue<_, BoundedVec<u32, T::MaxEntries>>;
+
+#[pallet::error]
+pub enum Error<T> {
+    /// MaxEntries limit reached.
+    TooManyEntries,
+}
+
+fn add_entry(entry: u32) -> Result<(), DispatchError> {
+    // Adds a new entry by reading the whole storage item and editing it. This is inefficient.
+    Entries::<T>::try_mutate(|entries| {
+        entries.try_push(entry).map_err(|_| Error::<T>::TooManyEntries)?;
+        Ok(())
+    })
+}
+```
+
+## Best practice
+
+Use `try_append` for improved readability and efficiency.
+
+```rust
+fn add_entry(entry: u32) -> Result<(), DispatchError> {
+    // Adds a new entry efficiently.
+    Entries::<T>::try_append(entry).map_err(|_| Error::<T>::TooManyEntries)?;
+    Ok(())
+}
+```