Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions examples/erase_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,10 @@

#![no_std]

extern crate alloc;
extern crate lang_items;

use alloc::vec;
use core::fmt::Write;
use ctap2::env::tock::take_storage;
use libtock_drivers::console::Console;
Expand All @@ -28,11 +30,9 @@ libtock_core::stack_size! {0x800}
fn is_page_erased(storage: &dyn Storage, page: usize) -> bool {
let index = StorageIndex { page, byte: 0 };
let length = storage.page_size();
storage
.read_slice(index, length)
.unwrap()
.iter()
.all(|&x| x == 0xff)
let mut content = vec![0; length];
storage.read_slice(index, &mut content).unwrap();
content.iter().all(|&x| x == 0xff)
}

fn main() {
Expand Down
6 changes: 3 additions & 3 deletions examples/store_latency.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,9 +195,9 @@ fn main() {

// Results for nrf52840dk_opensk:
// StorageConfig { page_size: 4096, num_pages: 20 }
// Overwrite Length Boot Compaction Insert Remove
// no 50 words 16.2 ms 143.8 ms 18.3 ms 8.4 ms
// yes 1 words 303.8 ms 97.9 ms 9.7 ms 4.7 ms
// Overwrite Length Boot Compaction Insert Remove
// no 50 words 21.6 ms 149.1 ms 23.3 ms 11.0 ms
// yes 1 words 417.0 ms 103.2 ms 13.2 ms 6.5 ms
}

fn align(x: &str, n: usize) {
Expand Down
39 changes: 25 additions & 14 deletions libraries/persistent_store/src/buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -301,8 +301,9 @@ impl Storage for BufferStorage {
self.options.max_page_erases
}

fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> {
Ok(&self.storage[index.range(length, self)?])
fn read_slice(&self, index: StorageIndex, value: &mut [u8]) -> StorageResult<()> {
value.copy_from_slice(&self.storage[index.range(value.len(), self)?]);
Ok(())
}

fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()> {
Expand Down Expand Up @@ -490,6 +491,16 @@ mod tests {
const SECOND_WORD: &[u8] = &[0xca, 0xc9, 0xa9, 0x65];
const THIRD_WORD: &[u8] = &[0x88, 0x88, 0x88, 0x44];

fn read_slice(
store: &BufferStorage,
index: StorageIndex,
length: usize,
) -> StorageResult<Vec<u8>> {
let mut result = vec![0; length];
store.read_slice(index, &mut result)?;
Ok(result)
}

fn new_storage() -> Box<[u8]> {
vec![0xff; NUM_PAGES * OPTIONS.page_size].into_boxed_slice()
}
Expand Down Expand Up @@ -522,10 +533,10 @@ mod tests {
let mut buffer = BufferStorage::new(new_storage(), OPTIONS);
let index = StorageIndex { page: 0, byte: 0 };
let next_index = StorageIndex { page: 0, byte: 4 };
assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD);
assert_eq!(read_slice(&buffer, index, 4).unwrap(), BLANK_WORD);
buffer.write_slice(index, FIRST_WORD).unwrap();
assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD);
assert_eq!(buffer.read_slice(next_index, 4).unwrap(), BLANK_WORD);
assert_eq!(read_slice(&buffer, index, 4).unwrap(), FIRST_WORD);
assert_eq!(read_slice(&buffer, next_index, 4).unwrap(), BLANK_WORD);
}

#[test]
Expand All @@ -535,11 +546,11 @@ mod tests {
let other_index = StorageIndex { page: 1, byte: 0 };
buffer.write_slice(index, FIRST_WORD).unwrap();
buffer.write_slice(other_index, FIRST_WORD).unwrap();
assert_eq!(buffer.read_slice(index, 4).unwrap(), FIRST_WORD);
assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD);
assert_eq!(read_slice(&buffer, index, 4).unwrap(), FIRST_WORD);
assert_eq!(read_slice(&buffer, other_index, 4).unwrap(), FIRST_WORD);
buffer.erase_page(0).unwrap();
assert_eq!(buffer.read_slice(index, 4).unwrap(), BLANK_WORD);
assert_eq!(buffer.read_slice(other_index, 4).unwrap(), FIRST_WORD);
assert_eq!(read_slice(&buffer, index, 4).unwrap(), BLANK_WORD);
assert_eq!(read_slice(&buffer, other_index, 4).unwrap(), FIRST_WORD);
}

#[test]
Expand All @@ -551,15 +562,15 @@ mod tests {
let bad_page = StorageIndex { page: 2, byte: 0 };

// Reading a word in the storage is ok.
assert!(buffer.read_slice(index, 4).is_ok());
assert!(read_slice(&buffer, index, 4).is_ok());
// Reading a half-word in the storage is ok.
assert!(buffer.read_slice(half_index, 2).is_ok());
assert!(read_slice(&buffer, half_index, 2).is_ok());
// Reading even a single byte outside a page is not ok.
assert!(buffer.read_slice(over_index, 1).is_err());
assert!(read_slice(&buffer, over_index, 1).is_err());
// But reading an empty slice just after a page is ok.
assert!(buffer.read_slice(over_index, 0).is_ok());
assert!(read_slice(&buffer, over_index, 0).is_ok());
// Reading even an empty slice outside the storage is not ok.
assert!(buffer.read_slice(bad_page, 0).is_err());
assert!(read_slice(&buffer, bad_page, 0).is_err());

// Writing a word in the storage is ok.
assert!(buffer.write_slice(index, FIRST_WORD).is_ok());
Expand Down
4 changes: 2 additions & 2 deletions libraries/persistent_store/src/format.rs
Original file line number Diff line number Diff line change
Expand Up @@ -883,8 +883,8 @@ impl Header {
///
/// If the user entry has no payload, the `footer` must be set to `None`. Otherwise it should be
/// the last word of the entry.
pub fn check(&self, footer: Option<&[u8]>) -> bool {
footer.map_or(0, count_zeros) == self.checksum
pub fn check(&self, footer: Option<WordSlice>) -> bool {
footer.map_or(0, |x| count_zeros(&x)) == self.checksum
}
}

Expand Down
4 changes: 2 additions & 2 deletions libraries/persistent_store/src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,8 @@ pub trait Storage {

/// Reads a byte slice from the storage.
///
/// The `index` must designate `length` bytes in the storage.
fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]>;
/// The `index` must designate `value.len()` bytes in the storage.
fn read_slice(&self, index: StorageIndex, value: &mut [u8]) -> StorageResult<()>;

/// Writes a word slice to the storage.
///
Expand Down
52 changes: 30 additions & 22 deletions libraries/persistent_store/src/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

use crate::format::{
is_erased, CompactInfo, Format, Header, InitInfo, InternalEntry, Padding, ParsedWord, Position,
Word, WordState,
Word, WordSlice, WordState,
};
#[cfg(feature = "std")]
pub use crate::model::{StoreModel, StoreOperation};
Expand Down Expand Up @@ -490,7 +490,8 @@ impl<S: Storage> Store<S> {
fn recover_initialize(&mut self) -> StoreResult<()> {
let word_size = self.format.word_size();
for page in 0..self.format.num_pages() {
let (init, rest) = self.read_page(page).split_at(word_size as usize);
let page_content = self.read_page(page);
let (init, rest) = page_content.split_at(word_size as usize);
if (page > 0 && !is_erased(init)) || !is_erased(rest) {
return Ok(());
}
Expand Down Expand Up @@ -890,15 +891,15 @@ impl<S: Storage> Store<S> {

/// Sets the padding bit of a user header.
fn set_padding(&mut self, pos: Position) -> StoreResult<()> {
let mut word = Word::from_slice(self.read_word(pos));
let mut word = Word::from_slice(&self.read_word(pos));
self.format.set_padding(&mut word)?;
self.write_slice(pos, &word.as_slice())?;
Ok(())
}

/// Sets the deleted bit of a user header.
fn set_deleted(&mut self, pos: Position) -> StoreResult<()> {
let mut word = Word::from_slice(self.read_word(pos));
let mut word = Word::from_slice(&self.read_word(pos));
self.format.set_deleted(&mut word);
self.write_slice(pos, &word.as_slice())?;
Ok(())
Expand Down Expand Up @@ -1007,7 +1008,7 @@ impl<S: Storage> Store<S> {
}
*pos += 1 + length;
ParsedEntry::User(header)
} else if footer.map_or(true, |x| is_erased(x)) {
} else if footer.map_or(true, |x| is_erased(&x)) {
self.parse_partial(pos)
} else {
*pos += 1 + length;
Expand All @@ -1028,7 +1029,7 @@ impl<S: Storage> Store<S> {
fn parse_partial(&self, pos: &mut Position) -> ParsedEntry {
let mut length = None;
for i in 0..self.format.max_prefix_len() {
if !is_erased(self.read_word(*pos + i)) {
if !is_erased(&self.read_word(*pos + i)) {
length = Some(i);
}
}
Expand All @@ -1044,57 +1045,63 @@ impl<S: Storage> Store<S> {
/// Parses the init info of a page.
fn parse_init(&self, page: Nat) -> StoreResult<WordState<InitInfo>> {
let index = self.format.index_init(page);
let word = self.storage_read_slice(index, self.format.word_size());
self.format.parse_init(Word::from_slice(word))
let mut word = WordSlice::default();
self.storage_read_slice(index, &mut word);
self.format.parse_init(Word::from_slice(&word))
}

/// Parses the compact info of a page.
fn parse_compact(&self, page: Nat) -> StoreResult<WordState<CompactInfo>> {
let index = self.format.index_compact(page);
let word = self.storage_read_slice(index, self.format.word_size());
self.format.parse_compact(Word::from_slice(word))
let mut word = WordSlice::default();
self.storage_read_slice(index, &mut word);
self.format.parse_compact(Word::from_slice(&word))
}

/// Parses a word from the virtual storage.
fn parse_word(&self, pos: Position) -> StoreResult<WordState<ParsedWord>> {
self.format
.parse_word(Word::from_slice(self.read_word(pos)))
.parse_word(Word::from_slice(&self.read_word(pos)))
}

/// Reads a slice from the virtual storage.
///
/// The slice may span 2 pages.
fn read_slice(&self, pos: Position, length: Nat) -> Vec<u8> {
let mut result = Vec::with_capacity(length as usize);
let mut result = vec![0; length as usize];
let index = pos.index(&self.format);
let max_length = self.format.page_size() - usize_to_nat(index.byte);
result.extend_from_slice(self.storage_read_slice(index, min(length, max_length)));
self.storage_read_slice(index, &mut result[..min(length, max_length) as usize]);
if length > max_length {
// The slice spans the next page.
let index = pos.next_page(&self.format).index(&self.format);
result.extend_from_slice(self.storage_read_slice(index, length - max_length));
self.storage_read_slice(index, &mut result[max_length as usize..]);
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Double checking: I was wondering how much more copying we will have to do with this PR. Looks like we have to copy up to 2 pages now, instead of just the final slice that has always been a copy?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, there will be more allocations and more copying with this PR.

However, for the specific code on which this comment is attached, there's actually no additional allocation or copy. That's probably one of the rare exceptions. For me the most problematic place would be when reading a page just to check that it's erased (this doesn't happen often, at boot and compaction) and when writing a slice to check that the write is valid (this happens at store mutation). When reading, we don't have additional penalty, which is actually ironic since the PR is only changing the reading API. This comes from the fact that the store API is already allocating and copying, so we don't add more. The allocation and copying during store mutation (insert, remove, compaction, etc) is acceptable because it's dominated by the cost of the actual flash write. However the allocation and copying for booting the store is more problematic. But since it happens at boot time, the extra cost is less sensitive.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you have a store benchmark you can quickly run, out of curiosity?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point, I forgot about it. I've updated the numbers. Here's a summary:

  • Boot is 35% slower
  • Compaction is 5% slower
  • Insert is 30% slower
  • Remove is 35% slower

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

While that may not be problematic, how hard would it be to have an API that is still general over different use cases (files and embedded), but with fewer performance issues on embedded? Just to understand the alternative.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Indeed, there's an alternative that we may prefer: #487 .

I believe there would be an ideal alternative. I don't think it's hard to implement, so I might just try.

}
result
}

/// Reads a word from the virtual storage.
fn read_word(&self, pos: Position) -> &[u8] {
self.storage_read_slice(pos.index(&self.format), self.format.word_size())
fn read_word(&self, pos: Position) -> WordSlice {
let mut word = WordSlice::default();
self.storage_read_slice(pos.index(&self.format), &mut word);
word
}

/// Reads a physical page.
fn read_page(&self, page: Nat) -> &[u8] {
fn read_page(&self, page: Nat) -> Vec<u8> {
let index = StorageIndex {
page: page as usize,
byte: 0,
};
self.storage_read_slice(index, self.format.page_size())
let mut page = vec![0; self.format.page_size() as usize];
self.storage_read_slice(index, &mut page);
page
}

/// Reads a slice from the physical storage.
fn storage_read_slice(&self, index: StorageIndex, length: Nat) -> &[u8] {
fn storage_read_slice(&self, index: StorageIndex, value: &mut [u8]) {
// The only possible failures are if the slice spans multiple pages.
self.storage.read_slice(index, length as usize).unwrap()
self.storage.read_slice(index, value).unwrap()
}

/// Writes a slice to the virtual storage.
Expand All @@ -1119,7 +1126,8 @@ impl<S: Storage> Store<S> {
fn storage_write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StoreResult<()> {
let word_size = self.format.word_size();
debug_assert!(usize_to_nat(value.len()) % word_size == 0);
let slice = self.storage.read_slice(index, value.len())?;
let mut slice = vec![0; value.len()];
self.storage.read_slice(index, &mut slice)?;
// Skip as many words that don't need to be written as possible.
for start in (0..usize_to_nat(value.len())).step_by(word_size as usize) {
if is_write_needed(
Expand All @@ -1142,7 +1150,7 @@ impl<S: Storage> Store<S> {

/// Erases a page if not already erased.
fn storage_erase_page(&mut self, page: Nat) -> StoreResult<()> {
if !is_erased(self.read_page(page)) {
if !is_erased(&self.read_page(page)) {
self.storage.erase_page(page as usize)?;
}
Ok(())
Expand Down
15 changes: 10 additions & 5 deletions src/env/tock/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,11 @@ impl TockStorage {
fn is_page_aligned(&self, x: usize) -> bool {
is_aligned(self.page_size, x)
}

fn find_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> {
let start = index.range(length, self)?.start;
find_slice(&self.storage_locations, start, length)
}
}

impl Storage for TockStorage {
Expand All @@ -196,23 +201,23 @@ impl Storage for TockStorage {
self.max_page_erases
}

fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]> {
let start = index.range(length, self)?.start;
find_slice(&self.storage_locations, start, length)
fn read_slice(&self, index: StorageIndex, value: &mut [u8]) -> StorageResult<()> {
value.copy_from_slice(self.find_slice(index, value.len())?);
Ok(())
}

fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()> {
if !self.is_word_aligned(index.byte) || !self.is_word_aligned(value.len()) {
return Err(StorageError::NotAligned);
}
let ptr = self.read_slice(index, value.len())?.as_ptr() as usize;
let ptr = self.find_slice(index, value.len())?.as_ptr() as usize;
write_slice(ptr, value)
}

fn erase_page(&mut self, page: usize) -> StorageResult<()> {
let index = StorageIndex { page, byte: 0 };
let length = self.page_size();
let ptr = self.read_slice(index, length)?.as_ptr() as usize;
let ptr = self.find_slice(index, length)?.as_ptr() as usize;
erase_page(ptr, length)
}
}
Expand Down