Merge pull request #13 from vorner/check-min-size
kornelski committed Feb 13, 2021
2 parents 569bd9e + 9668113 commit 99e593c
Showing 2 changed files with 106 additions and 19 deletions.
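As context for the new capacity assertions below, here is a minimal sketch of the misuse they catch. The `sized_chunks::Chunk` import path and the `typenum` capacities are assumptions for illustration; only the `unit`/`pair` behaviour comes from this diff.

    use sized_chunks::Chunk; // assumed re-export path
    use typenum::{U0, U3};

    fn main() {
        // Plenty of capacity: fine before and after this change.
        let ok = Chunk::<i32, U3>::pair(1, 2);
        assert_eq!(ok.len(), 2);

        // Zero capacity: previously this wrote past the backing storage;
        // with this commit it trips `assert!(Self::CAPACITY >= 1)` up front.
        let _ = Chunk::<i32, U0>::unit(1);
    }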
38 changes: 35 additions & 3 deletions src/ring_buffer/mod.rs
@@ -11,7 +11,7 @@ use core::cmp::Ordering;
use core::fmt::{Debug, Error, Formatter};
use core::hash::{Hash, Hasher};
use core::iter::FromIterator;
use core::mem::MaybeUninit;
use core::mem::{replace, MaybeUninit};
use core::ops::{Bound, Range, RangeBounds};
use core::ops::{Index, IndexMut};

@@ -253,6 +253,7 @@ where
#[inline]
#[must_use]
pub fn unit(value: A) -> Self {
assert!(Self::CAPACITY >= 1);
let mut buffer = Self {
origin: 0.into(),
length: 1,
@@ -268,6 +269,7 @@ where
#[inline]
#[must_use]
pub fn pair(value1: A, value2: A) -> Self {
assert!(Self::CAPACITY >= 2);
let mut buffer = Self {
origin: 0.into(),
length: 2,
@@ -714,10 +716,22 @@ where
}
}
let mut index = self.raw(index);
for value in iter {
// Panic safety: until the gap is completely filled there's a hole somewhere in the middle,
// and the destructor would drop elements that don't exist. So we pretend to be empty for a
// while (and leak the already-written elements instead if something goes wrong).
let mut inserted = 0;
let length = replace(&mut self.length, 0);
for value in iter.take(insert_size) {
unsafe { self.force_write(index, value) };
index += 1;
inserted += 1;
}
// A shortfall here would leave a hole in the middle, so fail loudly instead
assert_eq!(
inserted, insert_size,
"Iterator has fewer elements than advertised",
);
self.length = length;
}

/// Remove the value at index `index`, shifting all the following values to
@@ -787,8 +801,12 @@ impl<A: Clone, N: ChunkLength<A>> Clone for RingBuffer<A, N> {
let mut out = Self::new();
out.origin = self.origin;
out.length = self.length;
for index in out.range() {
let range = self.range();
// Panic safety. If we panic, we don't want to drop more than we have initialized.
out.length = 0;
for index in range {
unsafe { out.force_write(index, (&*self.ptr(index)).clone()) };
out.length += 1;
}
out
}
@@ -1003,6 +1021,8 @@ impl<'a, A, N: ChunkLength<A>> IntoIterator for &'a mut RingBuffer<A, N> {

#[cfg(test)]
mod test {
use typenum::U0;

use super::*;

#[test]
@@ -1121,4 +1141,16 @@ mod test {
}
assert_eq!(0, counter.load(Ordering::Relaxed));
}

#[test]
#[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")]
fn unit_on_empty() {
let _ = RingBuffer::<usize, U0>::unit(1);
}

#[test]
#[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")]
fn pair_on_empty() {
let _ = RingBuffer::<usize, U0>::pair(1, 2);
}
}
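The `insert_from` and `Clone` hunks above share one idea: before writing into possibly uninitialized slots, make the buffer claim to be empty so that an unwinding destructor never drops half-written storage, and only restore the length once every slot is filled. Below is a simplified, standalone sketch of that pattern under assumed names (`Buf`, `extend_exact`); it is not the crate's API.

    use core::mem::{replace, MaybeUninit};

    struct Buf<T, const N: usize> {
        len: usize,
        slots: [MaybeUninit<T>; N],
    }

    impl<T, const N: usize> Buf<T, N> {
        fn new() -> Self {
            // An array of `MaybeUninit` needs no initialization.
            Buf { len: 0, slots: unsafe { MaybeUninit::uninit().assume_init() } }
        }

        /// Append exactly `count` items from `iter`, leaking (never dropping)
        /// the already-written prefix if the iterator panics or falls short.
        fn extend_exact<I: Iterator<Item = T>>(&mut self, iter: I, count: usize) {
            assert!(self.len + count <= N);
            let mut index = self.len;
            // Pretend to be empty: a panic inside `iter` must not let `Drop`
            // touch half-initialized slots, so we leak instead.
            let old_len = replace(&mut self.len, 0);
            let mut written = 0;
            for value in iter.take(count) {
                self.slots[index].write(value);
                index += 1;
                written += 1;
            }
            assert_eq!(written, count, "Iterator has fewer elements than advertised");
            self.len = old_len + count;
        }
    }

    impl<T, const N: usize> Drop for Buf<T, N> {
        fn drop(&mut self) {
            // Only the initialized prefix is dropped.
            for slot in &mut self.slots[..self.len] {
                unsafe { slot.assume_init_drop() };
            }
        }
    }

    fn main() {
        let mut buf: Buf<String, 4> = Buf::new();
        buf.extend_exact(vec!["a".to_string(), "b".to_string()].into_iter(), 2);
        assert_eq!(buf.len, 2);
    }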
87 changes: 71 additions & 16 deletions src/sized_chunk/mod.rs
@@ -125,9 +125,13 @@
fn clone(&self) -> Self {
let mut out = Self::new();
out.left = self.left;
out.right = self.right;
out.right = self.left;
for index in self.left..self.right {
unsafe { Chunk::force_write(index, (*self.ptr(index)).clone(), &mut out) }
// Panic safety: advance the right index to cover only what is actually initialized. That
// way we never drop uninitialized slots, but also don't leak if a clone panics in the
// middle.
out.right = index + 1;
}
out
}
@@ -151,6 +155,7 @@

/// Construct a new chunk with one item.
pub fn unit(value: A) -> Self {
assert!(Self::CAPACITY >= 1);
let mut chunk = Self {
left: 0,
right: 1,
@@ -164,6 +169,7 @@

/// Construct a new chunk with two items.
pub fn pair(left: A, right: A) -> Self {
assert!(Self::CAPACITY >= 2);
let mut chunk = Self {
left: 0,
right: 2,
@@ -282,6 +288,49 @@ where
}
}

/// Write values from the iterator into the range starting at `write_index`.
///
/// Overwrites whatever is in the target range without dropping it, even if those slots were
/// already initialized (they are expected to be empty). Does not update the left or right
/// index.
///
/// # Safety
///
/// Range checks must already have been performed.
///
/// # Panics
///
/// If the iterator panics, the chunk becomes conceptually empty and will leak any previous
/// elements (even the ones outside the range).
#[inline]
unsafe fn write_from_iter<I>(mut write_index: usize, iter: I, chunk: &mut Self)
where
I: ExactSizeIterator<Item = A>,
{
// Panic safety. We make the array conceptually empty, so we never drop anything that is
// uninitialized. We do so because we expect to be called while there's a "hole" in the
// array, opened up to make room for the new elements. We restore the indices once
// everything goes fine, but leak the existing elements on panic. This is bad, but better
// than dropping elements that don't exist.
//
// Should we worry about some better panic recovery than this?
let left = replace(&mut chunk.left, 0);
let right = replace(&mut chunk.right, 0);
let len = iter.len();
let expected_end = write_index + len;
for value in iter.take(len) {
Chunk::force_write(write_index, value, chunk);
write_index += 1;
}
// Oops, we have a hole in here now. That would be bad, give up.
assert_eq!(
expected_end, write_index,
"ExactSizeIterator yielded fewer values than advertised",
);
chunk.left = left;
chunk.right = right;
}

/// Copy a range between chunks
#[inline]
unsafe fn force_copy_to(
@@ -583,32 +632,23 @@ where
if self.right == N::USIZE || (self.left >= insert_size && left_size < right_size) {
unsafe {
Chunk::force_copy(self.left, self.left - insert_size, left_size, self);
let mut write_index = real_index - insert_size;
for value in iter {
Chunk::force_write(write_index, value, self);
write_index += 1;
}
let write_index = real_index - insert_size;
Chunk::write_from_iter(write_index, iter, self);
}
self.left -= insert_size;
} else if self.left == 0 || (self.right + insert_size <= Self::CAPACITY) {
unsafe {
Chunk::force_copy(real_index, real_index + insert_size, right_size, self);
let mut write_index = real_index;
for value in iter {
Chunk::force_write(write_index, value, self);
write_index += 1;
}
let write_index = real_index;
Chunk::write_from_iter(write_index, iter, self);
}
self.right += insert_size;
} else {
unsafe {
Chunk::force_copy(self.left, 0, left_size, self);
Chunk::force_copy(real_index, left_size + insert_size, right_size, self);
let mut write_index = left_size;
for value in iter {
Chunk::force_write(write_index, value, self);
write_index += 1;
}
let write_index = left_size;
Chunk::write_from_iter(write_index, iter, self);
}
self.right -= self.left;
self.right += insert_size;
@@ -817,6 +857,7 @@ where
N: ChunkLength<A>,
{
fn from(array: &mut InlineArray<A, T>) -> Self {
assert!(array.len() <= Self::CAPACITY);
let mut out = Self::new();
out.left = 0;
out.right = array.len();
@@ -975,6 +1016,8 @@ where

#[cfg(test)]
mod test {
use typenum::U0;

use super::*;

#[test]
@@ -1183,4 +1226,16 @@ mod test {
}
assert_eq!(0, counter.load(Ordering::Relaxed));
}

#[test]
#[should_panic(expected = "assertion failed: Self::CAPACITY >= 1")]
fn unit_on_empty() {
Chunk::<usize, U0>::unit(1);
}

#[test]
#[should_panic(expected = "assertion failed: Self::CAPACITY >= 2")]
fn pair_on_empty() {
Chunk::<usize, U0>::pair(1, 2);
}
}
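`write_from_iter` trusts the length reported by `ExactSizeIterator`, and the new assertion is what turns a lying iterator into a clean panic rather than a hole of uninitialized slots. A sketch of such an iterator follows; the `Chunk::insert_from` call and the `U8` capacity are assumptions about the public API rather than something shown in this diff.

    use sized_chunks::Chunk; // assumed re-export path
    use typenum::U8;

    /// Reports three elements but only ever yields two.
    struct Lying(usize);

    impl Iterator for Lying {
        type Item = usize;
        fn next(&mut self) -> Option<usize> {
            if self.0 < 2 {
                self.0 += 1;
                Some(self.0)
            } else {
                None
            }
        }
        fn size_hint(&self) -> (usize, Option<usize>) {
            (3, Some(3)) // the lie: `len()` is derived from this
        }
    }

    impl ExactSizeIterator for Lying {}

    fn main() {
        let mut chunk = Chunk::<usize, U8>::new();
        // With this commit, the shortfall trips
        // "ExactSizeIterator yielded fewer values than advertised"
        // instead of leaving uninitialized slots inside the chunk.
        chunk.insert_from(0, Lying(0));
    }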
