Use atomic reads and writes for available and used ring. #128

Merged · 1 commit · Apr 23, 2024
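In short: the ring fields that the device reads and writes concurrently with the driver change from plain `u16` to `AtomicU16`, and the standalone `SeqCst` fences that previously ordered the plain accesses are replaced by `Release` stores and `Acquire` loads on the fields themselves. A minimal sketch of the before/after pattern (illustrative only, with simplified types; the real structs and methods are in the diff below):

```rust
use core::sync::atomic::{fence, AtomicU16, Ordering};

// Before: a plain field; ordering must come from a separate fence.
struct AvailBefore {
    idx: u16,
}

// After: an atomic field; ordering is carried by the access itself.
struct AvailAfter {
    idx: AtomicU16,
}

fn publish_before(avail: &mut AvailBefore, new_idx: u16) {
    avail.idx = new_idx;
    // Write barrier so the device sees the descriptor writes before the new
    // index. The plain write racing with the device's read is also a data
    // race under the Rust memory model.
    fence(Ordering::SeqCst);
}

fn publish_after(avail: &AvailAfter, new_idx: u16) {
    // The Release store orders all earlier writes before the index update,
    // so no separate fence is needed and the concurrent access is
    // well-defined.
    avail.idx.store(new_idx, Ordering::Release);
}
```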
76 changes: 43 additions & 33 deletions src/queue.rs
@@ -13,7 +13,7 @@
#[cfg(test)]
use core::ptr;
use core::ptr::NonNull;
-use core::sync::atomic::{fence, Ordering};
+use core::sync::atomic::{fence, AtomicU16, Ordering};
use zerocopy::{AsBytes, FromBytes, FromZeroes};

/// The mechanism for bulk data transport on virtio devices.
@@ -71,7 +71,7 @@
pub fn new<T: Transport>(
transport: &mut T,
idx: u16,
indirect: bool,

[GitHub Actions / build — warning on line 74 in src/queue.rs: unused variable: `indirect`]
event_idx: bool,
) -> Result<Self> {
if transport.queue_used(idx) {
@@ -192,12 +192,11 @@
self.avail_idx = self.avail_idx.wrapping_add(1);
// Safe because self.avail is properly aligned, dereferenceable and initialised.
unsafe {
-(*self.avail.as_ptr()).idx = self.avail_idx;
+(*self.avail.as_ptr())
+    .idx
+    .store(self.avail_idx, Ordering::Release);
}

-// Write barrier so that device can see change to available index after this method returns.
-fence(Ordering::SeqCst);

Ok(head)
}

@@ -324,29 +323,28 @@
if !self.event_idx {
// Safe because self.avail points to a valid, aligned, initialised, dereferenceable, readable
// instance of AvailRing.
-unsafe { (*self.avail.as_ptr()).flags = avail_ring_flags }
+unsafe {
+    (*self.avail.as_ptr())
+        .flags
+        .store(avail_ring_flags, Ordering::Release)
+}
}
-// Write barrier so that device can see change to available index after this method returns.
-fence(Ordering::SeqCst);
}

/// Returns whether the driver should notify the device after adding a new buffer to the
/// virtqueue.
///
/// This will be false if the device has suppressed notifications.
pub fn should_notify(&self) -> bool {
-// Read barrier, so we read a fresh value from the device.
-fence(Ordering::SeqCst);

if self.event_idx {
// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
// instance of UsedRing.
-let avail_event = unsafe { (*self.used.as_ptr()).avail_event };
+let avail_event = unsafe { (*self.used.as_ptr()).avail_event.load(Ordering::Acquire) };
self.avail_idx >= avail_event.wrapping_add(1)
} else {
// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
// instance of UsedRing.
-unsafe { (*self.used.as_ptr()).flags & 0x0001 == 0 }
+unsafe { (*self.used.as_ptr()).flags.load(Ordering::Acquire) & 0x0001 == 0 }
}
}

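For context on the `event_idx` branch above: with `VIRTIO_F_EVENT_IDX` negotiated, the device publishes in `avail_event` the available-ring index it next wants to be notified about, and the driver notifies once its own index has moved past that entry. A standalone illustration of that comparison (a hypothetical helper, not code from this PR):

```rust
/// Hypothetical helper mirroring the `event_idx` comparison in `should_notify`.
fn needs_notification(avail_idx: u16, avail_event: u16) -> bool {
    // wrapping_add keeps the check sound when the 16-bit index wraps around.
    avail_idx >= avail_event.wrapping_add(1)
}

fn main() {
    // With avail_event = 1 (as in the notification test near the end of this
    // diff), the first added buffer needs no notification; the second does.
    assert!(!needs_notification(1, 1));
    assert!(needs_notification(2, 1));
}
```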
@@ -363,12 +361,9 @@

/// Returns whether there is a used element that can be popped.
pub fn can_pop(&self) -> bool {
-// Read barrier, so we read a fresh value from the device.
-fence(Ordering::SeqCst);

// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
// instance of UsedRing.
-self.last_used_idx != unsafe { (*self.used.as_ptr()).idx }
+self.last_used_idx != unsafe { (*self.used.as_ptr()).idx.load(Ordering::Acquire) }
}

/// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
@@ -506,7 +501,6 @@
if !self.can_pop() {
return Err(Error::NotReady);
}
-// Read barrier not necessary, as can_pop already has one.

// Get the index of the start of the descriptor chain for the next element in the used ring.
let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
@@ -532,7 +526,9 @@

if self.event_idx {
unsafe {
-(*self.avail.as_ptr()).used_event = self.last_used_idx;
+(*self.avail.as_ptr())
+    .used_event
+    .store(self.last_used_idx, Ordering::Release);
}
}

@@ -761,24 +757,24 @@
#[repr(C)]
#[derive(Debug)]
struct AvailRing<const SIZE: usize> {
-flags: u16,
+flags: AtomicU16,
/// A driver MUST NOT decrement the idx.
-idx: u16,
+idx: AtomicU16,
ring: [u16; SIZE],
/// Only used if `VIRTIO_F_EVENT_IDX` is negotiated.
-used_event: u16,
+used_event: AtomicU16,
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing<const SIZE: usize> {
-flags: u16,
-idx: u16,
+flags: AtomicU16,
+idx: AtomicU16,
ring: [UsedElem; SIZE],
/// Only used if `VIRTIO_F_EVENT_IDX` is negotiated.
-avail_event: u16,
+avail_event: AtomicU16,
}

#[repr(C)]
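A note on the struct changes above: the type swap is layout-safe because `AtomicU16` is guaranteed to have the same size and bit validity as `u16`, and its alignment (always equal to its size) matches `u16`'s on the targets this crate supports, so the `#[repr(C)]` ring layouts still match what the device expects. A compile-time guard one could add (illustrative, not part of this PR):

```rust
use core::mem::{align_of, size_of};
use core::sync::atomic::AtomicU16;

// If AtomicU16 ever differed from u16 in size or alignment, the shared-ring
// layout would no longer match the virtio spec; these asserts would then
// fail to compile.
const _: () = assert!(size_of::<AtomicU16>() == size_of::<u16>());
const _: () = assert!(align_of::<AtomicU16>() == align_of::<u16>());
```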
@@ -847,10 +843,13 @@
// nothing else accesses them during this block.
unsafe {
// Make sure there is actually at least one descriptor available to read from.
-assert_ne!((*available_ring).idx, (*used_ring).idx);
+assert_ne!(
+    (*available_ring).idx.load(Ordering::Acquire),
+    (*used_ring).idx.load(Ordering::Acquire)
+);
// The fake device always uses descriptors in order, like VIRTIO_F_IN_ORDER, so
// `used_ring.idx` marks the next descriptor we should take from the available ring.
-let next_slot = (*used_ring).idx & (QUEUE_SIZE as u16 - 1);
+let next_slot = (*used_ring).idx.load(Ordering::Acquire) & (QUEUE_SIZE as u16 - 1);
let head_descriptor_index = (*available_ring).ring[next_slot as usize];
let mut descriptor = &(*descriptors)[head_descriptor_index as usize];

@@ -951,7 +950,7 @@
// Mark the buffer as used.
(*used_ring).ring[next_slot as usize].id = head_descriptor_index as u32;
(*used_ring).ring[next_slot as usize].len = (input_length + output.len()) as u32;
-(*used_ring).idx += 1;
+(*used_ring).idx.fetch_add(1, Ordering::AcqRel);
}
}

@@ -1156,17 +1155,26 @@
let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();

// Check that the avail ring's flag is zero by default.
-assert_eq!(unsafe { (*queue.avail.as_ptr()).flags }, 0x0);
+assert_eq!(
+    unsafe { (*queue.avail.as_ptr()).flags.load(Ordering::Acquire) },
+    0x0
+);

queue.set_dev_notify(false);

// Check that the avail ring's flag is 1 after `set_dev_notify(false)`.
-assert_eq!(unsafe { (*queue.avail.as_ptr()).flags }, 0x1);
+assert_eq!(
+    unsafe { (*queue.avail.as_ptr()).flags.load(Ordering::Acquire) },
+    0x1
+);

queue.set_dev_notify(true);

// Check that the avail ring's flag is 0 after `set_dev_notify(true)`.
-assert_eq!(unsafe { (*queue.avail.as_ptr()).flags }, 0x0);
+assert_eq!(
+    unsafe { (*queue.avail.as_ptr()).flags.load(Ordering::Acquire) },
+    0x0
+);
}

/// Tests that the queue notifies the device about added buffers, if it hasn't suppressed
@@ -1197,7 +1205,7 @@
// initialised, and nothing else is accessing them at the same time.
unsafe {
// Suppress notifications.
-(*queue.used.as_ptr()).flags = 0x01;
+(*queue.used.as_ptr()).flags.store(0x01, Ordering::Release);
}

// Check that the transport would not be notified.
@@ -1232,7 +1240,7 @@
// initialised, and nothing else is accessing them at the same time.
unsafe {
// Suppress notifications.
-(*queue.used.as_ptr()).avail_event = 1;
+(*queue.used.as_ptr())
+    .avail_event
+    .store(1, Ordering::Release);
}

// Check that the transport would not be notified.