diff --git a/crates/virtio-queue/src/descriptor.rs b/crates/virtio-queue/src/descriptor.rs
index bb80c3c0..a093de7a 100644
--- a/crates/virtio-queue/src/descriptor.rs
+++ b/crates/virtio-queue/src/descriptor.rs
@@ -10,7 +10,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
 
-use vm_memory::{ByteValued, GuestAddress};
+use vm_memory::{ByteValued, GuestAddress, Le16, Le32, Le64};
 
 use crate::defs::{VIRTQ_DESC_F_INDIRECT, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
 
@@ -19,38 +19,38 @@ use crate::defs::{VIRTQ_DESC_F_INDIRECT, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE};
 #[derive(Default, Clone, Copy, Debug)]
 pub struct Descriptor {
     /// Guest physical address of device specific data
-    addr: u64,
+    addr: Le64,
 
     /// Length of device specific data
-    len: u32,
+    len: Le32,
 
     /// Includes next, write, and indirect bits
-    flags: u16,
+    flags: Le16,
 
     /// Index into the descriptor table of the next descriptor if flags has the next bit set
-    next: u16,
+    next: Le16,
 }
 
 #[allow(clippy::len_without_is_empty)]
 impl Descriptor {
     /// Return the guest physical address of descriptor buffer.
     pub fn addr(&self) -> GuestAddress {
-        GuestAddress(self.addr)
+        GuestAddress(self.addr.into())
     }
 
     /// Return the length of descriptor buffer.
     pub fn len(&self) -> u32 {
-        self.len
+        self.len.into()
     }
 
     /// Return the flags for this descriptor, including next, write and indirect bits.
     pub fn flags(&self) -> u16 {
-        self.flags
+        self.flags.into()
     }
 
     /// Return the value stored in the `next` field of the descriptor.
     pub fn next(&self) -> u16 {
-        self.next
+        self.next.into()
     }
 
     /// Check whether this descriptor refers to a buffer containing an indirect descriptor table.
@@ -68,7 +68,7 @@ impl Descriptor {
     /// If this is false, this descriptor is read only.
     /// Write only means the the emulated device can write and the driver can read.
     pub fn is_write_only(&self) -> bool {
-        self.flags & VIRTQ_DESC_F_WRITE != 0
+        self.flags() & VIRTQ_DESC_F_WRITE != 0
     }
 }
 
@@ -77,31 +77,31 @@ impl Descriptor {
     /// Creates a new descriptor
     pub fn new(addr: u64, len: u32, flags: u16, next: u16) -> Self {
         Descriptor {
-            addr: addr.to_le(),
-            len: len.to_le(),
-            flags: flags.to_le(),
-            next: next.to_le(),
+            addr: addr.into(),
+            len: len.into(),
+            flags: flags.into(),
+            next: next.into(),
         }
     }
 
     /// Set the guest physical address of descriptor buffer
     pub fn set_addr(&mut self, addr: u64) {
-        self.addr = addr.to_le();
+        self.addr = addr.into();
     }
 
     /// Set the length of descriptor buffer.
     pub fn set_len(&mut self, len: u32) {
-        self.len = len.to_le();
+        self.len = len.into();
     }
 
     /// Set the flags for this descriptor.
     pub fn set_flags(&mut self, flags: u16) {
-        self.flags = flags.to_le();
+        self.flags = flags.into();
     }
 
     /// Set the value stored in the `next` field of the descriptor.
     pub fn set_next(&mut self, next: u16) {
-        self.next = next.to_le();
+        self.next = next.into();
     }
 }
 
@@ -111,16 +111,16 @@ unsafe impl ByteValued for Descriptor {}
 #[repr(C)]
 #[derive(Clone, Copy, Default, Debug)]
 pub struct VirtqUsedElem {
-    id: u32,
-    len: u32,
+    id: Le32,
+    len: Le32,
 }
 
 impl VirtqUsedElem {
     /// Create a new `VirtqUsedElem` instance.
-    pub fn new(id: u16, len: u32) -> Self {
+    pub fn new(id: u32, len: u32) -> Self {
         VirtqUsedElem {
-            id: u32::from_le(id as u32),
-            len: len.to_le(),
+            id: id.into(),
+            len: len.into(),
         }
     }
 }
@@ -130,12 +130,12 @@ impl VirtqUsedElem {
 
     /// Get id field of the used descriptor.
     pub fn id(&self) -> u32 {
-        u32::from_le(self.id)
+        self.id.into()
    }
 
     /// Get length field of the used descriptor.
     pub fn len(&self) -> u32 {
-        u32::from_le(self.len)
+        self.len.into()
     }
 }
 
diff --git a/crates/virtio-queue/src/iterator.rs b/crates/virtio-queue/src/iterator.rs
index 24ccb73c..16be508e 100644
--- a/crates/virtio-queue/src/iterator.rs
+++ b/crates/virtio-queue/src/iterator.rs
@@ -86,6 +86,7 @@ where
         let head_index: u16 = self
             .mem
             .load(addr, Ordering::Acquire)
+            .map(u16::from_le)
             .map_err(|_| error!("Failed to read from memory {:x}", addr.raw_value()))
             .ok()?;
 
@@ -130,10 +131,10 @@ mod tests {
             vq.desc_table().store(j, desc);
         }
 
-        vq.avail().ring().ref_at(0).store(0);
-        vq.avail().ring().ref_at(1).store(2);
-        vq.avail().ring().ref_at(2).store(5);
-        vq.avail().idx().store(3);
+        vq.avail().ring().ref_at(0).store(u16::to_le(0));
+        vq.avail().ring().ref_at(1).store(u16::to_le(2));
+        vq.avail().ring().ref_at(2).store(u16::to_le(5));
+        vq.avail().idx().store(u16::to_le(3));
 
         let mut i = q.iter().unwrap();
 
@@ -205,9 +206,9 @@ mod tests {
             vq.desc_table().store(j, desc);
         }
 
-        vq.avail().ring().ref_at(0).store(0);
-        vq.avail().ring().ref_at(1).store(2);
-        vq.avail().idx().store(2);
+        vq.avail().ring().ref_at(0).store(u16::to_le(0));
+        vq.avail().ring().ref_at(1).store(u16::to_le(2));
+        vq.avail().idx().store(u16::to_le(2));
 
         let mut i = q.iter().unwrap();
 
diff --git a/crates/virtio-queue/src/queue.rs b/crates/virtio-queue/src/queue.rs
index c897d7bc..fc9113b9 100644
--- a/crates/virtio-queue/src/queue.rs
+++ b/crates/virtio-queue/src/queue.rs
@@ -266,16 +266,16 @@ mod tests {
         let vq = MockSplitQueue::new(m, 16);
         let mut q = vq.create_queue(m);
 
-        assert_eq!(vq.used().idx().load(), 0);
+        assert_eq!(u16::from_le(vq.used().idx().load()), 0);
 
         // index too large
         assert!(q.add_used(16, 0x1000).is_err());
-        assert_eq!(vq.used().idx().load(), 0);
+        assert_eq!(u16::from_le(vq.used().idx().load()), 0);
 
         // should be ok
         q.add_used(1, 0x1000).unwrap();
         assert_eq!(q.state.next_used, Wrapping(1));
-        assert_eq!(vq.used().idx().load(), 1);
+        assert_eq!(u16::from_le(vq.used().idx().load()), 1);
 
         let x = vq.used().ring().ref_at(0).load();
         assert_eq!(x.id(), 1);
@@ -334,8 +334,11 @@ mod tests {
             assert_eq!(q.needs_notification().unwrap(), true);
         }
 
-        m.write_obj::<u16>(4, avail_addr.unchecked_add(4 + qsize as u64 * 2))
-            .unwrap();
+        m.write_obj::<u16>(
+            u16::to_le(4),
+            avail_addr.unchecked_add(4 + qsize as u64 * 2),
+        )
+        .unwrap();
         q.state.set_event_idx(true);
 
         // Incrementing up to this value causes an `u16` to wrap back to 0.
@@ -383,26 +386,28 @@ mod tests {
         assert_eq!(q.state.event_idx_enabled, false);
 
         q.enable_notification().unwrap();
-        let v = m.read_obj::<u16>(used_addr).unwrap();
+        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
         assert_eq!(v, 0);
 
         q.disable_notification().unwrap();
-        let v = m.read_obj::<u16>(used_addr).unwrap();
+        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
         assert_eq!(v, VIRTQ_USED_F_NO_NOTIFY);
 
         q.enable_notification().unwrap();
-        let v = m.read_obj::<u16>(used_addr).unwrap();
+        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
         assert_eq!(v, 0);
 
         q.set_event_idx(true);
         let avail_addr = vq.avail_addr();
-        m.write_obj::<u16>(2, avail_addr.unchecked_add(2)).unwrap();
+        m.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
+            .unwrap();
 
         assert_eq!(q.enable_notification().unwrap(), true);
         q.state.next_avail = Wrapping(2);
         assert_eq!(q.enable_notification().unwrap(), false);
 
-        m.write_obj::<u16>(8, avail_addr.unchecked_add(2)).unwrap();
+        m.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
+            .unwrap();
         assert_eq!(q.enable_notification().unwrap(), true);
         q.state.next_avail = Wrapping(8);
 
@@ -430,13 +435,13 @@ mod tests {
             vq.desc_table().store(i, desc);
         }
 
-        vq.avail().ring().ref_at(0).store(0);
-        vq.avail().ring().ref_at(1).store(2);
-        vq.avail().ring().ref_at(2).store(5);
-        vq.avail().ring().ref_at(3).store(7);
-        vq.avail().ring().ref_at(4).store(9);
+        vq.avail().ring().ref_at(0).store(u16::to_le(0));
+        vq.avail().ring().ref_at(1).store(u16::to_le(2));
+        vq.avail().ring().ref_at(2).store(u16::to_le(5));
+        vq.avail().ring().ref_at(3).store(u16::to_le(7));
+        vq.avail().ring().ref_at(4).store(u16::to_le(9));
         // Let the device know it can consume chains with the index < 2.
-        vq.avail().idx().store(2);
+        vq.avail().idx().store(u16::to_le(2));
 
         // No descriptor chains are consumed at this point.
         assert_eq!(q.next_avail(), 0);
@@ -461,7 +466,7 @@ mod tests {
         // The next chain that can be consumed should have index 2.
         assert_eq!(q.next_avail(), 2);
         // Let the device know it can consume one more chain.
-        vq.avail().idx().store(3);
+        vq.avail().idx().store(u16::to_le(3));
         i = 0;
 
         loop {
@@ -476,7 +481,7 @@ mod tests {
             // ring. Ideally this should be done on a separate thread.
             // Because of this update, the loop should be iterated again to consume the new
            // available descriptor chains.
-            vq.avail().idx().store(4);
+            vq.avail().idx().store(u16::to_le(4));
             if !q.enable_notification().unwrap() {
                 break;
             }
@@ -487,7 +492,7 @@ mod tests {
 
         // Set an `idx` that is bigger than the number of entries added in the ring.
         // This is an allowed scenario, but the indexes of the chain will have unexpected values.
-        vq.avail().idx().store(7);
+        vq.avail().idx().store(u16::to_le(7));
 
         loop {
             q.disable_notification().unwrap();
@@ -525,11 +530,11 @@ mod tests {
             vq.desc_table().store(i, desc);
         }
 
-        vq.avail().ring().ref_at(0).store(0);
-        vq.avail().ring().ref_at(1).store(2);
-        vq.avail().ring().ref_at(2).store(5);
+        vq.avail().ring().ref_at(0).store(u16::to_le(0));
+        vq.avail().ring().ref_at(1).store(u16::to_le(2));
+        vq.avail().ring().ref_at(2).store(u16::to_le(5));
         // Let the device know it can consume chains with the index < 2.
-        vq.avail().idx().store(3);
+        vq.avail().idx().store(u16::to_le(3));
 
         // No descriptor chains are consumed at this point.
         assert_eq!(q.next_avail(), 0);
@@ -552,7 +557,7 @@ mod tests {
 
         // Decrement `idx` which should be forbidden. We don't enforce this thing, but we should
         // test that we don't panic in case the driver decrements it.
-        vq.avail().idx().store(1);
+        vq.avail().idx().store(u16::to_le(1));
 
         loop {
             q.disable_notification().unwrap();
diff --git a/crates/virtio-queue/src/state.rs b/crates/virtio-queue/src/state.rs
index 511da781..fdbcdc7a 100644
--- a/crates/virtio-queue/src/state.rs
+++ b/crates/virtio-queue/src/state.rs
@@ -78,7 +78,8 @@ impl QueueState {
 
         let offset = VIRTQ_USED_RING_HEADER_SIZE + elem_sz;
         let addr = self.used_ring.unchecked_add(offset);
-        mem.store(val, addr, order).map_err(Error::GuestMemory)
+        mem.store(u16::to_le(val), addr, order)
+            .map_err(Error::GuestMemory)
     }
 
     // Set the value of the `flags` field of the used ring, applying the specified ordering.
@@ -88,7 +89,7 @@ impl QueueState {
         val: u16,
         order: Ordering,
     ) -> Result<(), Error> {
-        mem.store(val, self.used_ring, order)
+        mem.store(u16::to_le(val), self.used_ring, order)
             .map_err(Error::GuestMemory)
     }
 
@@ -133,6 +134,7 @@ impl QueueState {
         let used_event_addr = self.avail_ring.unchecked_add(offset);
 
         mem.load(used_event_addr, order)
+            .map(u16::from_le)
            .map(Wrapping)
             .map_err(Error::GuestMemory)
     }
@@ -285,6 +287,7 @@ impl QueueStateT for QueueState {
         let addr = self.avail_ring.unchecked_add(2);
 
         mem.load(addr, order)
+            .map(u16::from_le)
             .map(Wrapping)
             .map_err(Error::GuestMemory)
     }
@@ -307,13 +310,13 @@ impl QueueStateT for QueueState {
         let elem_sz = next_used_index * VIRTQ_USED_ELEMENT_SIZE;
         let offset = VIRTQ_USED_RING_HEADER_SIZE + elem_sz;
         let addr = self.used_ring.unchecked_add(offset);
-        mem.write_obj(VirtqUsedElem::new(head_index, len), addr)
+        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
             .map_err(Error::GuestMemory)?;
 
         self.next_used += Wrapping(1);
 
         mem.store(
-            self.next_used.0,
+            u16::to_le(self.next_used.0),
             self.used_ring.unchecked_add(2),
             Ordering::Release,
         )
diff --git a/crates/virtio-queue/src/state_sync.rs b/crates/virtio-queue/src/state_sync.rs
index 9c486514..faa1bad9 100644
--- a/crates/virtio-queue/src/state_sync.rs
+++ b/crates/virtio-queue/src/state_sync.rs
@@ -267,26 +267,28 @@ mod tests {
         assert_eq!(q.lock_state().event_idx_enabled, false);
 
         q.enable_notification(mem).unwrap();
-        let v = m.read_obj::<u16>(used_addr).unwrap();
+        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
         assert_eq!(v, 0);
 
         q.disable_notification(m.memory()).unwrap();
-        let v = m.read_obj::<u16>(used_addr).unwrap();
+        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
         assert_eq!(v, VIRTQ_USED_F_NO_NOTIFY);
 
         q.enable_notification(mem).unwrap();
-        let v = m.read_obj::<u16>(used_addr).unwrap();
+        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
         assert_eq!(v, 0);
 
         q.set_event_idx(true);
         let avail_addr = q.lock_state().avail_ring;
-        m.write_obj::<u16>(2, avail_addr.unchecked_add(2)).unwrap();
+        m.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
+            .unwrap();
 
         assert_eq!(q.enable_notification(mem).unwrap(), true);
         q.lock_state().next_avail = Wrapping(2);
         assert_eq!(q.enable_notification(mem).unwrap(), false);
 
-        m.write_obj::<u16>(8, avail_addr.unchecked_add(2)).unwrap();
+        m.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
+            .unwrap();
         assert_eq!(q.enable_notification(mem).unwrap(), true);
         q.lock_state().next_avail = Wrapping(8);
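
Note on the pattern: the virtio split-ring layout is little-endian in guest memory, so the patch replaces raw `u16`/`u32`/`u64` fields and scattered `to_le()`/`from_le()` calls with vm-memory's `Le16`/`Le32`/`Le64` wrappers, converting to host-native integers only at the accessor boundary. The sketch below is not part of the patch; `DemoDesc` is a hypothetical stand-in for `Descriptor`, and it assumes only the `vm_memory` items (`ByteValued`, `Le16`, `Le64`) and the `From`/`Into` conversions already exercised in the diff.

```rust
// Minimal sketch (assumptions noted above): guest-visible fields stay in
// explicit little-endian wrappers; accessors hand out host-native values.
use vm_memory::{ByteValued, Le16, Le64};

#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
struct DemoDesc {
    addr: Le64,  // guest physical address, stored little-endian
    flags: Le16, // VIRTQ_DESC_F_* bits, stored little-endian
}

// SAFETY: `DemoDesc` is `repr(C)`, built only from `Le*` fields, and any byte
// pattern is a valid value, so it may be copied to/from guest memory as raw
// bytes (same reasoning as `unsafe impl ByteValued for Descriptor {}`).
unsafe impl ByteValued for DemoDesc {}

impl DemoDesc {
    fn new(addr: u64, flags: u16) -> Self {
        DemoDesc {
            addr: addr.into(),   // u64 -> Le64, as in `Descriptor::new`
            flags: flags.into(), // u16 -> Le16
        }
    }

    fn addr(&self) -> u64 {
        self.addr.into() // Le64 -> u64, as in `Descriptor::addr`
    }

    fn flags(&self) -> u16 {
        self.flags.into() // Le16 -> u16, as in `Descriptor::flags`
    }
}

fn main() {
    let desc = DemoDesc::new(0x1000, 0x2);
    // Accessors return host-native values regardless of host endianness;
    // the in-memory representation stays little-endian.
    assert_eq!(desc.addr(), 0x1000);
    assert_eq!(desc.flags() & 0x2, 0x2);
    println!("{:?}", desc);
}
```

On a little-endian host these conversions compile down to no-ops, so the change is about making the guest byte order explicit in the type system (and correct on big-endian hosts) rather than about runtime cost.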