fix: Set the member variables of the virtio queue to volatile to resolve the issue of the virtioblk test case hanging under RISC-V.
fslongjin committed Apr 22, 2024
1 parent 61ece50 commit c0bf4a0
Showing 2 changed files with 148 additions and 52 deletions.
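
The new fields are wrapped in a `Volatile<T>` type imported from `crate::volatile`, presumably defined in the second changed file (not shown here). A minimal sketch of such a wrapper, consistent with the `Volatile::new`, `read_volatile`, and `write_volatile` calls the diff makes, could look like the following; the exact definition, including the `#[repr(transparent)]` attribute, is an assumption rather than a copy of the commit:

use core::ptr;

/// A cell whose reads and writes always go through volatile pointer
/// operations, so the compiler can neither cache the value in a register
/// nor elide apparently redundant accesses.
#[repr(transparent)]
pub struct Volatile<T: Copy>(T);

impl<T: Copy> Volatile<T> {
    pub const fn new(value: T) -> Self {
        Self(value)
    }

    /// Forces a real load with `ptr::read_volatile`.
    pub fn read_volatile(&self) -> T {
        // Safe because `self.0` is always a valid, initialised `T`.
        unsafe { ptr::read_volatile(&self.0) }
    }

    /// Forces a real store with `ptr::write_volatile`.
    pub fn write_volatile(&mut self, value: T) {
        // Safe for the same reason; `&mut self` guarantees exclusive access.
        unsafe { ptr::write_volatile(&mut self.0, value) }
    }
}

`#[repr(transparent)]` matters for the ring structs below: it keeps `Volatile<u16>` layout-identical to `u16`, so `AvailRing` and `UsedRing` retain the in-memory layout the virtio specification requires.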
144 changes: 92 additions & 52 deletions src/queue.rs
@@ -2,12 +2,14 @@

use crate::hal::{BufferDirection, Dma, Hal, PhysAddr};
use crate::transport::Transport;
use crate::volatile::Volatile;
use crate::{align_up, nonnull_slice_from_raw_parts, pages, Error, Result, PAGE_SIZE};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
use bitflags::bitflags;
#[cfg(test)]
use core::cmp::min;
use core::fmt::Debug;
use core::hint::spin_loop;
use core::mem::{size_of, take};
#[cfg(test)]
@@ -22,7 +24,6 @@ use zerocopy::{AsBytes, FromBytes, FromZeroes};
///
/// * `SIZE`: The size of the queue. This is both the number of descriptors, and the number of slots
/// in the available and used rings.
#[derive(Debug)]
pub struct VirtQueue<H: Hal, const SIZE: usize> {
/// DMA guard
layout: VirtQueueLayout<H>,
@@ -42,16 +43,16 @@ pub struct VirtQueue<H: Hal, const SIZE: usize> {
used: NonNull<UsedRing<SIZE>>,

/// The index of queue
queue_idx: u16,
queue_idx: Volatile<u16>,
/// The number of descriptors currently in use.
num_used: u16,
num_used: Volatile<u16>,
/// The head desc index of the free list.
free_head: u16,
free_head: Volatile<u16>,
/// Our trusted copy of `desc` that the device can't access.
desc_shadow: [Descriptor; SIZE],
/// Our trusted copy of `avail.idx`.
avail_idx: u16,
last_used_idx: u16,
avail_idx: Volatile<u16>,
last_used_idx: Volatile<u16>,
/// Whether the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
event_idx: bool,
#[cfg(feature = "alloc")]
@@ -60,6 +61,12 @@ pub struct VirtQueue<H: Hal, const SIZE: usize> {
indirect_lists: [Option<NonNull<[Descriptor]>>; SIZE],
}

impl<H: Hal, const SIZE: usize> core::fmt::Debug for VirtQueue<H, SIZE> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("VirtQueue").finish()
}
}

impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
/// Creates a new VirtQueue.
///
@@ -122,12 +129,12 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
desc,
avail,
used,
queue_idx: idx,
num_used: 0,
free_head: 0,
queue_idx: Volatile::new(idx),
num_used: Volatile::new(0),
free_head: Volatile::new(0),
desc_shadow,
avail_idx: 0,
last_used_idx: 0,
avail_idx: Volatile::new(0),
last_used_idx: Volatile::new(0),
event_idx,
#[cfg(feature = "alloc")]
indirect,
@@ -158,9 +165,10 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
// Only consider indirect descriptors if the alloc feature is enabled, as they require
// allocation.
#[cfg(feature = "alloc")]
if self.num_used as usize + 1 > SIZE
if self.num_used.read_volatile() as usize + 1 > SIZE
|| descriptors_needed > SIZE
|| (!self.indirect && self.num_used as usize + descriptors_needed > SIZE)
|| (!self.indirect
&& self.num_used.read_volatile() as usize + descriptors_needed > SIZE)
{
return Err(Error::QueueFull);
}
@@ -178,7 +186,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
#[cfg(not(feature = "alloc"))]
let head = self.add_direct(inputs, outputs);

let avail_slot = self.avail_idx & (SIZE as u16 - 1);
let avail_slot = self.avail_idx.read_volatile() & (SIZE as u16 - 1);
// Safe because self.avail is properly aligned, dereferenceable and initialised.
unsafe {
(*self.avail.as_ptr()).ring[avail_slot as usize] = head;
@@ -189,10 +197,13 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
fence(Ordering::SeqCst);

// increase head of avail ring
self.avail_idx = self.avail_idx.wrapping_add(1);
self.avail_idx
.write_volatile(self.avail_idx.read_volatile().wrapping_add(1));
// Safe because self.avail is properly aligned, dereferenceable and initialised.
unsafe {
(*self.avail.as_ptr()).idx = self.avail_idx;
(*self.avail.as_ptr())
.idx
.write_volatile(self.avail_idx.read_volatile());
}

// Write barrier so that device can see change to available index after this method returns.
@@ -207,21 +218,21 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
outputs: &'a mut [&'b mut [u8]],
) -> u16 {
// allocate descriptors from free list
let head = self.free_head;
let mut last = self.free_head;
let head = self.free_head.read_volatile();
let mut last = self.free_head.read_volatile();

for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
assert_ne!(buffer.len(), 0);

// Write to desc_shadow then copy.
let desc = &mut self.desc_shadow[usize::from(self.free_head)];
let desc = &mut self.desc_shadow[usize::from(self.free_head.read_volatile())];
// Safe because our caller promises that the buffers live at least until `pop_used`
// returns them.
unsafe {
desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
}
last = self.free_head;
self.free_head = desc.next;
last = self.free_head.read_volatile();
self.free_head.write_volatile(desc.next);

self.write_desc(last);
}
@@ -243,7 +254,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
inputs: &'a [&'b [u8]],
outputs: &'a mut [&'b mut [u8]],
) -> u16 {
let head = self.free_head;
let head = self.free_head.read_volatile();

// Allocate and fill in indirect descriptor list.
let mut indirect_list = Descriptor::new_box_slice_zeroed(inputs.len() + outputs.len());
@@ -271,7 +282,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
// indirect list from being freed when this function returns; recycle_descriptors is instead
// responsible for freeing the memory after the buffer chain is popped.
let direct_desc = &mut self.desc_shadow[usize::from(head)];
self.free_head = direct_desc.next;
self.free_head.write_volatile(direct_desc.next);
unsafe {
direct_desc.set_buf::<H>(
Box::leak(indirect_list).as_bytes().into(),
@@ -303,7 +314,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {

// Notify the queue.
if self.should_notify() {
transport.notify(self.queue_idx);
transport.notify(self.queue_idx.read_volatile());
}

// Wait until there is at least one element in the used ring.
@@ -324,7 +335,11 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
if !self.event_idx {
// Safe because self.avail points to a valid, aligned, initialised, dereferenceable, readable
// instance of AvailRing.
unsafe { (*self.avail.as_ptr()).flags = avail_ring_flags }
unsafe {
(*self.avail.as_ptr())
.flags
.write_volatile(avail_ring_flags)
}
}
// Write barrier so that device can see change to available index after this method returns.
fence(Ordering::SeqCst);
@@ -341,12 +356,12 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
if self.event_idx {
// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
// instance of UsedRing.
let avail_event = unsafe { (*self.used.as_ptr()).avail_event };
self.avail_idx >= avail_event.wrapping_add(1)
let avail_event = unsafe { (*self.used.as_ptr()).avail_event.read_volatile() };
self.avail_idx.read_volatile() >= avail_event.wrapping_add(1)
} else {
// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
// instance of UsedRing.
unsafe { (*self.used.as_ptr()).flags & 0x0001 == 0 }
unsafe { (*self.used.as_ptr()).flags.read_volatile() & 0x0001 == 0 }
}
}

@@ -368,14 +383,14 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {

// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
// instance of UsedRing.
self.last_used_idx != unsafe { (*self.used.as_ptr()).idx }
self.last_used_idx.read_volatile() != unsafe { (*self.used.as_ptr()).idx.read_volatile() }
}

/// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
/// `None` if the used ring is empty.
pub fn peek_used(&self) -> Option<u16> {
if self.can_pop() {
let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
let last_used_slot = self.last_used_idx.read_volatile() & (SIZE as u16 - 1);
// Safe because self.used points to a valid, aligned, initialised, dereferenceable,
// readable instance of UsedRing.
Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
@@ -388,14 +403,14 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
pub fn available_desc(&self) -> usize {
#[cfg(feature = "alloc")]
if self.indirect {
return if usize::from(self.num_used) == SIZE {
return if usize::from(self.num_used.read_volatile()) == SIZE {
0
} else {
SIZE
};
}

SIZE - usize::from(self.num_used)
SIZE - usize::from(self.num_used.read_volatile())
}

/// Unshares buffers in the list starting at descriptor index `head` and adds them to the free
Expand All @@ -414,8 +429,8 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
inputs: &'a [&'a [u8]],
outputs: &'a mut [&'a mut [u8]],
) {
let original_free_head = self.free_head;
self.free_head = head;
let original_free_head = self.free_head.read_volatile();
self.free_head.write_volatile(head);

let head_desc = &mut self.desc_shadow[usize::from(head)];
if head_desc.flags.contains(DescFlags::INDIRECT) {
@@ -509,7 +524,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
// Read barrier not necessary, as can_pop already has one.

// Get the index of the start of the descriptor chain for the next element in the used ring.
let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
let last_used_slot = self.last_used_idx.read_volatile() & (SIZE as u16 - 1);
let index;
let len;
// Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
@@ -528,11 +543,14 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
unsafe {
self.recycle_descriptors(index, inputs, outputs);
}
self.last_used_idx = self.last_used_idx.wrapping_add(1);
self.last_used_idx
.write_volatile(self.last_used_idx.read_volatile().wrapping_add(1));

if self.event_idx {
unsafe {
(*self.avail.as_ptr()).used_event = self.last_used_idx;
(*self.avail.as_ptr())
.used_event
.write_volatile(self.last_used_idx.read_volatile());
}
}

@@ -759,26 +777,36 @@ bitflags! {
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing<const SIZE: usize> {
flags: u16,
flags: Volatile<u16>,
/// A driver MUST NOT decrement the idx.
idx: u16,
idx: Volatile<u16>,
ring: [u16; SIZE],
/// Only used if `VIRTIO_F_EVENT_IDX` is negotiated.
used_event: u16,
used_event: Volatile<u16>,
}

impl<const SIZE: usize> Debug for AvailRing<SIZE> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("AvailRing").finish()
}
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing<const SIZE: usize> {
flags: u16,
idx: u16,
flags: Volatile<u16>,
idx: Volatile<u16>,
ring: [UsedElem; SIZE],
/// Only used if `VIRTIO_F_EVENT_IDX` is negotiated.
avail_event: u16,
avail_event: Volatile<u16>,
}

impl<const SIZE: usize> Debug for UsedRing<SIZE> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("UsedRing").finish()
}
}

#[repr(C)]
@@ -847,10 +875,13 @@ pub(crate) fn fake_read_write_queue<const QUEUE_SIZE: usize>(
// nothing else accesses them during this block.
unsafe {
// Make sure there is actually at least one descriptor available to read from.
assert_ne!((*available_ring).idx, (*used_ring).idx);
assert_ne!(
(*available_ring).idx.read_volatile(),
(*used_ring).idx.read_volatile()
);
// The fake device always uses descriptors in order, like VIRTIO_F_IN_ORDER, so
// `used_ring.idx` marks the next descriptor we should take from the available ring.
let next_slot = (*used_ring).idx & (QUEUE_SIZE as u16 - 1);
let next_slot = (*used_ring).idx.read_volatile() & (QUEUE_SIZE as u16 - 1);
let head_descriptor_index = (*available_ring).ring[next_slot as usize];
let mut descriptor = &(*descriptors)[head_descriptor_index as usize];

@@ -1156,17 +1187,26 @@ mod tests {
let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();

// Check that the avail ring's flag is zero by default.
assert_eq!(unsafe { (*queue.avail.as_ptr()).flags }, 0x0);
assert_eq!(
unsafe { (*queue.avail.as_ptr()).flags.read_volatile() },
0x0
);

queue.set_dev_notify(false);

// Check that the avail ring's flag is 1 after `set_dev_notify(false)`.
assert_eq!(unsafe { (*queue.avail.as_ptr()).flags }, 0x1);
assert_eq!(
unsafe { (*queue.avail.as_ptr()).flags.read_volatile() },
0x1
);

queue.set_dev_notify(true);

// Check that the avail ring's flag is 0 after `set_dev_notify(true)`.
assert_eq!(unsafe { (*queue.avail.as_ptr()).flags }, 0x0);
assert_eq!(
unsafe { (*queue.avail.as_ptr()).flags.read_volatile() },
0x0
);
}

/// Tests that the queue notifies the device about added buffers, if it hasn't suppressed
Expand Down Expand Up @@ -1197,7 +1237,7 @@ mod tests {
// initialised, and nothing else is accessing them at the same time.
unsafe {
// Suppress notifications.
(*queue.used.as_ptr()).flags = 0x01;
(*queue.used.as_ptr()).flags.write_volatile(0x01);
}

// Check that the transport would not be notified.
@@ -1232,7 +1272,7 @@ mod tests {
// initialised, and nothing else is accessing them at the same time.
unsafe {
// Suppress notifications.
(*queue.used.as_ptr()).avail_event = 1;
(*queue.used.as_ptr()).avail_event.write_volatile(1);
}

// Check that the transport would not be notified.
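
Why volatile is needed here: fields such as `used.idx` and `avail_event` are written by the device, but with plain `u16` members the compiler may assume they never change between two Rust-visible writes, hoist the load out of a polling loop, and spin on a stale register copy forever. That failure mode matches the virtioblk hang on RISC-V that this commit fixes. A simplified, self-contained illustration of the pattern (the function and parameter names are invented for this sketch, not taken from the crate):

use core::hint::spin_loop;
use core::ptr;

/// Busy-waits until the device advances the used-ring index.
///
/// Reading through `ptr::read_volatile` forces a fresh load from memory on
/// every iteration; a plain `*used_idx` dereference could legally be hoisted
/// out of the loop by the optimiser, turning this into an infinite spin.
fn wait_for_used(used_idx: *const u16, last_used_idx: u16) {
    // Safety requirement: `used_idx` must point to valid, device-shared
    // memory for the duration of the wait.
    while unsafe { ptr::read_volatile(used_idx) } == last_used_idx {
        spin_loop();
    }
}

Note that volatile accesses alone do not order surrounding memory operations; the queue code above still pairs them with `fence(Ordering::SeqCst)` where driver-device ordering matters.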
