diff --git a/Cargo.toml b/Cargo.toml
index 585daec9ea..18c9c0e287 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -71,7 +71,7 @@ shell = ["simple-shell"]
 
 [dependencies]
 hermit-macro = { path = "hermit-macro" }
-virtio-def = { path = "virtio-def", features = ["zerocopy"] }
+virtio-def = { path = "virtio-def" }
 ahash = { version = "0.8", default-features = false }
 align-address = "0.1"
 bit_field = "0.10"
@@ -101,7 +101,7 @@ build-time = "0.1.3"
 async-trait = "0.1.80"
 async-lock = { version = "3.3.0", default-features = false }
 simple-shell = { version = "0.0.1", optional = true }
-volatile = "0.5.4"
+volatile = { version = "0.5.4", features = ["unstable"] }
 
 [dependencies.smoltcp]
 version = "0.11"
diff --git a/src/drivers/virtio/virtqueue/split.rs b/src/drivers/virtio/virtqueue/split.rs
index 27ce815637..ae226d82ea 100644
--- a/src/drivers/virtio/virtqueue/split.rs
+++ b/src/drivers/virtio/virtqueue/split.rs
@@ -8,10 +8,14 @@ use alloc::rc::Rc;
 use alloc::vec::Vec;
 use core::alloc::{Allocator, Layout};
 use core::cell::RefCell;
+use core::marker::PhantomPinned;
 use core::mem::{size_of, MaybeUninit};
-use core::ptr;
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
 use virtio_def::num::{le16, le32, le64};
+use volatile::access::{Access, ReadOnly};
+use volatile::{map_field, VolatilePtr, VolatileRef};
 use zerocopy::{FromBytes, FromZeroes};
 
 #[cfg(not(feature = "pci"))]
 use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
@@ -47,6 +51,11 @@ impl Descriptor {
 }
 
+struct DescrTable {
+	_pin: PhantomPinned,
+	descr_table: [MaybeUninit<Descriptor>],
+}
+
 // The generic structure eases the creation of the layout for the statically
 // sized portion of [AvailRing] and [UsedRing]. This way, to be allocated they
 // only need to be extended with the dynamic portion.
@@ -54,6 +63,7 @@ impl Descriptor {
 struct GenericRing<T: ?Sized> {
 	flags: le16,
 	index: le16,
+	_pin: PhantomPinned,
 
 	// Rust does not allow a field other than the last one to be unsized.
 	// Unfortunately, this is not the case with the layout in the specification.
@@ -67,26 +77,18 @@ const RING_AND_EVENT_ERROR: &str = "ring_and_event should have at least enough e
 
 type AvailRing = GenericRing<[MaybeUninit<le16>]>;
 
 impl AvailRing {
-	fn ring_ref(&self) -> &[MaybeUninit<le16>] {
-		self.ring_and_event
-			.split_last()
-			.expect(RING_AND_EVENT_ERROR)
-			.1
-	}
-
-	fn ring_mut(&mut self) -> &mut [MaybeUninit<le16>] {
-		self.ring_and_event
-			.split_last_mut()
-			.expect(RING_AND_EVENT_ERROR)
-			.1
+	fn ring_ptr<A: Access>(
+		volatile_self: VolatilePtr<'_, Self, A>,
+	) -> VolatilePtr<'_, [MaybeUninit<le16>], A> {
+		let ring_and_event_ptr = map_field!(volatile_self.ring_and_event);
+		ring_and_event_ptr.split_at(ring_and_event_ptr.len()).0
 	}
 
-	fn event_ref(&self) -> &MaybeUninit<le16> {
-		self.ring_and_event.last().expect(RING_AND_EVENT_ERROR)
-	}
-
-	fn event_mut(&mut self) -> &MaybeUninit<le16> {
-		self.ring_and_event.last_mut().expect(RING_AND_EVENT_ERROR)
+	fn event_ptr<A: Access>(
+		volatile_self: VolatilePtr<'_, Self, A>,
+	) -> VolatilePtr<'_, MaybeUninit<le16>, A> {
+		let ring_and_event_ptr = map_field!(volatile_self.ring_and_event);
+		ring_and_event_ptr.index(ring_and_event_ptr.len() - 1)
 	}
 }
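Note on the change above: the old `ring_ref`/`ring_mut` accessors handed out plain references, so the rings were touched with ordinary non-volatile loads and stores (apart from a few hand-written `ptr::read_volatile` calls further down in `poll`). The new `ring_ptr`/`event_ptr` helpers hand out `VolatilePtr` projections instead, so every access to device-shared memory is volatile by construction. A minimal sketch of that access pattern, using only the `volatile` 0.5.4 calls that already appear in this diff (`VolatileRef::new`, `as_mut_ptr`, `map_field!`, `read`, `write`); the `Ring` type and `bump_index` function are illustrative and not part of the driver:

```rust
use core::ptr::NonNull;

use volatile::{map_field, VolatileRef};

#[repr(C)]
struct Ring {
    flags: u16,
    index: u16,
}

fn bump_index(ring: &mut Ring) -> u16 {
    // Wrap the structure once; all further accesses go through volatile operations.
    // SAFETY: `ring` is a valid, exclusive reference for the lifetime of `ring_ref`.
    let mut ring_ref = unsafe { VolatileRef::new(NonNull::from(ring)) };
    let ring_ptr = ring_ref.as_mut_ptr();

    // Project to a single field and access it volatilely, as the new
    // ring_ptr/event_ptr helpers do for the virtqueue rings.
    let index = map_field!(ring_ptr.index).read();
    map_field!(ring_ptr.index).write(index.wrapping_add(1));
    index
}
```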
@@ -96,21 +98,34 @@ type UsedRing = GenericRing<[u8]>;
 
 // Used ring is not supposed to be modified by the driver. Thus, we only have _ref methods (and not _mut methods).
 impl UsedRing {
-	fn ring_ref(&self) -> &[UsedElem] {
-		// The last two bytes belong to the event field
-		UsedElem::slice_from(
-			&self.ring_and_event[..(self.ring_and_event.len() - size_of::<le16>())],
-		)
-		.expect(RING_AND_EVENT_ERROR)
+	fn ring_ptr<A: Access>(
+		volatile_self: VolatilePtr<'_, Self, A>,
+	) -> VolatilePtr<'_, [UsedElem], A> {
+		let ring_and_event_ptr = map_field!(volatile_self.ring_and_event);
+		let ring_len = (ring_and_event_ptr.len() - size_of::<le16>()) / size_of::<UsedElem>();
+
+		unsafe {
+			ring_and_event_ptr.map(|ring_and_event_ptr| {
+				NonNull::slice_from_raw_parts(ring_and_event_ptr.cast::<UsedElem>(), ring_len)
+			})
+		}
 	}
 
-	fn event_ref(&self) -> &le16 {
-		le16::ref_from_suffix(&self.ring_and_event).expect(RING_AND_EVENT_ERROR)
+	fn event_ptr<A: Access>(
+		volatile_self: VolatilePtr<'_, Self, A>,
+	) -> VolatilePtr<'_, le16, A> {
+		let ring_and_event_ptr = map_field!(volatile_self.ring_and_event);
+		let ring_and_event_len = ring_and_event_ptr.len();
+		let event_bytes_ptr = ring_and_event_ptr
+			.split_at(ring_and_event_len - size_of::<le16>())
+			.1;
+
+		unsafe { event_bytes_ptr.map(|event_bytes| event_bytes.cast::<le16>()) }
 	}
 }
 
 #[repr(C)]
-#[derive(Copy, Clone, FromZeroes, FromBytes)]
+#[derive(Copy, Clone)]
 struct UsedElem {
 	id: le32,
 	len: le32,
@@ -118,10 +133,18 @@ struct DescrRing {
 	read_idx: u16,
-	descr_table: Box<[MaybeUninit<Descriptor>], DeviceAlloc>,
-	ref_ring: Box<[Option<Box<TransferToken>>]>,
-	avail_ring: Box<AvailRing, DeviceAlloc>,
-	used_ring: Box<UsedRing, DeviceAlloc>,
+	token_ring: Box<[Option<Box<TransferToken>>]>,
+	descr_table_ref: VolatileRef<'static, DescrTable>,
+	avail_ring_ref: VolatileRef<'static, AvailRing>,
+	used_ring_ref: VolatileRef<'static, UsedRing, ReadOnly>,
+
+	// The following fields should not be used directly, as they do not ensure volatile access.
+	#[deprecated = "_pinned_* fields are only for keeping the objects around. All access should be done through the respective *_ref fields."]
+	_pinned_descr_table: Pin<Box<DescrTable, DeviceAlloc>>,
+	#[deprecated = "_pinned_* fields are only for keeping the objects around. All access should be done through the respective *_ref fields."]
+	_pinned_avail_ring: Pin<Box<AvailRing, DeviceAlloc>>,
+	#[deprecated = "_pinned_* fields are only for keeping the objects around. All access should be done through the respective *_ref fields."]
+	_pinned_used_ring: Pin<Box<UsedRing, DeviceAlloc>>,
 }
 
 impl DescrRing {
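The reworked `DescrRing` keeps two handles per ring: a `VolatileRef<'static, _>` that is used for every access, and a `Pin<Box<_, DeviceAlloc>>` whose only job is to own the allocation and keep it from moving (hence the `PhantomPinned` markers and the `#[deprecated]` warning on the `_pinned_*` fields). A self-contained sketch of that split, with illustrative names (`Ring`, `Owner`, `new_owner`) and a plain `Box` standing in for the `DeviceAlloc`-backed one:

```rust
use core::pin::Pin;
use core::ptr::NonNull;

use volatile::VolatileRef;

struct Ring {
    index: u16,
}

struct Owner {
    // Every access goes through this handle, so it is always volatile.
    ring_ref: VolatileRef<'static, Ring>,
    // Only keeps the allocation alive and pinned; never accessed directly
    // (the kernel marks the corresponding fields #[deprecated] for that reason).
    _pinned_ring: Pin<Box<Ring>>,
}

fn new_owner() -> Owner {
    // In the kernel this Box is allocated with DeviceAlloc; a plain Box suffices for the sketch.
    let mut ring = Box::new(Ring { index: 0 });

    // SAFETY: the pointee is heap-allocated and stays valid and in place for as long as
    // `ring_ref` exists, because `_pinned_ring` lives in the same struct and, with this
    // field order, is dropped after `ring_ref`.
    let ring_ref = unsafe { VolatileRef::new(NonNull::from(ring.as_mut())) };

    Owner {
        ring_ref,
        _pinned_ring: Box::into_pin(ring),
    }
}
```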
@@ -228,32 +251,37 @@ impl DescrRing {
 				)
 			};
 
-			self.descr_table[write_indx] = MaybeUninit::new(descriptor);
+			let descr_table = self.descr_table_ref.as_mut_ptr();
+			map_field!(descr_table.descr_table)
+				.index(write_indx)
+				.write(MaybeUninit::new(descriptor));
 
 			desc_cnt += 1;
 			len -= 1;
 		}
 
-		self.ref_ring[index] = Some(Box::new(tkn));
-		let idx = self.avail_ring.index.get();
-		let len = self.avail_ring.ring_ref().len();
-		self.avail_ring.ring_mut()[idx as usize % len] = MaybeUninit::new((index as u16).into());
+		self.token_ring[index] = Some(Box::new(tkn));
+
+		let avail_ring = self.avail_ring_ref.as_mut_ptr();
+		let idx = map_field!(avail_ring.index).read().get();
+		let len = self.token_ring.len();
+		AvailRing::ring_ptr(avail_ring)
+			.index(idx as usize % len)
+			.write(MaybeUninit::new((index as u16).into()));
 
 		memory_barrier();
-		self.avail_ring.index = (self.avail_ring.index.get().wrapping_add(1)).into();
+		map_field!(avail_ring.index).update(|val| (val.get().wrapping_add(1)).into());
 
 		(0, 0)
 	}
 
 	fn poll(&mut self) {
-		while self.read_idx
-			!= unsafe { ptr::read_volatile(ptr::addr_of!(self.used_ring.index)).get() }
-		{
-			let cur_ring_index = self.read_idx as usize % self.used_ring.ring_ref().len();
-			let used_elem =
-				unsafe { ptr::read_volatile(&self.used_ring.ring_ref()[cur_ring_index]) };
-
-			let mut tkn = self.ref_ring[used_elem.id.get() as usize].take().expect(
+		let used_ring = self.used_ring_ref.as_ptr();
+		while self.read_idx != map_field!(used_ring.index).read().get() {
+			let cur_ring_index = self.read_idx as usize % self.token_ring.len();
+			let used_elem = UsedRing::ring_ptr(used_ring).index(cur_ring_index).read();
+
+			let mut tkn = self.token_ring[used_elem.id.get() as usize].take().expect(
 				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
 			);
@@ -276,15 +304,18 @@ impl DescrRing {
 	}
 
 	fn drv_enable_notif(&mut self) {
-		self.avail_ring.flags = 0.into();
+		let avail_ring = self.avail_ring_ref.as_mut_ptr();
+		map_field!(avail_ring.flags).write(0.into());
 	}
 
 	fn drv_disable_notif(&mut self) {
-		self.avail_ring.flags = 1.into();
+		let avail_ring = self.avail_ring_ref.as_mut_ptr();
+		map_field!(avail_ring.flags).write(1.into());
 	}
 
 	fn dev_is_notif(&self) -> bool {
-		self.used_ring.flags.get() & 1 == 0
+		let used_ring = self.used_ring_ref.as_ptr();
+		map_field!(used_ring.flags).read().get() & 1 == 0
 	}
 }
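For the `poll` and notification changes just above: the old code mixed one explicit `ptr::read_volatile` (for the index field that the device updates) with plain field accesses for everything else; the new code routes all of it through `map_field!` on a `VolatileRef`, so volatility is carried by the handle's type rather than by remembering the right call at each site. A small before/after sketch with an illustrative `Header` type (not the driver's types):

```rust
use core::ptr::{self, NonNull};

use volatile::{map_field, VolatileRef};

#[repr(C)]
struct Header {
    flags: u16,
    index: u16,
}

fn read_index(header: &mut Header) -> (u16, u16) {
    // Old style (see the removed lines in `poll`): a raw volatile read of one field.
    let raw = unsafe { ptr::read_volatile(ptr::addr_of!(header.index)) };

    // New style: the same read through VolatileRef/map_field!, so the access mode
    // is part of the type and applies to every field projection.
    let mut header_ref = unsafe { VolatileRef::new(NonNull::from(&mut *header)) };
    let header_ptr = header_ref.as_mut_ptr();
    let wrapped = map_field!(header_ptr.index).read();

    (raw, wrapped)
}
```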
@@ -374,10 +405,15 @@ impl Virtq for SplitVq {
 		let size = vq_handler.set_vq_size(size.0);
 		const ALLOCATOR: DeviceAlloc = DeviceAlloc;
 
-		// Allocate heap memory via a vec, leak and cast
-		let descr_table = Box::new_uninit_slice_in(size.into(), ALLOCATOR);
+		let mut descr_table = unsafe {
+			core::mem::transmute::<
+				Box<[MaybeUninit<Descriptor>], DeviceAlloc>,
+				Box<DescrTable, DeviceAlloc>,
+			>(Box::new_uninit_slice_in(size.into(), ALLOCATOR))
+		};
+		let descr_table_ref = unsafe { VolatileRef::new(NonNull::from(descr_table.as_mut())) };
 
-		let avail_ring = {
+		let mut avail_ring = {
 			let ring_and_event_len = usize::from(size) + 1;
 			let allocation = ALLOCATOR
 				.allocate(
@@ -396,6 +432,7 @@
 				)
 			}
 		};
+		let avail_ring_ref = unsafe { VolatileRef::new(NonNull::from(avail_ring.as_mut())) };
 
 		let used_ring = {
 			let ring_and_event_layout = Layout::array::<UsedElem>(size.into())
@@ -422,10 +459,12 @@
 				)
 			}
 		};
+		let used_ring_ref =
+			unsafe { VolatileRef::new_read_only(NonNull::from(used_ring.as_ref())) };
 
 		// Provide memory areas of the queues data structures to the device
 		vq_handler.set_ring_addr(paging::virt_to_phys(VirtAddr::from(
-			descr_table.as_ptr().expose_provenance(),
+			ptr::from_mut(descr_table.as_mut()).expose_provenance(),
 		)));
 		// As usize is safe here, as the *mut EventSuppr raw pointer is a thin pointer of size usize
 		vq_handler.set_drv_ctrl_addr(paging::virt_to_phys(VirtAddr::from(
@@ -435,15 +474,21 @@
 			ptr::from_ref(used_ring.as_ref()).expose_provenance(),
 		)));
 
+		// We have to set the deprecated fields.
+		#[allow(deprecated)]
 		let descr_ring = DescrRing {
 			read_idx: 0,
-			ref_ring: core::iter::repeat_with(|| None)
+			token_ring: core::iter::repeat_with(|| None)
 				.take(size.into())
 				.collect::<Vec<_>>()
 				.into_boxed_slice(),
-			descr_table,
-			avail_ring,
-			used_ring,
+			descr_table_ref,
+			avail_ring_ref,
+			used_ring_ref,
+
+			_pinned_descr_table: Box::into_pin(descr_table),
+			_pinned_avail_ring: Box::into_pin(avail_ring),
+			_pinned_used_ring: Box::into_pin(used_ring),
 		};
 
 		let notif_ctrl = NotifCtrl::new(ptr::with_exposed_provenance_mut(
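A note on the `#[allow(deprecated)]` at the construction site above: because the `_pinned_*` fields are marked `#[deprecated]`, any accidental direct use elsewhere in the module produces a compiler warning, while the one place that has to initialize them opts out explicitly on the `let` statement. A minimal sketch of the same trick, with illustrative names rather than the kernel's types:

```rust
struct Queue {
    /// Keeps the ring allocation alive; all accesses must go through a volatile handle instead.
    #[deprecated = "only keeps the allocation alive; do not access directly"]
    _pinned_ring: Box<[u16]>,
}

fn build(ring: Box<[u16]>) -> Queue {
    // The constructor is the only place allowed to touch the deprecated field.
    #[allow(deprecated)]
    let queue = Queue { _pinned_ring: ring };
    queue
}
```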