From 8228d21000c611ff091eff841681b2cceb0a10b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=87a=C4=9Fatay=20Yi=C4=9Fit=20=C5=9Eahin?= Date: Thu, 23 Nov 2023 19:25:30 +0100 Subject: [PATCH] remove Pinned It seems like Pinned was more of a single bit reference counter than an actual Pin. This commit replaces it with Rc. --- src/drivers/net/virtio_net.rs | 1 - src/drivers/virtio/virtqueue/mod.rs | 196 ++----------------------- src/drivers/virtio/virtqueue/packed.rs | 145 ++++++------------ src/drivers/virtio/virtqueue/split.rs | 95 +++++------- src/lib.rs | 1 + 5 files changed, 95 insertions(+), 343 deletions(-) diff --git a/src/drivers/net/virtio_net.rs b/src/drivers/net/virtio_net.rs index 790cc32114..ccd1bd47a1 100644 --- a/src/drivers/net/virtio_net.rs +++ b/src/drivers/net/virtio_net.rs @@ -181,7 +181,6 @@ impl RxQueues { Ok(transfer) } else { warn!("Unfinished transfer in post processing. Returning buffer to queue. This will need explicit cleanup."); - transfer.close(); Err(VirtioNetError::ProcessOngoing) } } diff --git a/src/drivers/virtio/virtqueue/mod.rs b/src/drivers/virtio/virtqueue/mod.rs index c234b50cd5..0e37e4dfe8 100644 --- a/src/drivers/virtio/virtqueue/mod.rs +++ b/src/drivers/virtio/virtqueue/mod.rs @@ -211,18 +211,6 @@ impl Virtq { } } - /// Does maintenance of the queue. This involces currently only, checking if early dropped transfers - /// have been finished and removes them and frees their ID's and memory areas. - /// - /// This function is especially useful if ones memory pool is empty and one uses early drop of transfers - /// in order to fire-and-forget. - pub fn clean_up(&self) { - match self { - Virtq::Packed(vq) => vq.clean_up(), - Virtq::Split(vq) => vq.clean_up(), - } - } - /// Dispatches a batch of TransferTokens. The actual behaviour depends on the respective /// virtqueue implementation. Please see the respective docs for details. /// @@ -508,16 +496,6 @@ impl Virtq { Virtq::Split(vq) => vq.prep_buffer(rc_self, send, recv), } } - - /// Early drop provides a mechanism for the queue to detect, if an ongoing transfer or a transfer not yet polled by the driver - /// has been dropped. The queue implementation is responsible for taking care what should happen to the respective TransferToken - /// and BufferToken. - fn early_drop(&self, transfer_tk: Pinned) { - match self { - Virtq::Packed(vq) => vq.early_drop(transfer_tk), - Virtq::Split(vq) => vq.early_drop(transfer_tk), - } - } } /// The trait needs to be implemented on structures which are to be used via the `prep_transfer()` function of virtqueues and for @@ -588,19 +566,7 @@ pub trait AsSliceU8 { pub struct Transfer { /// Needs to be `Option>` in order to prevent deallocation via None // See custom drop function for clarity - transfer_tkn: Option>, -} - -impl Drop for Transfer { - /// When an unclosed transfer is dropped. The [Pinned]<[TransferToken]> is returned to the respective - /// virtqueue, who is responsible of handling these situations. - fn drop(&mut self) { - if let Some(tkn) = self.transfer_tkn.take() { - // Unwrapping is okay here, as TransferToken MUST hold a BufferToken - let vq_ref = Rc::clone(&tkn.buff_tkn.as_ref().unwrap().vq); - vq_ref.early_drop(tkn) - } - } + transfer_tkn: Option>, } // Public Interface of Transfer @@ -704,13 +670,12 @@ impl Transfer { send_buff, recv_buff, .. 
- } = self - .transfer_tkn - .as_mut() - .unwrap() - .buff_tkn - .as_mut() - .unwrap(); + } = unsafe { + Rc::get_mut_unchecked(self.transfer_tkn.as_mut().unwrap()) + .buff_tkn + .as_mut() + .unwrap() + }; (send_buff.as_mut(), recv_buff.as_mut()) }; @@ -870,7 +835,8 @@ impl Transfer { match state { TransferState::Finished => { // Desctructure Token - let mut transfer_tkn = self.transfer_tkn.take().unwrap().unpin(); + let mut transfer_tkn = Rc::into_inner(self.transfer_tkn.take().unwrap()) + .expect("There should not have been other references to the token."); let mut buffer_tkn = transfer_tkn.buff_tkn.take().unwrap(); @@ -919,23 +885,6 @@ impl Transfer { } } - /// Closes an transfer. If the transfer was ongoing the respective transfer token will be returned to the virtqueue. - /// If it was finished the resources will be cleaned up. - pub fn close(mut self) { - match self.transfer_tkn.as_ref().unwrap().state { - TransferState::Processing => { - // Unwrapping is okay here, as TransferToken must hold a BufferToken - let vq = Rc::clone(&self.transfer_tkn.as_ref().unwrap().get_vq()); - let transfer_tkn = self.transfer_tkn.take().unwrap(); - vq.early_drop(transfer_tkn); - } - TransferState::Ready => { - unreachable!("Transfers MUST have tokens of states Processing or Finished.") - } - TransferState::Finished => (), // Do nothing and free everything. - } - } - /// If the transfer was finished returns the BufferToken inside the transfer else returns an error. /// /// **WARN:** @@ -955,11 +904,8 @@ impl Transfer { .unwrap() .reusable { - let tkn = self - .transfer_tkn - .take() - .unwrap() - .unpin() + let tkn = Rc::into_inner(self.transfer_tkn.take().unwrap()) + .expect("There should not have been other references to the token") .buff_tkn .take() .unwrap(); @@ -995,11 +941,8 @@ impl Transfer { .unwrap() .reusable { - let tkn = self - .transfer_tkn - .take() - .unwrap() - .unpin() + let tkn = Rc::into_inner(self.transfer_tkn.take().unwrap()) + .expect("There should not have been other references to the token") .buff_tkn .take() .unwrap(); @@ -1058,15 +1001,7 @@ impl TransferToken { pub fn dispatch_await(mut self, await_queue: Rc>>, notif: bool) { self.await_queue = Some(Rc::clone(&await_queue)); - // Prevent TransferToken from being dropped - // I.e. do NOT run the custom constructor which will - // deallocate memory. - self.get_vq() - .dispatch(self, notif) - .transfer_tkn - .take() - .unwrap() - .into_raw(); + self.get_vq().dispatch(self, notif); } /// Dispatches the provided TransferToken to the respective queue and returns a transfer. @@ -2373,109 +2308,6 @@ pub enum BuffSpec<'a> { Indirect(&'a [Bytes]), } -/// Ensures `T` is pinned at the same memory location. -/// This allows to refer to structures via raw pointers. -/// -/// **WARN:** -/// -/// Assuming a raw pointer `*mut T / *const T` is valid, is only safe as long as -/// the `Pinned` does life! -/// -/// **Properties:** -/// -/// * `Pinned` behaves like T and implements `Deref`. -/// * Drops `T` upon drop. -pub struct Pinned { - raw_ptr: *mut T, - _drop_inner: bool, -} - -impl Pinned { - /// Turns a `Pinned` into a *mut T. Memory will remain valid. - fn into_raw(mut self) -> *mut T { - self._drop_inner = false; - self.raw_ptr - } - - /// Creates a new pinned `T` by boxing and leaking it. - /// Be aware that this will result in a new heap allocation - /// for `T` to be boxed. 
- fn pin(val: T) -> Pinned { - let boxed = Box::new(val); - Pinned { - raw_ptr: Box::into_raw(boxed), - _drop_inner: true, - } - } - - /// Creates a new pinned `T` from a boxed `T`. - fn from_boxed(boxed: Box) -> Pinned { - Pinned { - raw_ptr: Box::into_raw(boxed), - _drop_inner: true, - } - } - - /// Create a new pinned `T` from a `*mut T` - fn from_raw(raw_ptr: *mut T) -> Pinned { - Pinned { - raw_ptr, - _drop_inner: true, - } - } - - /// Unpins the pinned value and returns it. This is only - /// save as long as no one relies on the - /// memory location of `T`, as this location - /// will no longer be constant. - fn unpin(mut self) -> T { - self._drop_inner = false; - - unsafe { *Box::from_raw(self.raw_ptr) } - } - - /// Returns a pointer to `T`. The pointer - /// can be assumed to be constant over the lifetime of - /// `Pinned`. - fn raw_addr(&self) -> *mut T { - self.raw_ptr - } -} - -impl Deref for Pinned { - type Target = T; - fn deref(&self) -> &Self::Target { - unsafe { &*(self.raw_ptr) } - } -} - -impl DerefMut for Pinned { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self.raw_ptr) } - } -} - -impl Drop for Pinned { - fn drop(&mut self) { - if self._drop_inner { - unsafe { - drop(Box::from_raw(self.raw_ptr)); - } - } - } -} - -//impl Deref for Pinned> { -// type Target = [K]; -// fn deref(&self) -> &Self::Target { -// let vec = unsafe { -// & *(self.raw_ptr) -// }; -// -// vec.as_slice() -// } -//} - /// Virtqueue descr flags as defined in the specification. /// /// See Virtio specification v1.1. - 2.6.5 diff --git a/src/drivers/virtio/virtqueue/packed.rs b/src/drivers/virtio/virtqueue/packed.rs index 97e033b5f0..ac7ef73705 100644 --- a/src/drivers/virtio/virtqueue/packed.rs +++ b/src/drivers/virtio/virtqueue/packed.rs @@ -20,8 +20,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl}; use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl}; use super::error::VirtqError; use super::{ - AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Pinned, - Transfer, TransferState, TransferToken, Virtq, VqIndex, VqSize, + AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer, + TransferState, TransferToken, Virtq, VqIndex, VqSize, }; use crate::arch::mm::paging::{BasePageSize, PageSize}; use crate::arch::mm::{paging, VirtAddr}; @@ -88,7 +88,7 @@ impl WrapCount { struct DescriptorRing { ring: &'static mut [Descriptor], //ring: Pinned>, - tkn_ref_ring: Box<[*mut TransferToken]>, + tkn_ref_ring: Box<[Option>]>, // Controlling variables for the ring // @@ -117,7 +117,7 @@ impl DescriptorRing { // Descriptor ID's run from 1 to size_of_queue. In order to index directly into the // reference ring via an ID it is much easier to simply have an array of size = size_of_queue + 1 // and do not care about the first element being unused. - let tkn_ref_ring = vec![ptr::null_mut(); size + 1].into_boxed_slice(); + let tkn_ref_ring = vec![None; size + 1].into_boxed_slice(); DescriptorRing { ring, @@ -135,33 +135,27 @@ impl DescriptorRing { fn poll(&mut self) { let mut ctrl = self.get_read_ctrler(); - if let Some(tkn) = ctrl.poll_next() { + if let Some(mut tkn) = ctrl.poll_next() { // The state of the TransferToken up to this point MUST NOT be // finished. As soon as we mark the token as finished, we can not // be sure, that the token is not dropped, which would making // the dereferencing operation undefined behaviour as also // all operations on the reference. 
- let tkn = unsafe { &mut *(tkn) }; - - match tkn.await_queue { - Some(_) => { - tkn.state = TransferState::Finished; - let queue = tkn.await_queue.take().unwrap(); + unsafe { + let tkn_ref = Rc::get_mut_unchecked(&mut tkn); + tkn_ref.state = TransferState::Finished; + if let Some(queue) = tkn_ref.await_queue.take() { // Turn the raw pointer into a Pinned again, which will hold ownership of the Token queue.borrow_mut().push_back(Transfer { - transfer_tkn: Some(Pinned::from_raw(ptr::from_mut(tkn))), + transfer_tkn: Some(tkn), }); } - None => tkn.state = TransferState::Finished, } } } - fn push_batch( - &mut self, - tkn_lst: Vec, - ) -> (Vec>, usize, u8) { + fn push_batch(&mut self, tkn_lst: Vec) -> (Vec>, usize, u8) { // Catch empty push, in order to allow zero initialized first_ctrl_settings struct // which will be overwritten in the first iteration of the for-loop assert!(!tkn_lst.is_empty()); @@ -169,13 +163,10 @@ impl DescriptorRing { let mut first_ctrl_settings: (usize, u16, WrapCount) = (0, 0, WrapCount::new()); let mut pind_lst = Vec::with_capacity(tkn_lst.len()); - for (i, tkn) in tkn_lst.into_iter().enumerate() { - // fix memory address of token - let mut pinned = Pinned::pin(tkn); - + for (i, mut tkn) in tkn_lst.into_iter().enumerate() { // Check length and if its fits. This should always be true due to the restriction of // the memory pool, but to be sure. - assert!(pinned.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity); + assert!(tkn.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity); // create an counter that wrappes to the first element // after reaching a the end of the ring @@ -190,8 +181,8 @@ impl DescriptorRing { // * write descriptors in the correct order // * make them available in the right order (reversed order or i.e. lastly where device polls) match ( - &pinned.buff_tkn.as_ref().unwrap().send_buff, - &pinned.buff_tkn.as_ref().unwrap().recv_buff, + &tkn.buff_tkn.as_ref().unwrap().send_buff, + &tkn.buff_tkn.as_ref().unwrap().recv_buff, ) { (Some(send_buff), Some(recv_buff)) => { // It is important to differentiate between indirect and direct descriptors here and if @@ -282,22 +273,25 @@ impl DescriptorRing { (None, None) => unreachable!("Empty Transfers are not allowed!"), // This should already be caught at creation of BufferToken } + // Update the state of the actual Token + tkn.state = TransferState::Processing; + + let tkn = Rc::new(tkn); + if i == 0 { first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.wrap_at_init); } else { // Update flags of the first descriptor and set new write_index - ctrl.make_avail(pinned.raw_addr()); + ctrl.make_avail(tkn.clone()); } - // Update the state of the actual Token - pinned.state = TransferState::Processing; - pind_lst.push(pinned); + pind_lst.push(tkn.clone()); } // Manually make the first buffer available lastly // // Providing the first buffer in the list manually // provide reference, in order to let TransferToken now upon finish. - self.tkn_ref_ring[usize::from(first_ctrl_settings.1)] = pind_lst[0].raw_addr(); + self.tkn_ref_ring[usize::from(first_ctrl_settings.1)] = Some(pind_lst[0].clone()); // The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step. // See Virtio specfification v1.1. 
- 2.7.21 fence(Ordering::SeqCst); @@ -311,13 +305,10 @@ impl DescriptorRing { ) } - fn push(&mut self, tkn: TransferToken) -> (Pinned, usize, u8) { - // fix memory address of token - let mut pinned = Pinned::pin(tkn); - + fn push(&mut self, mut tkn: TransferToken) -> (Rc, usize, u8) { // Check length and if its fits. This should always be true due to the restriction of // the memory pool, but to be sure. - assert!(pinned.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity); + assert!(tkn.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity); // create an counter that wrappes to the first element // after reaching a the end of the ring @@ -332,8 +323,8 @@ impl DescriptorRing { // * write descriptors in the correct order // * make them available in the right order (reversed order or i.e. lastly where device polls) match ( - &pinned.buff_tkn.as_ref().unwrap().send_buff, - &pinned.buff_tkn.as_ref().unwrap().recv_buff, + &tkn.buff_tkn.as_ref().unwrap().send_buff, + &tkn.buff_tkn.as_ref().unwrap().recv_buff, ) { (Some(send_buff), Some(recv_buff)) => { // It is important to differentiate between indirect and direct descriptors here and if @@ -418,15 +409,19 @@ impl DescriptorRing { } fence(Ordering::SeqCst); - // Update flags of the first descriptor and set new write_index - ctrl.make_avail(pinned.raw_addr()); + // Update the state of the actual Token + tkn.state = TransferState::Processing; + fence(Ordering::SeqCst); + let tkn = Rc::new(tkn); - // Update the state of the actual Token - pinned.state = TransferState::Processing; + fence(Ordering::SeqCst); + // Update flags of the first descriptor and set new write_index + ctrl.make_avail(tkn.clone()); + fence(Ordering::SeqCst); // Converting a boolean as u8 is fine - (pinned, ctrl.start, ctrl.wrap_at_init.0 as u8) + (tkn, ctrl.start, ctrl.wrap_at_init.0 as u8) } /// # Unsafe @@ -472,26 +467,22 @@ struct ReadCtrl<'a> { impl<'a> ReadCtrl<'a> { /// Polls the ring for a new finished buffer. If buffer is marked as used, takes care of /// updating the queue and returns the respective TransferToken. - fn poll_next(&mut self) -> Option<*mut TransferToken> { + fn poll_next(&mut self) -> Option> { // Check if descriptor has been marked used. if self.desc_ring.ring[self.position].flags & WrapCount::flag_mask() == self.desc_ring.dev_wc.as_flags_used() { - let tkn = unsafe { - let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id); - let raw_tkn = self.desc_ring.tkn_ref_ring[buff_id]; - // unset the reference in the reference ring for security! - self.desc_ring.tkn_ref_ring[buff_id] = ptr::null_mut(); - assert!(!raw_tkn.is_null()); - &mut *raw_tkn - }; + let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id); + let mut tkn = self.desc_ring.tkn_ref_ring[buff_id] + .take() + .expect("TransferToken at position does not exist"); let (send_buff, recv_buff) = { let BufferToken { send_buff, recv_buff, .. - } = tkn.buff_tkn.as_mut().unwrap(); + } = unsafe { Rc::get_mut_unchecked(&mut tkn).buff_tkn.as_mut().unwrap() }; (recv_buff.as_mut(), send_buff.as_mut()) }; @@ -542,7 +533,7 @@ impl<'a> ReadCtrl<'a> { (None, None) => unreachable!("Empty Transfers are not allowed..."), } - Some(ptr::from_mut(tkn)) + Some(tkn) } else { None } @@ -798,14 +789,14 @@ impl<'a> WriteCtrl<'a> { } } - fn make_avail(&mut self, raw_tkn: *mut TransferToken) { + fn make_avail(&mut self, raw_tkn: Rc) { // We fail if one wants to make a buffer available without inserting one element! 
assert!(self.start != self.position); // We also fail if buff_id is not set! assert!(self.buff_id != 0); // provide reference, in order to let TransferToken now upon finish. - self.desc_ring.tkn_ref_ring[usize::from(self.buff_id)] = raw_tkn; + self.desc_ring.tkn_ref_ring[usize::from(self.buff_id)] = Some(raw_tkn); // The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step. // See Virtio specfification v1.1. - 2.7.21 fence(Ordering::SeqCst); @@ -997,10 +988,6 @@ pub struct PackedVq { /// The virtqueues index. This identifies the virtqueue to the /// device and is unique on a per device basis. index: VqIndex, - /// Holds all early dropped `TransferToken` - /// If `TransferToken.state == TransferState::Finished` - /// the Token can be safely dropped. - dropped: RefCell>>, } // Public interface of PackedVq @@ -1018,17 +1005,6 @@ impl PackedVq { self.drv_event.borrow_mut().disable_notif(); } - /// This function does check if early dropped TransferTokens are finished - /// and removes them if this is the case. - pub fn clean_up(&self) { - // remove and drop all finished Transfers - if !self.dropped.borrow().is_empty() { - self.dropped - .borrow_mut() - .retain(|tkn| tkn.state != TransferState::Finished); - } - } - /// See `Virtq.poll()` documentation pub fn poll(&self) { self.descr_ring.borrow_mut().poll(); @@ -1110,7 +1086,7 @@ impl PackedVq { tkn.await_queue = Some(Rc::clone(&await_queue)); } - let (pin_tkn_lst, next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns); + let (_, next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns); if notif { self.drv_event @@ -1137,13 +1113,6 @@ impl PackedVq { self.notif_ctrl.notify_dev(¬if_data) } - - for pinned in pin_tkn_lst { - // Prevent TransferToken from being dropped - // I.e. do NOT run the custom constructor which will - // deallocate memory. - pinned.into_raw(); - } } /// See `Virtq.prep_transfer()` documentation. @@ -1185,24 +1154,6 @@ impl PackedVq { } } - /// The packed virtqueue handles early dropped transfers by moving the respective tokens into - /// an vector. Here they will remain until they are finished. In order to ensure this the queue - /// will check these descriptors from time to time during its poll function. - /// - /// Also see `Virtq.early_drop()` documentation. - pub fn early_drop(&self, tkn: Pinned) { - match tkn.state { - TransferState::Finished => (), // Drop the pinned token -> Dealloc everything - TransferState::Ready => { - unreachable!("Early dropped transfers are not allowed to be state == Ready") - } - TransferState::Processing => { - // Keep token until state is finished. This needs to be checked/cleaned up later - self.dropped.borrow_mut().push(tkn); - } - } - } - /// See `Virtq.index()` documentation pub fn index(&self) -> VqIndex { self.index @@ -1294,9 +1245,6 @@ impl PackedVq { // Initialize new memory pool. 
let mem_pool = Rc::new(MemPool::new(vq_size)); - // Initialize an empty vector for future dropped transfers - let dropped: RefCell>> = RefCell::new(Vec::new()); - vq_handler.enable_queue(); info!("Created PackedVq: idx={}, size={}", index.0, vq_size); @@ -1309,7 +1257,6 @@ impl PackedVq { mem_pool, size: VqSize::from(vq_size), index, - dropped, }) } diff --git a/src/drivers/virtio/virtqueue/split.rs b/src/drivers/virtio/virtqueue/split.rs index a92c551543..f1758f8e89 100644 --- a/src/drivers/virtio/virtqueue/split.rs +++ b/src/drivers/virtio/virtqueue/split.rs @@ -17,8 +17,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl}; use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl}; use super::error::VirtqError; use super::{ - AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Pinned, - Transfer, TransferState, TransferToken, Virtq, VqIndex, VqSize, + AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer, + TransferState, TransferToken, Virtq, VqIndex, VqSize, }; use crate::arch::memory_barrier; use crate::arch::mm::paging::{BasePageSize, PageSize}; @@ -72,19 +72,19 @@ struct UsedElem { struct DescrRing { read_idx: u16, descr_table: DescrTable, - ref_ring: Box<[*mut TransferToken]>, + ref_ring: Box<[Option>]>, avail_ring: AvailRing, used_ring: UsedRing, } impl DescrRing { - fn push(&mut self, tkn: TransferToken) -> (Pinned, u16, u16) { - let pin = Pinned::pin(tkn); + fn push(&mut self, tkn: TransferToken) -> (Rc, u16, u16) { + let tkn = Rc::new(tkn); let mut desc_lst = Vec::new(); let mut is_indirect = false; - if let Some(buff) = pin.buff_tkn.as_ref().unwrap().send_buff.as_ref() { + if let Some(buff) = tkn.buff_tkn.as_ref().unwrap().send_buff.as_ref() { if buff.is_indirect() { desc_lst.push((buff.get_ctrl_desc().unwrap(), false)); is_indirect = true; @@ -95,7 +95,7 @@ impl DescrRing { } } - if let Some(buff) = pin.buff_tkn.as_ref().unwrap().recv_buff.as_ref() { + if let Some(buff) = tkn.buff_tkn.as_ref().unwrap().recv_buff.as_ref() { if buff.is_indirect() { if desc_lst.is_empty() { desc_lst.push((buff.get_ctrl_desc().unwrap(), true)); @@ -114,7 +114,7 @@ impl DescrRing { } } - let mut len = pin.buff_tkn.as_ref().unwrap().num_consuming_descr(); + let mut len = tkn.buff_tkn.as_ref().unwrap().num_consuming_descr(); assert!(!desc_lst.is_empty()); // Minus 1, comes from the fact that ids run from one to 255 and not from 0 to 254 for u8::MAX sized pool @@ -189,14 +189,14 @@ impl DescrRing { len -= 1; } - self.ref_ring[index] = pin.raw_addr(); + self.ref_ring[index] = Some(tkn.clone()); self.avail_ring.ring[*self.avail_ring.index as usize % self.avail_ring.ring.len()] = index as u16; memory_barrier(); *self.avail_ring.index = self.avail_ring.index.wrapping_add(1); - (pin, 0, 0) + (tkn, 0, 0) } fn poll(&mut self) { @@ -204,26 +204,33 @@ impl DescrRing { let cur_ring_index = self.read_idx as usize % self.used_ring.ring.len(); let used_elem = unsafe { ptr::read_volatile(&self.used_ring.ring[cur_ring_index]) }; - let tkn = unsafe { &mut *(self.ref_ring[used_elem.id as usize]) }; + let mut tkn = self.ref_ring[used_elem.id as usize] + .take() + .expect("Invalid TransferToken reference in reference ring"); - if tkn.buff_tkn.as_ref().unwrap().recv_buff.as_ref().is_some() { - tkn.buff_tkn - .as_mut() + unsafe { + let tkn_ref = Rc::get_mut_unchecked(&mut tkn); + if tkn_ref + .buff_tkn + .as_ref() .unwrap() - .restr_size(None, Some(used_elem.len as usize)) - .unwrap(); - } - match tkn.await_queue { - Some(_) => { - 
tkn.state = TransferState::Finished; - let queue = tkn.await_queue.take().unwrap(); - - // Turn the raw pointer into a Pinned again, which will hold ownership of the Token + .recv_buff + .as_ref() + .is_some() + { + tkn_ref + .buff_tkn + .as_mut() + .unwrap() + .restr_size(None, Some(used_elem.len as usize)) + .unwrap(); + } + tkn_ref.state = TransferState::Finished; + if let Some(queue) = tkn_ref.await_queue.take() { queue.borrow_mut().push_back(Transfer { - transfer_tkn: Some(Pinned::from_raw(ptr::from_mut(tkn))), - }); + transfer_tkn: Some(tkn), + }) } - None => tkn.state = TransferState::Finished, } memory_barrier(); self.read_idx = self.read_idx.wrapping_add(1); @@ -248,7 +255,6 @@ pub struct SplitVq { ring: RefCell, mem_pool: Rc, size: VqSize, - dropped: RefCell>>, index: VqIndex, notif_ctrl: NotifCtrl, @@ -265,17 +271,6 @@ impl SplitVq { self.ring.borrow_mut().drv_disable_notif(); } - /// This function does check if early dropped TransferTokens are finished - /// and removes them if this is the case. - pub fn clean_up(&self) { - // remove and drop all finished Transfers - if !self.dropped.borrow().is_empty() { - self.dropped - .borrow_mut() - .retain(|tkn| tkn.state != TransferState::Finished); - } - } - /// See `Virtq.poll()` documentation pub fn poll(&self) { self.ring.borrow_mut().poll() @@ -352,24 +347,6 @@ impl SplitVq { } } - /// The packed virtqueue handles early dropped transfers by moving the respective tokens into - /// a vector. Here they will remain until they are finished. In order to ensure this the queue - /// will check these descriptors from time to time during its poll function. - /// - /// Also see `Virtq.early_drop()` documentation - pub fn early_drop(&self, tkn: Pinned) { - match tkn.state { - TransferState::Finished => (), // Drop the pinned token -> Dealloc everything - TransferState::Ready => { - unreachable!("Early dropped transfers are not allowed to be state == Ready") - } - TransferState::Processing => { - // Keep token until state is finished. This needs to be checked/cleaned up later. - self.dropped.borrow_mut().push(tkn); - } - } - } - /// See `Virtq.index()` documentation pub fn index(&self) -> VqIndex { self.index @@ -451,7 +428,7 @@ impl SplitVq { let descr_ring = DescrRing { read_idx: 0, - ref_ring: vec![ptr::null_mut(); size as usize].into_boxed_slice(), + ref_ring: vec![None; size as usize].into_boxed_slice(), descr_table, avail_ring, used_ring, @@ -466,9 +443,6 @@ impl SplitVq { // Initialize new memory pool. let mem_pool = Rc::new(MemPool::new(size)); - // Initialize an empty vector for future dropped transfers - let dropped: RefCell>> = RefCell::new(Vec::new()); - vq_handler.enable_queue(); info!("Created SplitVq: idx={}, size={}", index.0, size); @@ -479,7 +453,6 @@ impl SplitVq { mem_pool, size: VqSize(size), index, - dropped, }) } diff --git a/src/lib.rs b/src/lib.rs index 0c0a0d52f7..f7669d754d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,6 +16,7 @@ #![feature(allocator_api)] #![feature(asm_const)] #![feature(exposed_provenance)] +#![feature(get_mut_unchecked)] #![feature(linked_list_cursors)] #![feature(maybe_uninit_slice)] #![feature(naked_functions)]
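Note on the ownership pattern this patch moves to: the raw-pointer `Pinned<TransferToken>` wrapper is replaced by `Rc<TransferToken>` handles, the descriptor rings now hold `Option<Rc<TransferToken>>` slots instead of `*mut TransferToken`, the nightly `Rc::get_mut_unchecked` is used to mark in-flight tokens finished, and `Rc::into_inner` takes over where `Pinned::unpin` used to recover the token by value. Below is a minimal, standalone sketch of that scheme, not the driver's real code: `DescriptorRing`, `push`, `poll`, and the single `state` field are simplified stand-ins, it uses `std::rc::Rc` rather than the kernel's `alloc::rc::Rc`, and it assumes the ring holds the only reference at poll time so the safe `Rc::get_mut` suffices instead of the unsafe nightly API.

use std::rc::Rc;

// Simplified stand-ins for the driver types; the real TransferToken carries
// buffers, an await queue, and a back-reference to its virtqueue.
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum TransferState {
    Ready,
    Processing,
    Finished,
}

#[derive(Debug)]
struct TransferToken {
    state: TransferState,
}

// One optional shared reference per descriptor ID, mirroring
// `tkn_ref_ring: Box<[Option<Rc<TransferToken>>]>` from the patch.
struct DescriptorRing {
    tkn_ref_ring: Box<[Option<Rc<TransferToken>>]>,
}

impl DescriptorRing {
    fn new(size: usize) -> Self {
        DescriptorRing {
            // Slot 0 stays unused so descriptor IDs (1..=size) index directly.
            tkn_ref_ring: vec![None; size + 1].into_boxed_slice(),
        }
    }

    // "push": hand one clone of the Rc to the ring and return the other to the
    // caller, replacing the old Pinned::pin + raw-pointer scheme.
    fn push(&mut self, buff_id: usize, tkn: TransferToken) -> Rc<TransferToken> {
        let tkn = Rc::new(tkn);
        self.tkn_ref_ring[buff_id] = Some(Rc::clone(&tkn));
        tkn
    }

    // "poll": take the ring's reference back and mark the token finished.
    fn poll(&mut self, buff_id: usize) -> Option<Rc<TransferToken>> {
        let mut tkn = self.tkn_ref_ring[buff_id].take()?;
        // The patch uses the nightly `Rc::get_mut_unchecked` here because a
        // second Rc may still be held elsewhere; this sketch assumes the ring
        // holds the only reference, so the safe `Rc::get_mut` succeeds.
        if let Some(inner) = Rc::get_mut(&mut tkn) {
            inner.state = TransferState::Finished;
        }
        Some(tkn)
    }
}

fn main() {
    let mut ring = DescriptorRing::new(4);
    let handle = ring.push(1, TransferToken { state: TransferState::Processing });

    // The device "finishes" descriptor 1; drop our handle first so the ring
    // owns the token alone.
    drop(handle);
    let finished = ring.poll(1).expect("token was pushed at ID 1");

    // With all other clones gone, `Rc::into_inner` recovers the token by
    // value, which is what replaces the old `Pinned::unpin` in the patch.
    let tkn = Rc::into_inner(finished).expect("no other references remain");
    assert_eq!(tkn.state, TransferState::Finished);
    println!("token state: {:?}", tkn.state);
}

In the real queues a second `Rc` stays alive inside the returned `Transfer` or the await queue while the device processes the buffer, which is why the patch enables `#![feature(get_mut_unchecked)]` in `src/lib.rs` instead of relying on `Rc::get_mut`.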