diff --git a/src/drivers/net/virtio_net.rs b/src/drivers/net/virtio_net.rs
index 790cc32114..ccd1bd47a1 100644
--- a/src/drivers/net/virtio_net.rs
+++ b/src/drivers/net/virtio_net.rs
@@ -181,7 +181,6 @@ impl RxQueues {
 			Ok(transfer)
 		} else {
 			warn!("Unfinished transfer in post processing. Returning buffer to queue. This will need explicit cleanup.");
-			transfer.close();
 			Err(VirtioNetError::ProcessOngoing)
 		}
 	}
diff --git a/src/drivers/virtio/virtqueue/mod.rs b/src/drivers/virtio/virtqueue/mod.rs
index 947cd084c8..633f773bb5 100644
--- a/src/drivers/virtio/virtqueue/mod.rs
+++ b/src/drivers/virtio/virtqueue/mod.rs
@@ -127,11 +127,11 @@ impl Virtq {
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	fn dispatch(&self, tkn: TransferToken, notif: bool) -> Transfer {
+	fn dispatch(&self, tkn: TransferToken, notif: bool) {
 		match self {
 			Virtq::Packed(vq) => vq.dispatch(tkn, notif),
 			Virtq::Split(vq) => vq.dispatch(tkn, notif),
-		}
+		};
 	}
 }
@@ -211,18 +211,6 @@ impl Virtq {
 		}
 	}
 
-	/// Does maintenance of the queue. This involces currently only, checking if early dropped transfers
-	/// have been finished and removes them and frees their ID's and memory areas.
-	///
-	/// This function is especially useful if ones memory pool is empty and one uses early drop of transfers
-	/// in order to fire-and-forget.
-	pub fn clean_up(&self) {
-		match self {
-			Virtq::Packed(vq) => vq.clean_up(),
-			Virtq::Split(vq) => vq.clean_up(),
-		}
-	}
-
 	/// Dispatches a batch of TransferTokens. The actual behaviour depends on the respective
 	/// virtqueue implementation. Please see the respective docs for details.
 	///
@@ -237,7 +225,7 @@ impl Virtq {
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch_batch(tkns: Vec<TransferToken>, notif: bool) -> Vec<Transfer> {
+	pub fn dispatch_batch(tkns: Vec<TransferToken>, notif: bool) {
 		let mut used_vqs: Vec<(Rc<Virtq>, Vec<TransferToken>)> = Vec::new();
 
 		// Sort the TransferTokens depending in the queue their coming from.
@@ -267,17 +255,12 @@ impl Virtq {
 			}
 		}
 
-		let mut transfer_lst = Vec::new();
 		for (vq_ref, tkn_lst) in used_vqs {
 			match vq_ref.as_ref() {
-				Virtq::Packed(vq) => {
-					transfer_lst.append(vq.dispatch_batch(tkn_lst, notif).as_mut())
-				}
-				Virtq::Split(vq) => transfer_lst.append(vq.dispatch_batch(tkn_lst, notif).as_mut()),
+				Virtq::Packed(vq) => vq.dispatch_batch(tkn_lst, notif),
+				Virtq::Split(vq) => vq.dispatch_batch(tkn_lst, notif),
 			}
 		}
-
-		transfer_lst
 	}
 
 	/// Dispatches a batch of TransferTokens. The Transfers will be placed in to the `await_queue`
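Since `dispatch` and `dispatch_batch` no longer hand back `Transfer`s, completion is now observed exclusively through await queues. A self-contained sketch of that flow, using toy types rather than the kernel's (`dispatch_await` and `poll` are the real entry points this models):

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

struct Transfer(u32); // toy stand-in for the patch's Transfer type

fn main() {
    let await_queue: Rc<RefCell<VecDeque<Transfer>>> = Rc::new(RefCell::new(VecDeque::new()));

    // What dispatch_await stores inside the token: a clone of the queue handle.
    let inside_token = Rc::clone(&await_queue);
    // What vq.poll() does once the device is done: push the finished Transfer.
    inside_token.borrow_mut().push_back(Transfer(1));

    // The driver drains the queue whenever it likes; no Transfer value was
    // ever returned from dispatch itself (fire-and-forget).
    while let Some(Transfer(id)) = await_queue.borrow_mut().pop_front() {
        println!("transfer {id} finished");
    }
}
```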
@@ -508,16 +491,6 @@ impl Virtq {
 			Virtq::Split(vq) => vq.prep_buffer(rc_self, send, recv),
 		}
 	}
-
-	/// Early drop provides a mechanism for the queue to detect, if an ongoing transfer or a transfer not yet polled by the driver
-	/// has been dropped. The queue implementation is responsible for taking care what should happen to the respective TransferToken
-	/// and BufferToken.
-	fn early_drop(&self, transfer_tk: Pinned<TransferToken>) {
-		match self {
-			Virtq::Packed(vq) => vq.early_drop(transfer_tk),
-			Virtq::Split(vq) => vq.early_drop(transfer_tk),
-		}
-	}
 }
 
 /// The trait needs to be implemented on structures which are to be used via the `prep_transfer()` function of virtqueues and for
@@ -562,8 +535,8 @@ pub trait AsSliceU8 {
 	}
 }
 
-/// The [Transfer] will be received when a [TransferToken] is dispatched via `TransferToken.dispatch()` or
-/// via `TransferToken.dispatch_blocking()`.
+/// The [Transfer] will be received when a [TransferToken] is dispatched via [TransferToken::dispatch_blocking] or through the await
+/// queue given to [TransferToken::dispatch_await].
 ///
 /// The struct represents an ongoing transfer or an active transfer. While this does NOT mean, that the transfer is at all times inside
 /// actual virtqueue. The Transfers state can be polled via `Transfer.poll()`, which returns a bool if the transfer is finished.
@@ -571,36 +544,10 @@ pub trait AsSliceU8 {
 /// **Finished Transfers:**
 /// * Finished transfers are able to return their send and receive buffers. Either as a copy via `Transfer.ret_cpy()` or as the actual
 ///   buffers via `Transfer.ret()`.
-/// * Finished transfers should be closed via `Transfer.close()` or via `Transfer.ret()`.
 /// * Finished transfers can be reused via `Transfer.reuse()`.
-///   * This closes the transfer
-///   * And returns an normal BufferToken (One should be cautious with reusing transfers where buffers were created from raw pointers)
-///
-/// **Early dropped Transfers:**
-///
-/// If a transfer is dropped without being closed (independent of being finished or ongoing), the transfer will return the respective
-/// `Pinned<TransferToken>` to the handling virtqueue, which will take of handling graceful shutdown. Which generally should take
-/// care of waiting till the device handled the respective transfer and free the memory afterwards.
-///
-/// One could "abuse" this procedure in order to realize a "fire-and-forget" transfer.
-/// A warning here: The respective queue implementation is taking care of handling this case and there are no guarantees that the queue
-/// won't be unusable afterwards.
+///   * This returns a normal BufferToken (One should be cautious with reusing transfers where buffers were created from raw pointers)
 pub struct Transfer {
-	/// Needs to be `Option<Pinned<TransferToken>>` in order to prevent deallocation via None
-	// See custom drop function for clarity
-	transfer_tkn: Option<Pinned<TransferToken>>,
-}
-
-impl Drop for Transfer {
-	/// When an unclosed transfer is dropped. The [Pinned]<[TransferToken]> is returned to the respective
-	/// virtqueue, who is responsible of handling these situations.
-	fn drop(&mut self) {
-		if let Some(tkn) = self.transfer_tkn.take() {
-			// Unwrapping is okay here, as TransferToken MUST hold a BufferToken
-			let vq_ref = Rc::clone(&tkn.buff_tkn.as_ref().unwrap().vq);
-			vq_ref.early_drop(tkn)
-		}
-	}
+	transfer_tkn: Option<Box<TransferToken>>,
 }
 
 // Public Interface of Transfer
@@ -870,7 +817,7 @@ impl Transfer {
 		match state {
 			TransferState::Finished => {
 				// Desctructure Token
-				let mut transfer_tkn = self.transfer_tkn.take().unwrap().unpin();
+				let mut transfer_tkn = self.transfer_tkn.take().unwrap();
 
 				let mut buffer_tkn = transfer_tkn.buff_tkn.take().unwrap();
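With the custom `Drop` gone, `Transfer` owns its token as a plain `Option<Box<TransferToken>>`, so dropping a transfer frees everything and the explicit `close()` removed below becomes unnecessary. A minimal model of that ownership (toy types, not the kernel's):

```rust
struct Token {
    state: u8,
}

struct Transfer {
    transfer_tkn: Option<Box<Token>>,
}

fn main() {
    let t = Transfer {
        transfer_tkn: Some(Box::new(Token { state: 0 })),
    };
    assert_eq!(t.transfer_tkn.as_ref().unwrap().state, 0);

    // No custom Drop and no early_drop bookkeeping are needed anymore:
    // dropping the Transfer simply frees the boxed token.
    drop(t);
}
```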
@@ -919,23 +866,6 @@ impl Transfer {
 		}
 	}
 
-	/// Closes an transfer. If the transfer was ongoing the respective transfer token will be returned to the virtqueue.
-	/// If it was finished the resources will be cleaned up.
-	pub fn close(mut self) {
-		match self.transfer_tkn.as_ref().unwrap().state {
-			TransferState::Processing => {
-				// Unwrapping is okay here, as TransferToken must hold a BufferToken
-				let vq = Rc::clone(&self.transfer_tkn.as_ref().unwrap().get_vq());
-				let transfer_tkn = self.transfer_tkn.take().unwrap();
-				vq.early_drop(transfer_tkn);
-			}
-			TransferState::Ready => {
-				unreachable!("Transfers MUST have tokens of states Processing or Finished.")
-			}
-			TransferState::Finished => (), // Do nothing and free everything.
-		}
-	}
-
 	/// If the transfer was finished returns the BufferToken inside the transfer else returns an error.
 	///
 	/// **WARN:**
@@ -955,14 +885,7 @@ impl Transfer {
 			.unwrap()
 			.reusable
 		{
-			let tkn = self
-				.transfer_tkn
-				.take()
-				.unwrap()
-				.unpin()
-				.buff_tkn
-				.take()
-				.unwrap();
+			let tkn = self.transfer_tkn.take().unwrap().buff_tkn.take().unwrap();
 
 			Ok(tkn.reset())
 		} else {
@@ -995,14 +918,7 @@ impl Transfer {
 			.unwrap()
 			.reusable
 		{
-			let tkn = self
-				.transfer_tkn
-				.take()
-				.unwrap()
-				.unpin()
-				.buff_tkn
-				.take()
-				.unwrap();
+			let tkn = self.transfer_tkn.take().unwrap().buff_tkn.take().unwrap();
 
 			Ok(tkn.reset_purge())
 		} else {
@@ -1058,24 +974,16 @@ impl TransferToken {
 	pub fn dispatch_await(mut self, await_queue: Rc<RefCell<VecDeque<Transfer>>>, notif: bool) {
 		self.await_queue = Some(Rc::clone(&await_queue));
 
-		// Prevent TransferToken from being dropped
-		// I.e. do NOT run the custom constructor which will
-		// deallocate memory.
-		self.get_vq()
-			.dispatch(self, notif)
-			.transfer_tkn
-			.take()
-			.unwrap()
-			.into_raw();
+		self.get_vq().dispatch(self, notif);
 	}
 
-	/// Dispatches the provided TransferToken to the respective queue and returns a transfer.
+	/// Dispatches the provided TransferToken to the respective queue.
 	///
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch(self, notif: bool) -> Transfer {
-		self.get_vq().dispatch(self, notif)
+	pub fn dispatch(self, notif: bool) {
+		self.get_vq().dispatch(self, notif);
 	}
 
 	/// Dispatches the provided TransferToken to the respectuve queue and does
@@ -1090,18 +998,20 @@ impl TransferToken {
 	/// Upon finish notifications are enabled again.
 	pub fn dispatch_blocking(self) -> Result<Transfer, VirtqError> {
 		let vq = self.get_vq();
-		let transfer = self.get_vq().dispatch(self, false);
+		let rcv_queue = Rc::new(RefCell::new(VecDeque::with_capacity(1)));
+		self.dispatch_await(rcv_queue.clone(), false);
 
 		vq.disable_notifs();
 
-		while transfer.transfer_tkn.as_ref().unwrap().state != TransferState::Finished {
-			// Keep Spinning until the state changes to Finished
+		while rcv_queue.borrow().is_empty() {
+			// Keep Spinning until the receive queue is filled
 			vq.poll()
 		}
 
 		vq.enable_notifs();
 
-		Ok(transfer)
+		let result = Ok(rcv_queue.borrow_mut().pop_front().unwrap());
+		result
 	}
 }
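The reworked `dispatch_blocking` above reduces to `dispatch_await` with a private one-element queue plus a spin on `poll()`. A runnable toy model of the same pattern (the closure stands in for the real `vq.poll()`):

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

fn main() {
    // Private one-slot await queue, as in the reworked dispatch_blocking.
    let rcv_queue = Rc::new(RefCell::new(VecDeque::with_capacity(1)));

    // Stand-in for vq.poll(): the real poll reads the used ring and pushes
    // finished transfers into their await queues.
    let device_side = Rc::clone(&rcv_queue);
    let poll = move || device_side.borrow_mut().push_back(42u32);

    while rcv_queue.borrow().is_empty() {
        // Keep spinning until the receive queue is filled.
        poll();
    }

    let result = rcv_queue.borrow_mut().pop_front().unwrap();
    assert_eq!(result, 42);
}
```

The patch's `let result = …; result` step presumably just makes it explicit that the `RefCell` borrow ends in its own statement before the function returns.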
@@ -2373,109 +2283,6 @@ pub enum BuffSpec<'a> {
 	Indirect(&'a [Bytes]),
 }
 
-/// Ensures `T` is pinned at the same memory location.
-/// This allows to refer to structures via raw pointers.
-///
-/// **WARN:**
-///
-/// Assuming a raw pointer `*mut T / *const T` is valid, is only safe as long as
-/// the `Pinned<T>` does life!
-///
-/// **Properties:**
-///
-/// * `Pinned<T>` behaves like T and implements `Deref`.
-/// * Drops `T` upon drop.
-pub struct Pinned<T> {
-	raw_ptr: *mut T,
-	_drop_inner: bool,
-}
-
-impl<T> Pinned<T> {
-	/// Turns a `Pinned<T>` into a *mut T. Memory will remain valid.
-	fn into_raw(mut self) -> *mut T {
-		self._drop_inner = false;
-		self.raw_ptr
-	}
-
-	/// Creates a new pinned `T` by boxing and leaking it.
-	/// Be aware that this will result in a new heap allocation
-	/// for `T` to be boxed.
-	fn pin(val: T) -> Pinned<T> {
-		let boxed = Box::new(val);
-		Pinned {
-			raw_ptr: Box::into_raw(boxed),
-			_drop_inner: true,
-		}
-	}
-
-	/// Creates a new pinned `T` from a boxed `T`.
-	fn from_boxed(boxed: Box<T>) -> Pinned<T> {
-		Pinned {
-			raw_ptr: Box::into_raw(boxed),
-			_drop_inner: true,
-		}
-	}
-
-	/// Create a new pinned `T` from a `*mut T`
-	fn from_raw(raw_ptr: *mut T) -> Pinned<T> {
-		Pinned {
-			raw_ptr,
-			_drop_inner: true,
-		}
-	}
-
-	/// Unpins the pinned value and returns it. This is only
-	/// save as long as no one relies on the
-	/// memory location of `T`, as this location
-	/// will no longer be constant.
-	fn unpin(mut self) -> T {
-		self._drop_inner = false;
-
-		unsafe { *Box::from_raw(self.raw_ptr) }
-	}
-
-	/// Returns a pointer to `T`. The pointer
-	/// can be assumed to be constant over the lifetime of
-	/// `Pinned<T>`.
-	fn raw_addr(&self) -> *mut T {
-		self.raw_ptr
-	}
-}
-
-impl<T> Deref for Pinned<T> {
-	type Target = T;
-	fn deref(&self) -> &Self::Target {
-		unsafe { &*(self.raw_ptr) }
-	}
-}
-
-impl<T> DerefMut for Pinned<T> {
-	fn deref_mut(&mut self) -> &mut Self::Target {
-		unsafe { &mut *(self.raw_ptr) }
-	}
-}
-
-impl<T> Drop for Pinned<T> {
-	fn drop(&mut self) {
-		if self._drop_inner {
-			unsafe {
-				drop(Box::from_raw(self.raw_ptr));
-			}
-		}
-	}
-}
-
-//impl<K> Deref for Pinned<Vec<K>> {
-//	type Target = [K];
-//	fn deref(&self) -> &Self::Target {
-//		let vec = unsafe {
-//			& *(self.raw_ptr)
-//		};
-//
-//		vec.as_slice()
-//	}
-//}
-
 /// Virtqueue descr flags as defined in the specification.
 ///
 /// See Virtio specification v1.1. - 2.6.5
diff --git a/src/drivers/virtio/virtqueue/packed.rs b/src/drivers/virtio/virtqueue/packed.rs
index d38a51f691..fbd3b262ab 100644
--- a/src/drivers/virtio/virtqueue/packed.rs
+++ b/src/drivers/virtio/virtqueue/packed.rs
@@ -20,8 +20,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
 use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
 use super::error::VirtqError;
 use super::{
-	AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Pinned,
-	Transfer, TransferState, TransferToken, Virtq, VqIndex, VqSize,
+	AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer,
+	TransferState, TransferToken, Virtq, VqIndex, VqSize,
 };
 use crate::arch::mm::paging::{BasePageSize, PageSize};
 use crate::arch::mm::{paging, VirtAddr};
@@ -82,13 +82,9 @@ impl WrapCount {
 	}
 }
 
 /// Structure which allows to control raw ring and operate easily on it
-///
-/// WARN: NEVER PUSH TO THE RING AFTER DESCRIPTORRING HAS BEEN INITIALIZED AS THIS WILL PROBABLY RESULT IN A
-/// RELOCATION OF THE VECTOR AND HENCE THE DEVICE WILL NO LONGER NO THE RINGS ADDRESS!
 struct DescriptorRing {
 	ring: &'static mut [Descriptor],
-	//ring: Pinned<Vec<Descriptor>>,
-	tkn_ref_ring: Box<[*mut TransferToken]>,
+	tkn_ref_ring: Box<[Option<Box<TransferToken>>]>,
 
 	// Controlling variables for the ring
 	//
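The `Pinned` wrapper removed above existed so that raw token pointers stored in the rings stayed valid. A plain `Box` already guarantees the property that matters: the heap payload keeps its address when the box itself is moved around, which is what the new `Box<TransferToken>` bookkeeping relies on. A quick demonstration:

```rust
fn main() {
    let b = Box::new(42u64);
    let before = std::ptr::addr_of!(*b) as usize;

    let moved = b; // moving the Box moves the pointer, not the payload
    let after = std::ptr::addr_of!(*moved) as usize;

    assert_eq!(before, after);
}
```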
@@ -117,7 +113,11 @@ impl DescriptorRing {
 		// Descriptor ID's run from 1 to size_of_queue. In order to index directly into the
 		// reference ring via an ID it is much easier to simply have an array of size = size_of_queue + 1
 		// and do not care about the first element being unused.
-		let tkn_ref_ring = vec![ptr::null_mut(); size + 1].into_boxed_slice();
+		// `Box<TransferToken>` is not Clone, so neither is `None::<Box<TransferToken>>`. Hence, we need to produce `None`s with a closure.
+		let tkn_ref_ring = core::iter::repeat_with(|| None)
+			.take(size + 1)
+			.collect::<Vec<_>>()
+			.into_boxed_slice();
 
 		DescriptorRing {
 			ring,
@@ -130,52 +130,34 @@ impl DescriptorRing {
 		}
 	}
 
-	/// Polls poll index and sets states of eventually used TransferTokens to finished.
-	/// If Await_qeue is available, the Transfer will be provieded to the queue.
+	/// Polls poll index and sets the state of any finished TransferTokens.
+	/// If [TransferToken::await_queue] is available, the Transfer will be moved to the queue.
 	fn poll(&mut self) {
 		let mut ctrl = self.get_read_ctrler();
 
-		if let Some(tkn) = ctrl.poll_next() {
-			// The state of the TransferToken up to this point MUST NOT be
-			// finished. As soon as we mark the token as finished, we can not
-			// be sure, that the token is not dropped, which would making
-			// the dereferencing operation undefined behaviour as also
-			// all operations on the reference.
-			let tkn = unsafe { &mut *(tkn) };
-
-			match tkn.await_queue {
-				Some(_) => {
-					tkn.state = TransferState::Finished;
-					let queue = tkn.await_queue.take().unwrap();
-
-					// Turn the raw pointer into a Pinned again, which will hold ownership of the Token
-					queue.borrow_mut().push_back(Transfer {
-						transfer_tkn: Some(Pinned::from_raw(ptr::from_mut(tkn))),
-					});
-				}
-				None => tkn.state = TransferState::Finished,
+		if let Some(mut tkn) = ctrl.poll_next() {
+			tkn.state = TransferState::Finished;
+			if let Some(queue) = tkn.await_queue.take() {
+				// Place the TransferToken in a Transfer, which will hold ownership of the token
+				queue.borrow_mut().push_back(Transfer {
+					transfer_tkn: Some(tkn),
+				});
 			}
 		}
 	}
 
-	fn push_batch(
-		&mut self,
-		tkn_lst: Vec<TransferToken>,
-	) -> (Vec<Pinned<TransferToken>>, usize, u8) {
+	fn push_batch(&mut self, tkn_lst: Vec<TransferToken>) -> (usize, u8) {
 		// Catch empty push, in order to allow zero initialized first_ctrl_settings struct
 		// which will be overwritten in the first iteration of the for-loop
 		assert!(!tkn_lst.is_empty());
 
 		let mut first_ctrl_settings: (usize, u16, WrapCount) = (0, 0, WrapCount::new());
-		let mut pind_lst = Vec::with_capacity(tkn_lst.len());
-
-		for (i, tkn) in tkn_lst.into_iter().enumerate() {
-			// fix memory address of token
-			let mut pinned = Pinned::pin(tkn);
+		let mut first_buffer = None;
 
+		for (i, mut tkn) in tkn_lst.into_iter().enumerate() {
 			// Check length and if its fits. This should always be true due to the restriction of
 			// the memory pool, but to be sure.
-			assert!(pinned.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity);
+			assert!(tkn.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity);
 
 			// create an counter that wrappes to the first element
 			// after reaching a the end of the ring
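On the `repeat_with` initialization above: the usual `vec![None; size + 1]` shorthand requires the element type to be `Clone`, which `Option<Box<TransferToken>>` is not. A compiling sketch with a toy non-`Clone` token:

```rust
#[allow(dead_code)]
struct Tkn(u32); // deliberately not Clone, like the real TransferToken

fn main() {
    let size = 8usize; // hypothetical queue size
    // `vec![None; size + 1]` would not compile here: it clones
    // `None::<Box<Tkn>>`, which requires `Tkn: Clone`.
    let ring: Box<[Option<Box<Tkn>>]> = std::iter::repeat_with(|| None)
        .take(size + 1)
        .collect::<Vec<_>>()
        .into_boxed_slice();

    assert_eq!(ring.len(), size + 1);
    assert!(ring.iter().all(Option::is_none));
}
```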
@@ -190,8 +172,8 @@ impl DescriptorRing {
 			// * write descriptors in the correct order
 			// * make them available in the right order (reversed order or i.e. lastly where device polls)
 			match (
-				&pinned.buff_tkn.as_ref().unwrap().send_buff,
-				&pinned.buff_tkn.as_ref().unwrap().recv_buff,
+				&tkn.buff_tkn.as_ref().unwrap().send_buff,
+				&tkn.buff_tkn.as_ref().unwrap().recv_buff,
 			) {
 				(Some(send_buff), Some(recv_buff)) => {
 					// It is important to differentiate between indirect and direct descriptors here and if
@@ -282,42 +264,34 @@ impl DescriptorRing {
 				(None, None) => unreachable!("Empty Transfers are not allowed!"), // This should already be caught at creation of BufferToken
 			}
 
+			tkn.state = TransferState::Processing;
+
 			if i == 0 {
 				first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.wrap_at_init);
+				first_buffer = Some(Box::new(tkn));
 			} else {
 				// Update flags of the first descriptor and set new write_index
-				ctrl.make_avail(pinned.raw_addr());
+				ctrl.make_avail(Box::new(tkn));
 			}
-
-			// Update the state of the actual Token
-			pinned.state = TransferState::Processing;
-			pind_lst.push(pinned);
 		}
 
 		// Manually make the first buffer available lastly
 		//
 		// Providing the first buffer in the list manually
 		// provide reference, in order to let TransferToken now upon finish.
-		self.tkn_ref_ring[usize::from(first_ctrl_settings.1)] = pind_lst[0].raw_addr();
+		self.tkn_ref_ring[usize::from(first_ctrl_settings.1)] = first_buffer;
 
 		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
 		// See Virtio specfification v1.1. - 2.7.21
 		fence(Ordering::SeqCst);
 
 		self.ring[first_ctrl_settings.0].flags |= first_ctrl_settings.2.as_flags_avail();
 
 		// Converting a boolean as u8 is fine
-		(
-			pind_lst,
-			first_ctrl_settings.0,
-			first_ctrl_settings.2 .0 as u8,
-		)
+		(first_ctrl_settings.0, first_ctrl_settings.2 .0 as u8)
 	}
 
-	fn push(&mut self, tkn: TransferToken) -> (Pinned<TransferToken>, usize, u8) {
-		// fix memory address of token
-		let mut pinned = Pinned::pin(tkn);
-
+	fn push(&mut self, mut tkn: TransferToken) -> (usize, u8) {
 		// Check length and if its fits. This should always be true due to the restriction of
 		// the memory pool, but to be sure.
-		assert!(pinned.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity);
+		assert!(tkn.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity);
 
 		// create an counter that wrappes to the first element
 		// after reaching a the end of the ring
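The batch path above deliberately makes the first descriptor available last: the tail of the batch and the token hand-off are published first, a fence is issued, and only then do the head's avail flags flip, so the device cannot start consuming a half-written batch. A toy illustration of that ordering (following the comments in the code; not the kernel's actual ring types):

```rust
use std::sync::atomic::{fence, Ordering};

fn main() {
    let mut avail = [false; 4];

    // Publish the tail of the batch first (reverse order).
    for i in (1..4).rev() {
        avail[i] = true;
    }

    fence(Ordering::SeqCst); // everything visible before the head goes live

    avail[0] = true; // head last: the device may now consume the whole batch
    assert!(avail.iter().all(|&a| a));
}
```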
@@ -332,8 +306,8 @@ impl DescriptorRing {
 		// * write descriptors in the correct order
 		// * make them available in the right order (reversed order or i.e. lastly where device polls)
 		match (
-			&pinned.buff_tkn.as_ref().unwrap().send_buff,
-			&pinned.buff_tkn.as_ref().unwrap().recv_buff,
+			&tkn.buff_tkn.as_ref().unwrap().send_buff,
+			&tkn.buff_tkn.as_ref().unwrap().recv_buff,
 		) {
 			(Some(send_buff), Some(recv_buff)) => {
 				// It is important to differentiate between indirect and direct descriptors here and if
@@ -417,16 +391,17 @@ impl DescriptorRing {
 			(None, None) => unreachable!("Empty Transfers are not allowed!"), // This should already be caught at creation of BufferToken
 		}
 
+		fence(Ordering::SeqCst);
+		// Update the state of the actual Token
+		tkn.state = TransferState::Processing;
+		fence(Ordering::SeqCst);
 		// Update flags of the first descriptor and set new write_index
-		ctrl.make_avail(pinned.raw_addr());
+		ctrl.make_avail(Box::new(tkn));
 		fence(Ordering::SeqCst);
 
-		// Update the state of the actual Token
-		pinned.state = TransferState::Processing;
-
 		// Converting a boolean as u8 is fine
-		(pinned, ctrl.start, ctrl.wrap_at_init.0 as u8)
+		(ctrl.start, ctrl.wrap_at_init.0 as u8)
 	}
 
 	/// # Unsafe
@@ -470,21 +445,17 @@ struct ReadCtrl<'a> {
 }
 
 impl<'a> ReadCtrl<'a> {
-	/// Polls the ring for a new finished buffer. If buffer is marked as used, takes care of
+	/// Polls the ring for a new finished buffer. If buffer is marked as finished, takes care of
 	/// updating the queue and returns the respective TransferToken.
-	fn poll_next(&mut self) -> Option<*mut TransferToken> {
+	fn poll_next(&mut self) -> Option<Box<TransferToken>> {
 		// Check if descriptor has been marked used.
 		if self.desc_ring.ring[self.position].flags & WrapCount::flag_mask()
 			== self.desc_ring.dev_wc.as_flags_used()
 		{
-			let tkn = unsafe {
-				let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id);
-				let raw_tkn = self.desc_ring.tkn_ref_ring[buff_id];
-				// unset the reference in the reference ring for security!
-				self.desc_ring.tkn_ref_ring[buff_id] = ptr::null_mut();
-				assert!(!raw_tkn.is_null());
-				&mut *raw_tkn
-			};
+			let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id);
+			let mut tkn = self.desc_ring.tkn_ref_ring[buff_id].take().expect(
+				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
+			);
 
 			let (send_buff, recv_buff) = {
 				let BufferToken {
@@ -542,7 +513,7 @@ impl<'a> ReadCtrl<'a> {
 				(None, None) => unreachable!("Empty Transfers are not allowed..."),
 			}
 
-			Some(ptr::from_mut(tkn))
+			Some(tkn)
 		} else {
 			None
 		}
@@ -798,14 +769,14 @@ impl<'a> WriteCtrl<'a> {
 		}
 	}
 
-	fn make_avail(&mut self, raw_tkn: *mut TransferToken) {
+	fn make_avail(&mut self, raw_tkn: Box<TransferToken>) {
 		// We fail if one wants to make a buffer available without inserting one element!
 		assert!(self.start != self.position);
 		// We also fail if buff_id is not set!
 		assert!(self.buff_id != 0);
 
-		// provide reference, in order to let TransferToken now upon finish.
-		self.desc_ring.tkn_ref_ring[usize::from(self.buff_id)] = raw_tkn;
+		// provide reference, in order to let TransferToken know upon finish.
+		self.desc_ring.tkn_ref_ring[usize::from(self.buff_id)] = Some(raw_tkn);
 
 		// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
 		// See Virtio specfification v1.1. - 2.7.21
 		fence(Ordering::SeqCst);
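`poll_next` above replaces the old raw-pointer-and-null-check dance with `Option::take`, which moves the boxed token out of its `tkn_ref_ring` slot and leaves `None` behind, so a given buffer ID can only be completed once. The pattern in isolation:

```rust
fn main() {
    // One tkn_ref_ring slot, indexed by buff_id.
    let mut slot: Option<Box<u32>> = Some(Box::new(7));

    let tkn = slot
        .take()
        .expect("The buff_id is incorrect or the reference to the TransferToken was misplaced.");

    assert_eq!(*tkn, 7);
    // The slot is now empty; completing the same buff_id twice would hit the
    // expect() instead of dereferencing a stale pointer.
    assert!(slot.is_none());
}
```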
@@ -997,10 +968,6 @@ pub struct PackedVq {
 	/// The virtqueues index. This identifies the virtqueue to the
 	/// device and is unique on a per device basis.
 	index: VqIndex,
-	/// Holds all early dropped `TransferToken`
-	/// If `TransferToken.state == TransferState::Finished`
-	/// the Token can be safely dropped.
-	dropped: RefCell<Vec<Pinned<TransferToken>>>,
 }
 
 // Public interface of PackedVq
@@ -1018,17 +985,6 @@ impl PackedVq {
 		self.drv_event.borrow_mut().disable_notif();
 	}
 
-	/// This function does check if early dropped TransferTokens are finished
-	/// and removes them if this is the case.
-	pub fn clean_up(&self) {
-		// remove and drop all finished Transfers
-		if !self.dropped.borrow().is_empty() {
-			self.dropped
-				.borrow_mut()
-				.retain(|tkn| tkn.state != TransferState::Finished);
-		}
-	}
-
 	/// See `Virtq.poll()` documentation
 	pub fn poll(&self) {
 		self.descr_ring.borrow_mut().poll();
 	}
@@ -1041,11 +997,11 @@ impl PackedVq {
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch_batch(&self, tkns: Vec<TransferToken>, notif: bool) -> Vec<Transfer> {
+	pub fn dispatch_batch(&self, tkns: Vec<TransferToken>, notif: bool) {
 		// Zero transfers are not allowed
 		assert!(!tkns.is_empty());
 
-		let (pin_tkn_lst, next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns);
+		let (next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns);
 
 		if notif {
 			self.drv_event
@@ -1072,16 +1028,6 @@ impl PackedVq {
 
 			self.notif_ctrl.notify_dev(&notif_data)
 		}
-
-		let mut transfer_lst = Vec::with_capacity(pin_tkn_lst.len());
-
-		for pinned in pin_tkn_lst {
-			transfer_lst.push(Transfer {
-				transfer_tkn: Some(pinned),
-			})
-		}
-
-		transfer_lst
 	}
 
 	/// Dispatches a batch of TransferTokens. The Transfers will be placed in to the `await_queue`
@@ -1110,7 +1056,7 @@ impl PackedVq {
 			tkn.await_queue = Some(Rc::clone(&await_queue));
 		}
 
-		let (pin_tkn_lst, next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns);
+		let (next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns);
 
 		if notif {
 			self.drv_event
@@ -1137,13 +1083,6 @@ impl PackedVq {
 
 			self.notif_ctrl.notify_dev(&notif_data)
 		}
-
-		for pinned in pin_tkn_lst {
-			// Prevent TransferToken from being dropped
-			// I.e. do NOT run the custom constructor which will
-			// deallocate memory.
-			pinned.into_raw();
-		}
 	}
 
 	/// See `Virtq.prep_transfer()` documentation.
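In the batch-await path above, every token receives its own `Rc` handle to the shared await queue before the batch is pushed to the ring. A sketch of the reference counting involved (toy `u32` payloads):

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

fn main() {
    let await_queue = Rc::new(RefCell::new(VecDeque::<u32>::new()));

    // One clone per token in the batch, mirroring
    // `tkn.await_queue = Some(Rc::clone(&await_queue));`
    let handles: Vec<_> = (0..4).map(|_| Rc::clone(&await_queue)).collect();

    assert_eq!(Rc::strong_count(&await_queue), 5); // the driver's handle + 4 tokens
    drop(handles);
    assert_eq!(Rc::strong_count(&await_queue), 1);
}
```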
@@ -1151,8 +1090,8 @@ impl PackedVq {
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch(&self, tkn: TransferToken, notif: bool) -> Transfer {
-		let (pin_tkn, next_off, next_wrap) = self.descr_ring.borrow_mut().push(tkn);
+	pub fn dispatch(&self, tkn: TransferToken, notif: bool) {
+		let (next_off, next_wrap) = self.descr_ring.borrow_mut().push(tkn);
 
 		if notif {
 			self.drv_event
@@ -1179,28 +1118,6 @@ impl PackedVq {
 
 			self.notif_ctrl.notify_dev(&notif_data)
 		}
-
-		Transfer {
-			transfer_tkn: Some(pin_tkn),
-		}
-	}
-
-	/// The packed virtqueue handles early dropped transfers by moving the respective tokens into
-	/// an vector. Here they will remain until they are finished. In order to ensure this the queue
-	/// will check these descriptors from time to time during its poll function.
-	///
-	/// Also see `Virtq.early_drop()` documentation.
-	pub fn early_drop(&self, tkn: Pinned<TransferToken>) {
-		match tkn.state {
-			TransferState::Finished => (), // Drop the pinned token -> Dealloc everything
-			TransferState::Ready => {
-				unreachable!("Early dropped transfers are not allowed to be state == Ready")
-			}
-			TransferState::Processing => {
-				// Keep token until state is finished. This needs to be checked/cleaned up later
-				self.dropped.borrow_mut().push(tkn);
-			}
-		}
 	}
 
 	/// See `Virtq.index()` documentation
@@ -1294,9 +1211,6 @@ impl PackedVq {
 		// Initialize new memory pool.
 		let mem_pool = Rc::new(MemPool::new(vq_size));
 
-		// Initialize an empty vector for future dropped transfers
-		let dropped: RefCell<Vec<Pinned<TransferToken>>> = RefCell::new(Vec::new());
-
 		vq_handler.enable_queue();
 
 		info!("Created PackedVq: idx={}, size={}", index.0, vq_size);
@@ -1309,7 +1223,6 @@ impl PackedVq {
 			mem_pool,
 			size: VqSize::from(vq_size),
 			index,
-			dropped,
 		})
 	}
diff --git a/src/drivers/virtio/virtqueue/split.rs b/src/drivers/virtio/virtqueue/split.rs
index a92c551543..010723f850 100644
--- a/src/drivers/virtio/virtqueue/split.rs
+++ b/src/drivers/virtio/virtqueue/split.rs
@@ -17,8 +17,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
 use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
 use super::error::VirtqError;
 use super::{
-	AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Pinned,
-	Transfer, TransferState, TransferToken, Virtq, VqIndex, VqSize,
+	AsSliceU8, BuffSpec, Buffer, BufferToken, Bytes, DescrFlags, MemDescr, MemPool, Transfer,
+	TransferState, TransferToken, Virtq, VqIndex, VqSize,
 };
 use crate::arch::memory_barrier;
 use crate::arch::mm::paging::{BasePageSize, PageSize};
@@ -72,19 +72,17 @@ struct UsedElem {
 
 struct DescrRing {
 	read_idx: u16,
 	descr_table: DescrTable,
-	ref_ring: Box<[*mut TransferToken]>,
+	ref_ring: Box<[Option<Box<TransferToken>>]>,
 	avail_ring: AvailRing,
 	used_ring: UsedRing,
 }
 
 impl DescrRing {
-	fn push(&mut self, tkn: TransferToken) -> (Pinned<TransferToken>, u16, u16) {
-		let pin = Pinned::pin(tkn);
-
+	fn push(&mut self, tkn: TransferToken) -> (u16, u16) {
 		let mut desc_lst = Vec::new();
 		let mut is_indirect = false;
 
-		if let Some(buff) = pin.buff_tkn.as_ref().unwrap().send_buff.as_ref() {
+		if let Some(buff) = tkn.buff_tkn.as_ref().unwrap().send_buff.as_ref() {
 			if buff.is_indirect() {
 				desc_lst.push((buff.get_ctrl_desc().unwrap(), false));
 				is_indirect = true;
@@ -95,7 +93,7 @@ impl DescrRing {
 			}
 		}
 
-		if let Some(buff) = pin.buff_tkn.as_ref().unwrap().recv_buff.as_ref() {
+		if let Some(buff) = tkn.buff_tkn.as_ref().unwrap().recv_buff.as_ref() {
 			if buff.is_indirect() {
 				if desc_lst.is_empty() {
 					desc_lst.push((buff.get_ctrl_desc().unwrap(), true));
@@ -114,7 +112,7 @@ impl DescrRing {
 			}
 		}
 
-		let mut len = pin.buff_tkn.as_ref().unwrap().num_consuming_descr();
+		let mut len = tkn.buff_tkn.as_ref().unwrap().num_consuming_descr();
 
 		assert!(!desc_lst.is_empty());
 		// Minus 1, comes from the fact that ids run from one to 255 and not from 0 to 254 for u8::MAX sized pool
@@ -189,14 +187,14 @@ impl DescrRing {
 			len -= 1;
 		}
 
-		self.ref_ring[index] = pin.raw_addr();
+		self.ref_ring[index] = Some(Box::new(tkn));
 		self.avail_ring.ring[*self.avail_ring.index as usize % self.avail_ring.ring.len()] =
 			index as u16;
 
 		memory_barrier();
 		*self.avail_ring.index = self.avail_ring.index.wrapping_add(1);
 
-		(pin, 0, 0)
+		(0, 0)
 	}
 
 	fn poll(&mut self) {
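For reference, the split ring's `push` above indexes the available ring with a free-running `u16` counter reduced modulo the ring length and advances it with `wrapping_add`. The arithmetic in isolation:

```rust
fn main() {
    let ring_len = 8usize;
    let mut avail_idx: u16 = u16::MAX; // free-running counter about to wrap

    let slot = avail_idx as usize % ring_len; // where the next descriptor id is written
    avail_idx = avail_idx.wrapping_add(1);

    assert_eq!(slot, 7); // 65535 % 8
    assert_eq!(avail_idx, 0); // wrapped around cleanly
}
```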
@@ -204,7 +202,9 @@ impl DescrRing {
 			let cur_ring_index = self.read_idx as usize % self.used_ring.ring.len();
 			let used_elem = unsafe { ptr::read_volatile(&self.used_ring.ring[cur_ring_index]) };
 
-			let tkn = unsafe { &mut *(self.ref_ring[used_elem.id as usize]) };
+			let mut tkn = self.ref_ring[used_elem.id as usize].take().expect(
+				"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
+			);
 
 			if tkn.buff_tkn.as_ref().unwrap().recv_buff.as_ref().is_some() {
 				tkn.buff_tkn
@@ -213,17 +213,11 @@ impl DescrRing {
 					.as_mut()
 					.unwrap()
 					.restr_size(None, Some(used_elem.len as usize))
 					.unwrap();
 			}
-			match tkn.await_queue {
-				Some(_) => {
-					tkn.state = TransferState::Finished;
-					let queue = tkn.await_queue.take().unwrap();
-
-					// Turn the raw pointer into a Pinned again, which will hold ownership of the Token
-					queue.borrow_mut().push_back(Transfer {
-						transfer_tkn: Some(Pinned::from_raw(ptr::from_mut(tkn))),
-					});
-				}
-				None => tkn.state = TransferState::Finished,
+			tkn.state = TransferState::Finished;
+			if let Some(queue) = tkn.await_queue.take() {
+				queue.borrow_mut().push_back(Transfer {
+					transfer_tkn: Some(tkn),
+				})
 			}
 			memory_barrier();
 			self.read_idx = self.read_idx.wrapping_add(1);
@@ -248,7 +242,6 @@ pub struct SplitVq {
 	ring: RefCell<DescrRing>,
 	mem_pool: Rc<MemPool>,
 	size: VqSize,
-	dropped: RefCell<Vec<Pinned<TransferToken>>>,
 	index: VqIndex,
 
 	notif_ctrl: NotifCtrl,
@@ -265,17 +258,6 @@ impl SplitVq {
 		self.ring.borrow_mut().drv_disable_notif();
 	}
 
-	/// This function does check if early dropped TransferTokens are finished
-	/// and removes them if this is the case.
-	pub fn clean_up(&self) {
-		// remove and drop all finished Transfers
-		if !self.dropped.borrow().is_empty() {
-			self.dropped
-				.borrow_mut()
-				.retain(|tkn| tkn.state != TransferState::Finished);
-		}
-	}
-
 	/// See `Virtq.poll()` documentation
 	pub fn poll(&self) {
 		self.ring.borrow_mut().poll()
 	}
@@ -288,7 +270,7 @@ impl SplitVq {
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch_batch(&self, _tkns: Vec<TransferToken>, _notif: bool) -> Vec<Transfer> {
+	pub fn dispatch_batch(&self, _tkns: Vec<TransferToken>, _notif: bool) {
 		unimplemented!();
 	}
 
@@ -318,8 +300,8 @@ impl SplitVq {
 	/// The `notif` parameter indicates if the driver wants to have a notification for this specific
 	/// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the
 	/// updated notification flags before finishing transfers!
-	pub fn dispatch(&self, tkn: TransferToken, notif: bool) -> Transfer {
-		let (pin_tkn, next_off, next_wrap) = self.ring.borrow_mut().push(tkn);
+	pub fn dispatch(&self, tkn: TransferToken, notif: bool) {
+		let (next_off, next_wrap) = self.ring.borrow_mut().push(tkn);
 
 		if notif {
 			// TODO: Check whether the splitvirtquue has notifications for specific descriptors
@@ -346,28 +328,6 @@ impl SplitVq {
 
 			self.notif_ctrl.notify_dev(&notif_data)
 		}
-
-		Transfer {
-			transfer_tkn: Some(pin_tkn),
-		}
-	}
-
-	/// The packed virtqueue handles early dropped transfers by moving the respective tokens into
-	/// a vector. Here they will remain until they are finished. In order to ensure this the queue
-	/// will check these descriptors from time to time during its poll function.
-	///
-	/// Also see `Virtq.early_drop()` documentation
-	pub fn early_drop(&self, tkn: Pinned<TransferToken>) {
-		match tkn.state {
-			TransferState::Finished => (), // Drop the pinned token -> Dealloc everything
-			TransferState::Ready => {
-				unreachable!("Early dropped transfers are not allowed to be state == Ready")
-			}
-			TransferState::Processing => {
-				// Keep token until state is finished. This needs to be checked/cleaned up later.
-				self.dropped.borrow_mut().push(tkn);
-			}
-		}
 	}
 
 	/// See `Virtq.index()` documentation
@@ -451,7 +411,10 @@ impl SplitVq {
 
 		let descr_ring = DescrRing {
 			read_idx: 0,
-			ref_ring: vec![ptr::null_mut(); size as usize].into_boxed_slice(),
+			ref_ring: core::iter::repeat_with(|| None)
+				.take(size.into())
+				.collect::<Vec<_>>()
+				.into_boxed_slice(),
 			descr_table,
 			avail_ring,
 			used_ring,
@@ -466,9 +429,6 @@ impl SplitVq {
 		// Initialize new memory pool.
 		let mem_pool = Rc::new(MemPool::new(size));
 
-		// Initialize an empty vector for future dropped transfers
-		let dropped: RefCell<Vec<Pinned<TransferToken>>> = RefCell::new(Vec::new());
-
 		vq_handler.enable_queue();
 
 		info!("Created SplitVq: idx={}, size={}", index.0, size);
@@ -479,7 +439,6 @@ impl SplitVq {
 			mem_pool,
 			size: VqSize(size),
 			index,
-			dropped,
 		})
 	}
diff --git a/src/lib.rs b/src/lib.rs
index d9e7980cff..44edea89a8 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -19,6 +19,7 @@
 #![feature(linked_list_cursors)]
 #![feature(maybe_uninit_slice)]
 #![feature(naked_functions)]
+#![feature(new_uninit)]
 #![feature(noop_waker)]
 #![feature(pointer_is_aligned)]
 #![feature(slice_from_ptr_range)]
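The new `#![feature(new_uninit)]` gate is not exercised by the hunks shown here; presumably other parts of the change use it to allocate uninitialized boxed slices directly. A minimal nightly sketch of what the feature provides (illustrative only, not code from this patch):

```rust
#![feature(new_uninit)]

use core::mem::MaybeUninit;

fn main() {
    // Allocate an uninitialized boxed slice without going through a Vec.
    let mut buf: Box<[MaybeUninit<u8>]> = Box::new_uninit_slice(16);
    for slot in buf.iter_mut() {
        slot.write(0);
    }
    // SAFETY: every element was initialized above.
    let buf: Box<[u8]> = unsafe { buf.assume_init() };
    assert_eq!(buf.len(), 16);
}
```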