From b3d0c1cbc99b4a531892feb4f9b2f374cd4bb11c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=87a=C4=9Fatay=20Yi=C4=9Fit=20=C5=9Eahin?= Date: Tue, 2 Jan 2024 01:26:54 +0100 Subject: [PATCH] virtqueue: do not return Transfers to dispatchers Sharing the transfer with the sender is not really necessary, they can be notified through an await queue. Change the relevant methods to let the virtqueue own the Transfers and avoid reference counting complications. --- src/drivers/virtio/virtqueue/mod.rs | 81 +++++++------------ src/drivers/virtio/virtqueue/packed.rs | 106 +++++++++---------------- src/drivers/virtio/virtqueue/split.rs | 64 ++++++--------- src/lib.rs | 2 +- 4 files changed, 90 insertions(+), 163 deletions(-) diff --git a/src/drivers/virtio/virtqueue/mod.rs b/src/drivers/virtio/virtqueue/mod.rs index 31be02c989..633f773bb5 100644 --- a/src/drivers/virtio/virtqueue/mod.rs +++ b/src/drivers/virtio/virtqueue/mod.rs @@ -127,11 +127,11 @@ impl Virtq { /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! - fn dispatch(&self, tkn: TransferToken, notif: bool) -> Transfer { + fn dispatch(&self, tkn: TransferToken, notif: bool) { match self { Virtq::Packed(vq) => vq.dispatch(tkn, notif), Virtq::Split(vq) => vq.dispatch(tkn, notif), - } + }; } } @@ -225,7 +225,7 @@ impl Virtq { /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! - pub fn dispatch_batch(tkns: Vec, notif: bool) -> Vec { + pub fn dispatch_batch(tkns: Vec, notif: bool) { let mut used_vqs: Vec<(Rc, Vec)> = Vec::new(); // Sort the TransferTokens depending in the queue their coming from. 
@@ -255,17 +255,12 @@ impl Virtq { } } - let mut transfer_lst = Vec::new(); for (vq_ref, tkn_lst) in used_vqs { match vq_ref.as_ref() { - Virtq::Packed(vq) => { - transfer_lst.append(vq.dispatch_batch(tkn_lst, notif).as_mut()) - } - Virtq::Split(vq) => transfer_lst.append(vq.dispatch_batch(tkn_lst, notif).as_mut()), + Virtq::Packed(vq) => vq.dispatch_batch(tkn_lst, notif), + Virtq::Split(vq) => vq.dispatch_batch(tkn_lst, notif), } } - - transfer_lst } /// Dispatches a batch of TransferTokens. The Transfers will be placed in to the `await_queue` @@ -540,8 +535,8 @@ pub trait AsSliceU8 { } } -/// The [Transfer] will be received when a [TransferToken] is dispatched via `TransferToken.dispatch()` or -/// via `TransferToken.dispatch_blocking()`. +/// The [Transfer] will be received when a [TransferToken] is dispatched via [TransferToken::dispatch_blocking] or through the await +/// queue given to [TransferToken::dispatch_await]. /// /// The struct represents an ongoing transfer or an active transfer. While this does NOT mean, that the transfer is at all times inside /// actual virtqueue. The Transfers state can be polled via `Transfer.poll()`, which returns a bool if the transfer is finished. @@ -549,24 +544,10 @@ pub trait AsSliceU8 { /// **Finished Transfers:** /// * Finished transfers are able to return their send and receive buffers. Either as a copy via `Transfer.ret_cpy()` or as the actual /// buffers via `Transfer.ret()`. -/// * Finished transfers should be closed via `Transfer.close()` or via `Transfer.ret()`. /// * Finished transfers can be reused via `Transfer.reuse()`. 
-/// * This closes the transfer -/// * And returns an normal BufferToken (One should be cautious with reusing transfers where buffers were created from raw pointers) -/// -/// **Early dropped Transfers:** -/// -/// If a transfer is dropped without being closed (independent of being finished or ongoing), the transfer will return the respective -/// `Pinned` to the handling virtqueue, which will take of handling graceful shutdown. Which generally should take -/// care of waiting till the device handled the respective transfer and free the memory afterwards. -/// -/// One could "abuse" this procedure in order to realize a "fire-and-forget" transfer. -/// A warning here: The respective queue implementation is taking care of handling this case and there are no guarantees that the queue -/// won't be unusable afterwards. +/// * This returns a normal BufferToken (One should be cautious with reusing transfers where buffers were created from raw pointers) pub struct Transfer { - /// Needs to be `Option>` in order to prevent deallocation via None - // See custom drop function for clarity - transfer_tkn: Option>, + transfer_tkn: Option>, } // Public Interface of Transfer @@ -670,12 +651,13 @@ impl Transfer { send_buff, recv_buff, .. 
- } = unsafe { - Rc::get_mut_unchecked(self.transfer_tkn.as_mut().unwrap()) - .buff_tkn - .as_mut() - .unwrap() - }; + } = self + .transfer_tkn + .as_mut() + .unwrap() + .buff_tkn + .as_mut() + .unwrap(); (send_buff.as_mut(), recv_buff.as_mut()) }; @@ -835,8 +817,7 @@ impl Transfer { match state { TransferState::Finished => { // Desctructure Token - let mut transfer_tkn = Rc::into_inner(self.transfer_tkn.take().unwrap()) - .expect("There should not have been other references to the token."); + let mut transfer_tkn = self.transfer_tkn.take().unwrap(); let mut buffer_tkn = transfer_tkn.buff_tkn.take().unwrap(); @@ -904,11 +885,7 @@ impl Transfer { .unwrap() .reusable { - let tkn = Rc::into_inner(self.transfer_tkn.take().unwrap()) - .expect("There should not have been other references to the token") - .buff_tkn - .take() - .unwrap(); + let tkn = self.transfer_tkn.take().unwrap().buff_tkn.take().unwrap(); Ok(tkn.reset()) } else { @@ -941,11 +918,7 @@ impl Transfer { .unwrap() .reusable { - let tkn = Rc::into_inner(self.transfer_tkn.take().unwrap()) - .expect("There should not have been other references to the token") - .buff_tkn - .take() - .unwrap(); + let tkn = self.transfer_tkn.take().unwrap().buff_tkn.take().unwrap(); Ok(tkn.reset_purge()) } else { @@ -1004,13 +977,13 @@ impl TransferToken { self.get_vq().dispatch(self, notif); } - /// Dispatches the provided TransferToken to the respective queue and returns a transfer. + /// Dispatches the provided TransferToken to the respective queue. /// /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! 
- pub fn dispatch(self, notif: bool) -> Transfer { - self.get_vq().dispatch(self, notif) + pub fn dispatch(self, notif: bool) { + self.get_vq().dispatch(self, notif); } /// Dispatches the provided TransferToken to the respectuve queue and does @@ -1025,18 +998,20 @@ impl TransferToken { /// Upon finish notifications are enabled again. pub fn dispatch_blocking(self) -> Result { let vq = self.get_vq(); - let transfer = self.get_vq().dispatch(self, false); + let rcv_queue = Rc::new(RefCell::new(VecDeque::with_capacity(1))); + self.dispatch_await(rcv_queue.clone(), false); vq.disable_notifs(); - while transfer.transfer_tkn.as_ref().unwrap().state != TransferState::Finished { - // Keep Spinning until the state changes to Finished + while rcv_queue.borrow().is_empty() { + // Keep Spinning until the receive queue is filled vq.poll() } vq.enable_notifs(); - Ok(transfer) + let result = Ok(rcv_queue.borrow_mut().pop_front().unwrap()); + result } } diff --git a/src/drivers/virtio/virtqueue/packed.rs b/src/drivers/virtio/virtqueue/packed.rs index 0dc0087f82..fbd3b262ab 100644 --- a/src/drivers/virtio/virtqueue/packed.rs +++ b/src/drivers/virtio/virtqueue/packed.rs @@ -82,13 +82,9 @@ impl WrapCount { } /// Structure which allows to control raw ring and operate easily on it -/// -/// WARN: NEVER PUSH TO THE RING AFTER DESCRIPTORRING HAS BEEN INITIALIZED AS THIS WILL PROBABLY RESULT IN A -/// RELOCATION OF THE VECTOR AND HENCE THE DEVICE WILL NO LONGER NO THE RINGS ADDRESS! struct DescriptorRing { ring: &'static mut [Descriptor], - //ring: Pinned>, - tkn_ref_ring: Box<[Option>]>, + tkn_ref_ring: Box<[Option>]>, // Controlling variables for the ring // @@ -117,7 +113,11 @@ impl DescriptorRing { // Descriptor ID's run from 1 to size_of_queue. In order to index directly into the // reference ring via an ID it is much easier to simply have an array of size = size_of_queue + 1 // and do not care about the first element being unused. 
- let tkn_ref_ring = vec![None; size + 1].into_boxed_slice(); + // `Box` is not Clone, so neither is `None::>`. Hence, we need to produce `None`s with a closure. + let tkn_ref_ring = core::iter::repeat_with(|| None) + .take(size + 1) + .collect::>() + .into_boxed_slice(); DescriptorRing { ring, @@ -130,38 +130,29 @@ impl DescriptorRing { } } - /// Polls poll index and sets states of eventually used TransferTokens to finished. - /// If Await_qeue is available, the Transfer will be provieded to the queue. + /// Polls poll index and sets the state of any finished TransferTokens. + /// If [TransferToken::await_queue] is available, the Transfer will be moved to the queue. fn poll(&mut self) { let mut ctrl = self.get_read_ctrler(); if let Some(mut tkn) = ctrl.poll_next() { - // The state of the TransferToken up to this point MUST NOT be - // finished. As soon as we mark the token as finished, we can not - // be sure, that the token is not dropped, which would making - // the dereferencing operation undefined behaviour as also - // all operations on the reference. 
- - unsafe { - let tkn_ref = Rc::get_mut_unchecked(&mut tkn); - tkn_ref.state = TransferState::Finished; - if let Some(queue) = tkn_ref.await_queue.take() { - // Turn the raw pointer into a Pinned again, which will hold ownership of the Token - queue.borrow_mut().push_back(Transfer { - transfer_tkn: Some(tkn), - }); - } + tkn.state = TransferState::Finished; + if let Some(queue) = tkn.await_queue.take() { + // Place the TransferToken in a Transfer, which will hold ownership of the token + queue.borrow_mut().push_back(Transfer { + transfer_tkn: Some(tkn), + }); } } } - fn push_batch(&mut self, tkn_lst: Vec) -> (Vec>, usize, u8) { + fn push_batch(&mut self, tkn_lst: Vec) -> (usize, u8) { // Catch empty push, in order to allow zero initialized first_ctrl_settings struct // which will be overwritten in the first iteration of the for-loop assert!(!tkn_lst.is_empty()); let mut first_ctrl_settings: (usize, u16, WrapCount) = (0, 0, WrapCount::new()); - let mut pind_lst = Vec::with_capacity(tkn_lst.len()); + let mut first_buffer = None; for (i, mut tkn) in tkn_lst.into_iter().enumerate() { // Check length and if its fits. This should always be true due to the restriction of @@ -273,39 +264,31 @@ impl DescriptorRing { (None, None) => unreachable!("Empty Transfers are not allowed!"), // This should already be caught at creation of BufferToken } - // Update the state of the actual Token tkn.state = TransferState::Processing; - let tkn = Rc::new(tkn); - if i == 0 { first_ctrl_settings = (ctrl.start, ctrl.buff_id, ctrl.wrap_at_init); + first_buffer = Some(Box::new(tkn)); } else { // Update flags of the first descriptor and set new write_index - ctrl.make_avail(tkn.clone()); + ctrl.make_avail(Box::new(tkn)); } - - pind_lst.push(tkn.clone()); } // Manually make the first buffer available lastly // // Providing the first buffer in the list manually // provide reference, in order to let TransferToken now upon finish. 
- self.tkn_ref_ring[usize::from(first_ctrl_settings.1)] = Some(pind_lst[0].clone()); + self.tkn_ref_ring[usize::from(first_ctrl_settings.1)] = first_buffer; // The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step. // See Virtio specfification v1.1. - 2.7.21 fence(Ordering::SeqCst); self.ring[first_ctrl_settings.0].flags |= first_ctrl_settings.2.as_flags_avail(); // Converting a boolean as u8 is fine - ( - pind_lst, - first_ctrl_settings.0, - first_ctrl_settings.2 .0 as u8, - ) + (first_ctrl_settings.0, first_ctrl_settings.2 .0 as u8) } - fn push(&mut self, mut tkn: TransferToken) -> (Rc, usize, u8) { + fn push(&mut self, mut tkn: TransferToken) -> (usize, u8) { // Check length and if its fits. This should always be true due to the restriction of // the memory pool, but to be sure. assert!(tkn.buff_tkn.as_ref().unwrap().num_consuming_descr() <= self.capacity); @@ -412,16 +395,13 @@ impl DescriptorRing { // Update the state of the actual Token tkn.state = TransferState::Processing; - fence(Ordering::SeqCst); - let tkn = Rc::new(tkn); - fence(Ordering::SeqCst); // Update flags of the first descriptor and set new write_index - ctrl.make_avail(tkn.clone()); + ctrl.make_avail(Box::new(tkn)); fence(Ordering::SeqCst); // Converting a boolean as u8 is fine - (tkn, ctrl.start, ctrl.wrap_at_init.0 as u8) + (ctrl.start, ctrl.wrap_at_init.0 as u8) } /// # Unsafe @@ -465,24 +445,24 @@ struct ReadCtrl<'a> { } impl<'a> ReadCtrl<'a> { - /// Polls the ring for a new finished buffer. If buffer is marked as used, takes care of + /// Polls the ring for a new finished buffer. If buffer is marked as finished, takes care of /// updating the queue and returns the respective TransferToken. - fn poll_next(&mut self) -> Option> { + fn poll_next(&mut self) -> Option> { // Check if descriptor has been marked used. 
if self.desc_ring.ring[self.position].flags & WrapCount::flag_mask() == self.desc_ring.dev_wc.as_flags_used() { let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id); - let mut tkn = self.desc_ring.tkn_ref_ring[buff_id] - .take() - .expect("TransferToken at position does not exist"); + let mut tkn = self.desc_ring.tkn_ref_ring[buff_id].take().expect( + "The buff_id is incorrect or the reference to the TransferToken was misplaced.", + ); let (send_buff, recv_buff) = { let BufferToken { send_buff, recv_buff, .. - } = unsafe { Rc::get_mut_unchecked(&mut tkn).buff_tkn.as_mut().unwrap() }; + } = tkn.buff_tkn.as_mut().unwrap(); (recv_buff.as_mut(), send_buff.as_mut()) }; @@ -789,13 +769,13 @@ impl<'a> WriteCtrl<'a> { } } - fn make_avail(&mut self, raw_tkn: Rc) { + fn make_avail(&mut self, raw_tkn: Box) { // We fail if one wants to make a buffer available without inserting one element! assert!(self.start != self.position); // We also fail if buff_id is not set! assert!(self.buff_id != 0); - // provide reference, in order to let TransferToken now upon finish. + // provide reference, in order to let TransferToken know upon finish. self.desc_ring.tkn_ref_ring[usize::from(self.buff_id)] = Some(raw_tkn); // The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step. // See Virtio specfification v1.1. - 2.7.21 @@ -1017,11 +997,11 @@ impl PackedVq { /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! 
- pub fn dispatch_batch(&self, tkns: Vec, notif: bool) -> Vec { + pub fn dispatch_batch(&self, tkns: Vec, notif: bool) { // Zero transfers are not allowed assert!(!tkns.is_empty()); - let (pin_tkn_lst, next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns); + let (next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns); if notif { self.drv_event @@ -1048,16 +1028,6 @@ impl PackedVq { self.notif_ctrl.notify_dev(¬if_data) } - - let mut transfer_lst = Vec::with_capacity(pin_tkn_lst.len()); - - for pinned in pin_tkn_lst { - transfer_lst.push(Transfer { - transfer_tkn: Some(pinned), - }) - } - - transfer_lst } /// Dispatches a batch of TransferTokens. The Transfers will be placed in to the `await_queue` @@ -1086,7 +1056,7 @@ impl PackedVq { tkn.await_queue = Some(Rc::clone(&await_queue)); } - let (_, next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns); + let (next_off, next_wrap) = self.descr_ring.borrow_mut().push_batch(tkns); if notif { self.drv_event @@ -1120,8 +1090,8 @@ impl PackedVq { /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! 
- pub fn dispatch(&self, tkn: TransferToken, notif: bool) -> Transfer { - let (pin_tkn, next_off, next_wrap) = self.descr_ring.borrow_mut().push(tkn); + pub fn dispatch(&self, tkn: TransferToken, notif: bool) { + let (next_off, next_wrap) = self.descr_ring.borrow_mut().push(tkn); if notif { self.drv_event @@ -1148,10 +1118,6 @@ impl PackedVq { self.notif_ctrl.notify_dev(¬if_data) } - - Transfer { - transfer_tkn: Some(pin_tkn), - } } /// See `Virtq.index()` documentation diff --git a/src/drivers/virtio/virtqueue/split.rs b/src/drivers/virtio/virtqueue/split.rs index f1758f8e89..010723f850 100644 --- a/src/drivers/virtio/virtqueue/split.rs +++ b/src/drivers/virtio/virtqueue/split.rs @@ -72,15 +72,13 @@ struct UsedElem { struct DescrRing { read_idx: u16, descr_table: DescrTable, - ref_ring: Box<[Option>]>, + ref_ring: Box<[Option>]>, avail_ring: AvailRing, used_ring: UsedRing, } impl DescrRing { - fn push(&mut self, tkn: TransferToken) -> (Rc, u16, u16) { - let tkn = Rc::new(tkn); - + fn push(&mut self, tkn: TransferToken) -> (u16, u16) { let mut desc_lst = Vec::new(); let mut is_indirect = false; @@ -189,14 +187,14 @@ impl DescrRing { len -= 1; } - self.ref_ring[index] = Some(tkn.clone()); + self.ref_ring[index] = Some(Box::new(tkn)); self.avail_ring.ring[*self.avail_ring.index as usize % self.avail_ring.ring.len()] = index as u16; memory_barrier(); *self.avail_ring.index = self.avail_ring.index.wrapping_add(1); - (tkn, 0, 0) + (0, 0) } fn poll(&mut self) { @@ -204,33 +202,22 @@ impl DescrRing { let cur_ring_index = self.read_idx as usize % self.used_ring.ring.len(); let used_elem = unsafe { ptr::read_volatile(&self.used_ring.ring[cur_ring_index]) }; - let mut tkn = self.ref_ring[used_elem.id as usize] - .take() - .expect("Invalid TransferToken reference in reference ring"); + let mut tkn = self.ref_ring[used_elem.id as usize].take().expect( + "The buff_id is incorrect or the reference to the TransferToken was misplaced.", + ); - unsafe { - let tkn_ref = 
Rc::get_mut_unchecked(&mut tkn); - if tkn_ref - .buff_tkn - .as_ref() + if tkn.buff_tkn.as_ref().unwrap().recv_buff.as_ref().is_some() { + tkn.buff_tkn + .as_mut() .unwrap() - .recv_buff - .as_ref() - .is_some() - { - tkn_ref - .buff_tkn - .as_mut() - .unwrap() - .restr_size(None, Some(used_elem.len as usize)) - .unwrap(); - } - tkn_ref.state = TransferState::Finished; - if let Some(queue) = tkn_ref.await_queue.take() { - queue.borrow_mut().push_back(Transfer { - transfer_tkn: Some(tkn), - }) - } + .restr_size(None, Some(used_elem.len as usize)) + .unwrap(); + } + tkn.state = TransferState::Finished; + if let Some(queue) = tkn.await_queue.take() { + queue.borrow_mut().push_back(Transfer { + transfer_tkn: Some(tkn), + }) } memory_barrier(); self.read_idx = self.read_idx.wrapping_add(1); @@ -283,7 +270,7 @@ impl SplitVq { /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! - pub fn dispatch_batch(&self, _tkns: Vec, _notif: bool) -> Vec { + pub fn dispatch_batch(&self, _tkns: Vec, _notif: bool) { unimplemented!(); } @@ -313,8 +300,8 @@ impl SplitVq { /// The `notif` parameter indicates if the driver wants to have a notification for this specific /// transfer. This is only for performance optimization. As it is NOT ensured, that the device sees the /// updated notification flags before finishing transfers! 
- pub fn dispatch(&self, tkn: TransferToken, notif: bool) -> Transfer { - let (pin_tkn, next_off, next_wrap) = self.ring.borrow_mut().push(tkn); + pub fn dispatch(&self, tkn: TransferToken, notif: bool) { + let (next_off, next_wrap) = self.ring.borrow_mut().push(tkn); if notif { // TODO: Check whether the splitvirtquue has notifications for specific descriptors @@ -341,10 +328,6 @@ impl SplitVq { self.notif_ctrl.notify_dev(¬if_data) } - - Transfer { - transfer_tkn: Some(pin_tkn), - } } /// See `Virtq.index()` documentation @@ -428,7 +411,10 @@ impl SplitVq { let descr_ring = DescrRing { read_idx: 0, - ref_ring: vec![None; size as usize].into_boxed_slice(), + ref_ring: core::iter::repeat_with(|| None) + .take(size.into()) + .collect::>() + .into_boxed_slice(), descr_table, avail_ring, used_ring, diff --git a/src/lib.rs b/src/lib.rs index 76941c013b..44edea89a8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,10 +16,10 @@ #![feature(allocator_api)] #![feature(asm_const)] #![feature(exposed_provenance)] -#![feature(get_mut_unchecked)] #![feature(linked_list_cursors)] #![feature(maybe_uninit_slice)] #![feature(naked_functions)] +#![feature(new_uninit)] #![feature(noop_waker)] #![feature(pointer_is_aligned)] #![feature(slice_from_ptr_range)]