diff --git a/doc/release-notes/iceoryx2-unreleased.md b/doc/release-notes/iceoryx2-unreleased.md
index ea9230116..78bda78d9 100644
--- a/doc/release-notes/iceoryx2-unreleased.md
+++ b/doc/release-notes/iceoryx2-unreleased.md
@@ -18,6 +18,7 @@
 * Support for slices in the C++ bindings [#490](https://github.com/eclipse-iceoryx/iceoryx2/issues/490)
 * Add API to retrieve string description of error enums [#491](https://github.com/eclipse-iceoryx/iceoryx2/issues/491)
 * Add relocatable `SlotMap` [#504](https://github.com/eclipse-iceoryx/iceoryx2/issues/504)
+* Add `ResizableSharedMemory` [#497](https://github.com/eclipse-iceoryx/iceoryx2/issues/497)
 * Make signal handling optional in `WaitSet` and `Node` [#528](https://github.com/eclipse-iceoryx/iceoryx2/issues/528)
 * Add benchmark for iceoryx2 queues [#535](https://github.com/eclipse-iceoryx/iceoryx2/issues/535)
diff --git a/iceoryx2-bb/container/src/semantic_string.rs b/iceoryx2-bb/container/src/semantic_string.rs
index fe291a540..b76c45466 100644
--- a/iceoryx2-bb/container/src/semantic_string.rs
+++ b/iceoryx2-bb/container/src/semantic_string.rs
@@ -173,6 +173,18 @@ pub trait SemanticString:
         self.as_string().capacity()
     }
 
+    /// Finds the first occurrence of a byte string in the given string. If the byte string was
+    /// found, the start position of the byte string is returned, otherwise [`None`].
+    fn find(&self, bytes: &[u8]) -> Option<usize> {
+        self.as_string().find(bytes)
+    }
+
+    /// Finds the last occurrence of a byte string in the given string. If the byte string was
+    /// found, the start position of the byte string is returned, otherwise [`None`].
+    fn rfind(&self, bytes: &[u8]) -> Option<usize> {
+        self.as_string().rfind(bytes)
+    }
+
     /// Returns true when the string is full, otherwise false
     fn is_full(&self) -> bool {
         self.as_string().is_full()
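For orientation, the new `find()`/`rfind()` mirror the usual first/last-occurrence semantics but operate on raw byte strings; `rfind()` in particular is what the segment-name parsing in `dynamic.rs` further below relies on. A minimal sketch of the intended contract, using hypothetical `FileName` values:

```rust
use iceoryx2_bb_container::semantic_string::SemanticString;
use iceoryx2_bb_system_types::file_name::FileName;

fn main() {
    // "my__shm__7" contains the separator "__" twice: find() reports the
    // first occurrence, rfind() the last one.
    let name = FileName::new(b"my__shm__7").unwrap();
    assert_eq!(name.find(b"__"), Some(2));
    assert_eq!(name.rfind(b"__"), Some(7));
    assert_eq!(name.find(b"missing"), None);
}
```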
diff --git a/iceoryx2-bb/container/src/slotmap.rs b/iceoryx2-bb/container/src/slotmap.rs
index 663f77177..016760910 100644
--- a/iceoryx2-bb/container/src/slotmap.rs
+++ b/iceoryx2-bb/container/src/slotmap.rs
@@ -282,6 +282,14 @@
             }
         }
 
+        pub(crate) unsafe fn next_free_key_impl(&self) -> Option<SlotMapKey> {
+            if self.idx_to_data_free_list_head == INVALID {
+                return None;
+            }
+
+            Some(SlotMapKey::new(self.idx_to_data_free_list_head))
+        }
+
         pub(crate) fn len_impl(&self) -> usize {
             self.len
         }
@@ -394,6 +402,12 @@
             unsafe { self.remove_impl(key) }
         }
 
+        /// Returns the [`SlotMapKey`] that will be used when the user calls
+        /// [`SlotMap::insert()`]. If the [`SlotMap`] is full it returns [`None`].
+        pub fn next_free_key(&self) -> Option<SlotMapKey> {
+            unsafe { self.next_free_key_impl() }
+        }
+
         /// Returns the number of stored values.
         pub fn len(&self) -> usize {
             self.len_impl()
@@ -501,6 +515,17 @@
             self.remove_impl(key)
         }
 
+        /// Returns the [`SlotMapKey`] that will be used when the user calls
+        /// [`SlotMap::insert()`]. If the [`SlotMap`] is full it returns [`None`].
+        ///
+        /// # Safety
+        ///
+        /// * [`RelocatableSlotMap::init()`] must be called once before
+        ///
+        pub unsafe fn next_free_key(&self) -> Option<SlotMapKey> {
+            self.next_free_key_impl()
+        }
+
         /// Returns the number of stored values.
         pub fn len(&self) -> usize {
             self.len_impl()
@@ -615,6 +640,12 @@ impl<T, const CAPACITY: usize> FixedSizeSlotMap<T, CAPACITY> {
         unsafe { self.state.remove_impl(key) }
     }
 
+    /// Returns the [`SlotMapKey`] that will be used when the user calls
+    /// [`SlotMap::insert()`]. If the [`SlotMap`] is full it returns [`None`].
+    pub fn next_free_key(&self) -> Option<SlotMapKey> {
+        unsafe { self.state.next_free_key_impl() }
+    }
+
     /// Returns the number of stored values.
     pub fn len(&self) -> usize {
         self.state.len_impl()
diff --git a/iceoryx2-bb/container/tests/slotmap_tests.rs b/iceoryx2-bb/container/tests/slotmap_tests.rs
index 1ed75f42d..8db566777 100644
--- a/iceoryx2-bb/container/tests/slotmap_tests.rs
+++ b/iceoryx2-bb/container/tests/slotmap_tests.rs
@@ -199,4 +199,31 @@ mod slot_map {
             }
         }
     }
+
+    #[test]
+    fn next_free_key_returns_key_used_for_insert() {
+        let mut sut = FixedSizeSut::new();
+        let mut keys = vec![];
+
+        for _ in 0..SUT_CAPACITY / 2 {
+            keys.push(sut.insert(0).unwrap());
+        }
+
+        let next_key = sut.next_free_key();
+        assert_that!(next_key, is_some);
+        assert_that!(sut.insert(0), eq next_key);
+    }
+
+    #[test]
+    fn next_free_key_returns_none_when_full() {
+        let mut sut = FixedSizeSut::new();
+        let mut keys = vec![];
+
+        for _ in 0..SUT_CAPACITY {
+            keys.push(sut.insert(0).unwrap());
+        }
+
+        let next_key = sut.next_free_key();
+        assert_that!(next_key, is_none);
+    }
 }
diff --git a/iceoryx2-cal/src/lib.rs b/iceoryx2-cal/src/lib.rs
index 1b885c1e5..5d0560137 100644
--- a/iceoryx2-cal/src/lib.rs
+++ b/iceoryx2-cal/src/lib.rs
@@ -17,6 +17,7 @@ pub mod hash;
 pub mod monitoring;
 pub mod named_concept;
 pub mod reactor;
+pub mod resizable_shared_memory;
 pub mod serialize;
 pub mod shared_memory;
 pub mod shared_memory_directory;
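`next_free_key()` makes the key of the upcoming insertion observable before the insertion happens, e.g. to embed the key into the stored value itself. A hedged sketch of that pattern, assuming `FixedSizeSlotMap` mirrors `SlotMap`'s `get()`/`insert()` API (capacity and payload are illustrative, not taken from the diff):

```rust
use iceoryx2_bb_container::slotmap::FixedSizeSlotMap;

fn main() {
    // Query the key the next insert() will use, then store a value that
    // carries its own key.
    let mut map = FixedSizeSlotMap::<usize, 8>::new();
    let expected_key = map.next_free_key().unwrap();
    let key = map.insert(expected_key.value()).unwrap();

    assert_eq!(key, expected_key);
    assert_eq!(*map.get(key).unwrap(), key.value());
}
```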
diff --git a/iceoryx2-cal/src/resizable_shared_memory/dynamic.rs b/iceoryx2-cal/src/resizable_shared_memory/dynamic.rs
new file mode 100644
index 000000000..2b2a228e3
--- /dev/null
+++ b/iceoryx2-cal/src/resizable_shared_memory/dynamic.rs
@@ -0,0 +1,698 @@
+// Copyright (c) 2024 Contributors to the Eclipse Foundation
+//
+// See the NOTICE file(s) distributed with this work for additional
+// information regarding copyright ownership.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Apache Software License 2.0 which is available at
+// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
+// which is available at https://opensource.org/licenses/MIT.
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use std::alloc::Layout;
+use std::cell::UnsafeCell;
+use std::sync::atomic::Ordering;
+use std::time::Duration;
+use std::{fmt::Debug, marker::PhantomData};
+
+use crate::shared_memory::{AllocationStrategy, SegmentId, ShmPointer};
+use crate::shared_memory::{
+    PointerOffset, SharedMemory, SharedMemoryBuilder, SharedMemoryCreateError,
+    SharedMemoryOpenError, ShmAllocator,
+};
+use crate::shm_allocator::ShmAllocationError;
+use iceoryx2_bb_container::semantic_string::SemanticString;
+use iceoryx2_bb_container::slotmap::{SlotMap, SlotMapKey};
+use iceoryx2_bb_elementary::allocator::AllocationError;
+use iceoryx2_bb_log::fatal_panic;
+use iceoryx2_bb_log::{fail, warn};
+use iceoryx2_bb_system_types::file_name::FileName;
+use iceoryx2_bb_system_types::path::Path;
+use iceoryx2_pal_concurrency_sync::iox_atomic::{IoxAtomicU64, IoxAtomicUsize};
+
+use super::{
+    NamedConcept, NamedConceptBuilder, NamedConceptDoesExistError, NamedConceptListError,
+    NamedConceptMgmt, NamedConceptRemoveError, ResizableSharedMemory, ResizableSharedMemoryBuilder,
+    ResizableSharedMemoryView, ResizableSharedMemoryViewBuilder, ResizableShmAllocationError,
+};
+
+const MAX_NUMBER_OF_REALLOCATIONS: usize = SegmentId::max_segment_id() as usize + 1;
+const SEGMENT_ID_SEPARATOR: &[u8] = b"__";
+const MANAGEMENT_SUFFIX: &[u8] = b"mgmt";
+const INVALID_KEY: usize = usize::MAX;
+
+#[repr(C)]
+#[derive(Debug)]
+struct SharedState {
+    allocation_strategy: AllocationStrategy,
+    max_number_of_chunks_hint: IoxAtomicU64,
+    max_chunk_size_hint: IoxAtomicU64,
+    max_chunk_alignment_hint: IoxAtomicU64,
+}
+
+#[derive(Debug)]
+struct MemoryConfig<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    base_name: FileName,
+    shm: Shm::Configuration,
+    allocator_config_hint: Allocator::Configuration,
+}
+
+#[derive(Debug)]
+struct ViewConfig<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    base_name: FileName,
+    shm: Shm::Configuration,
+    shm_builder_timeout: Duration,
+    _data: PhantomData<Allocator>,
+}
+
+#[derive(Debug)]
+struct InternalState<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    builder_config: MemoryConfig<Allocator, Shm>,
+    shared_state: SharedState,
+    shared_memory_map: SlotMap<ShmEntry<Allocator, Shm>>,
+    current_idx: SlotMapKey,
+}
+
+#[derive(Debug)]
+struct ShmEntry<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    shm: Shm,
+    chunk_count: IoxAtomicU64,
+    _data: PhantomData<Allocator>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum ShmEntryState {
+    Empty,
+    NonEmpty,
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> ShmEntry<Allocator, Shm> {
+    fn new(shm: Shm) -> Self {
+        Self {
+            shm,
+            chunk_count: IoxAtomicU64::new(0),
+            _data: PhantomData,
+        }
+    }
+
+    fn register_offset(&self) {
+        self.chunk_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    fn unregister_offset(&self) -> ShmEntryState {
+        match self.chunk_count.fetch_sub(1, Ordering::Relaxed) {
+            1 => ShmEntryState::Empty,
+            _ => ShmEntryState::NonEmpty,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct DynamicViewBuilder<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    config: ViewConfig<Allocator, Shm>,
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>
+    NamedConceptBuilder<DynamicMemory<Allocator, Shm>> for DynamicViewBuilder<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    fn new(name: &FileName) -> Self {
+        Self {
+            config: ViewConfig {
+                base_name: *name,
+                shm: Shm::Configuration::default(),
+                shm_builder_timeout: Duration::ZERO,
+                _data: PhantomData,
+            },
+        }
+    }
+
+    fn config(mut self, config: &Shm::Configuration) -> Self {
+        self.config.shm = config.clone();
+        self
+    }
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>
+    ResizableSharedMemoryViewBuilder<
+        Allocator,
+        Shm,
+        DynamicMemory<Allocator, Shm>,
+        DynamicView<Allocator, Shm>,
+    > for DynamicViewBuilder<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    fn timeout(mut self, value: Duration) -> Self {
+        self.config.shm_builder_timeout = value;
+        self
+    }
+
+    fn open(self) -> Result<DynamicView<Allocator, Shm>, SharedMemoryOpenError> {
+        let origin = format!("{:?}", self);
+        let msg = "Unable to open ResizableSharedMemoryView";
+
+        let adjusted_name =
+            DynamicMemory::<Allocator, Shm>::management_segment_name(&self.config.base_name);
+        let mgmt_segment = fail!(from origin, when Shm::Builder::new(&adjusted_name)
+                .config(&self.config.shm)
+                .has_ownership(false)
+                .open(),
+            "{msg} since the management segment could not be opened.");
+
+        let shared_memory_map = SlotMap::new(MAX_NUMBER_OF_REALLOCATIONS);
+
+        Ok(DynamicView {
+            view_config: self.config,
+            _mgmt_segment: mgmt_segment,
+            shared_memory_map: UnsafeCell::new(shared_memory_map),
+            current_idx: IoxAtomicUsize::new(INVALID_KEY),
+            _data: PhantomData,
+        })
+    }
+}
+
+#[derive(Debug)]
+pub struct DynamicMemoryBuilder<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>
+where
+    Allocator: Debug,
+{
+    config: MemoryConfig<Allocator, Shm>,
+    shared_state: SharedState,
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>
+    NamedConceptBuilder<DynamicMemory<Allocator, Shm>> for DynamicMemoryBuilder<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    fn new(name: &FileName) -> Self {
+        Self {
+            config: MemoryConfig {
+                base_name: *name,
+                allocator_config_hint: Allocator::Configuration::default(),
+                shm: Shm::Configuration::default(),
+            },
+            shared_state: SharedState {
+                allocation_strategy: AllocationStrategy::default(),
+                max_number_of_chunks_hint: IoxAtomicU64::new(1),
+                max_chunk_size_hint: IoxAtomicU64::new(1),
+                max_chunk_alignment_hint: IoxAtomicU64::new(1),
+            },
+        }
+    }
+
+    fn config(mut self, config: &Shm::Configuration) -> Self {
+        self.config.shm = config.clone();
+        self
+    }
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>
+    ResizableSharedMemoryBuilder<Allocator, Shm, DynamicMemory<Allocator, Shm>>
+    for DynamicMemoryBuilder<Allocator, Shm>
+where
+    Allocator: Debug,
+    Shm::Builder: Debug,
+{
+    fn max_chunk_layout_hint(self, value: Layout) -> Self {
+        self.shared_state
+            .max_chunk_size_hint
+            .store(value.size() as u64, Ordering::Relaxed);
+        self.shared_state
+            .max_chunk_alignment_hint
+            .store(value.align() as u64, Ordering::Relaxed);
+        self
+    }
+
+    fn max_number_of_chunks_hint(self, value: usize) -> Self {
+        self.shared_state
+            .max_number_of_chunks_hint
+            .store(value as u64, Ordering::Relaxed);
+        self
+    }
+
+    fn allocation_strategy(mut self, value: AllocationStrategy) -> Self {
+        self.shared_state.allocation_strategy = value;
+        self
+    }
+
+    fn create(mut self) -> Result<DynamicMemory<Allocator, Shm>, SharedMemoryCreateError> {
+        let msg = "Unable to create ResizableSharedMemory";
+        let origin = format!("{:?}", self);
+
+        let hint = Allocator::initial_setup_hint(Layout::new::<u8>(), 1);
+        let adjusted_name =
+            DynamicMemory::<Allocator, Shm>::management_segment_name(&self.config.base_name);
+        let mgmt_segment = fail!(from origin, when Shm::Builder::new(&adjusted_name)
+                .size(hint.payload_size)
+                .config(&self.config.shm)
+                .has_ownership(true)
+                .create(&hint.config),
+            "{msg} since the management segment could not be created.");
+
+        let hint = Allocator::initial_setup_hint(
+            unsafe {
+                Layout::from_size_align_unchecked(
+                    self.shared_state
+                        .max_chunk_size_hint
+                        .load(Ordering::Relaxed) as usize,
+                    self.shared_state
+                        .max_chunk_alignment_hint
+                        .load(Ordering::Relaxed) as usize,
+                )
+            },
+            self.shared_state
+                .max_number_of_chunks_hint
+                .load(Ordering::Relaxed) as usize,
+        );
+        self.config.allocator_config_hint = hint.config;
+
+        let shm = fail!(from origin, when DynamicMemory::create_segment(&self.config, SegmentId::new(0), hint.payload_size),
+            "Unable to create ResizableSharedMemory since the underlying shared memory could not be created.");
+        let mut shared_memory_map = SlotMap::new(MAX_NUMBER_OF_REALLOCATIONS);
+        let current_idx = fatal_panic!(from origin, when shared_memory_map.insert(ShmEntry::new(shm)).ok_or(""),
+            "This should never happen! {msg} since the newly constructed SlotMap does not have space for one insert.");
+
+        Ok(DynamicMemory {
+            state: UnsafeCell::new(InternalState {
+                builder_config: self.config,
+                shared_memory_map,
+                current_idx,
+                shared_state: self.shared_state,
+            }),
+            _mgmt_segment: mgmt_segment,
+            _data: PhantomData,
+        })
+    }
+}
+
+#[derive(Debug)]
+pub struct DynamicView<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    view_config: ViewConfig<Allocator, Shm>,
+    _mgmt_segment: Shm,
+    shared_memory_map: UnsafeCell<SlotMap<ShmEntry<Allocator, Shm>>>,
+    current_idx: IoxAtomicUsize,
+    _data: PhantomData<Allocator>,
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>
+    ResizableSharedMemoryView<Allocator, Shm> for DynamicView<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    unsafe fn register_and_translate_offset(
+        &self,
+        offset: PointerOffset,
+    ) -> Result<*const u8, SharedMemoryOpenError> {
+        let msg = "Unable to translate";
+        let segment_id = offset.segment_id();
+        let offset = offset.offset();
+        let key = SlotMapKey::new(segment_id.value() as usize);
+        let shared_memory_map = unsafe { &mut *self.shared_memory_map.get() };
+
+        let payload_start_address = match shared_memory_map.get(key) {
+            None => {
+                let shm = fail!(from self,
+                    when DynamicMemory::open_segment(&self.view_config, segment_id),
+                    "{msg} {:?} since the corresponding shared memory segment could not be opened.", offset);
+                let payload_start_address = shm.payload_start_address();
+                let entry = ShmEntry::new(shm);
+                entry.register_offset();
+                shared_memory_map.insert_at(key, entry);
+                self.current_idx.store(key.value(), Ordering::Relaxed);
+                payload_start_address
+            }
+            Some(entry) => {
+                entry.register_offset();
+                entry.shm.payload_start_address()
+            }
+        };
+
+        Ok((offset + payload_start_address) as *const u8)
+    }
+
+    unsafe fn unregister_offset(&self, offset: PointerOffset) {
+        let segment_id = offset.segment_id();
+        let key = SlotMapKey::new(segment_id.value() as usize);
+        let shared_memory_map = unsafe { &mut *self.shared_memory_map.get() };
+
+        match shared_memory_map.get(key) {
+            Some(entry) => {
+                if entry.unregister_offset() == ShmEntryState::Empty
+                    && self.current_idx.load(Ordering::Relaxed) != key.value()
+                {
+                    shared_memory_map.remove(key);
+                }
+            }
+            None => {
+                warn!(from self,
+                    "Unable to unregister offset {:?} since the segment id is not mapped.", offset);
+            }
+        }
+    }
+
+    fn number_of_active_segments(&self) -> usize {
+        let shared_memory_map = unsafe { &mut *self.shared_memory_map.get() };
+        shared_memory_map.len()
+    }
+}
+
+#[derive(Debug)]
+pub struct DynamicMemory<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    state: UnsafeCell<InternalState<Allocator, Shm>>,
+    _mgmt_segment: Shm,
+    _data: PhantomData<Allocator>,
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> NamedConcept
+    for DynamicMemory<Allocator, Shm>
+{
+    fn name(&self) -> &FileName {
+        unsafe { &(*self.state.get()).builder_config.base_name }
+    }
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> NamedConceptMgmt
+    for DynamicMemory<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    type Configuration = Shm::Configuration;
+
+    unsafe fn remove_cfg(
+        name: &FileName,
+        config: &Shm::Configuration,
+    ) -> Result<bool, NamedConceptRemoveError> {
+        let origin = "resizable_shared_memory::Dynamic::remove_cfg()";
+        let msg = format!("Unable to remove ResizableSharedMemory {:?}", name);
+
+        let mgmt_name = Self::management_segment_name(name);
+        let mut shm_removed = fail!(from origin, when Shm::remove_cfg(&mgmt_name, config),
+            "{msg} since the underlying management segment could not be removed.");
+
+        let raw_names = match Shm::list_cfg(config) {
+            Ok(names) => names,
+            Err(NamedConceptListError::InsufficientPermissions) => {
+                fail!(from origin, with NamedConceptRemoveError::InsufficientPermissions,
+                    "{msg} due to insufficient permissions while listing the underlying SharedMemories.");
+            }
+            Err(e) => {
+                fail!(from origin, with NamedConceptRemoveError::InternalError,
+                    "{msg} due to an internal error ({:?}) while listing the underlying SharedMemories.", e);
+            }
+        };
+
+        for raw_name in &raw_names {
+            if let Some((extracted_name, _)) = Self::extract_name_and_segment_id(raw_name) {
+                if *name == extracted_name {
+                    fail!(from origin, when Shm::remove_cfg(raw_name, config),
+                        "{msg} since the underlying SharedMemory could not be removed.");
+                    shm_removed = true;
+                }
+            }
+        }
+
+        Ok(shm_removed)
+    }
+
+    fn does_exist_cfg(
+        name: &FileName,
+        config: &Shm::Configuration,
+    ) -> Result<bool, NamedConceptDoesExistError> {
+        let origin = "resizable_shared_memory::Dynamic::does_exist_cfg()";
+        let msg = format!(
+            "Unable to determine if ResizableSharedMemory {:?} exists",
+            name
+        );
+
+        let mgmt_name = Self::management_segment_name(name);
+        Ok(
+            fail!(from origin, when Shm::does_exist_cfg(&mgmt_name, config),
+                "{msg} since the existence of the underlying management segment could not be verified."),
+        )
+    }
+
+    fn list_cfg(config: &Shm::Configuration) -> Result<Vec<FileName>, NamedConceptListError> {
+        let origin = "resizable_shared_memory::Dynamic::list_cfg()";
+        let mut names = vec![];
+        let raw_names = fail!(from origin, when Shm::list_cfg(config),
+            "Unable to list ResizableSharedMemories since the underlying SharedMemories could not be listed.");
+
+        for raw_name in &raw_names {
+            if let Some(name) = Self::extract_name_from_management_segment(raw_name) {
+                names.push(name);
+            }
+        }
+
+        Ok(names)
+    }
+
+    fn remove_path_hint(value: &Path) -> Result<(), super::NamedConceptPathHintRemoveError> {
+        Shm::remove_path_hint(value)
+    }
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> DynamicMemory<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    fn management_segment_name(base_name: &FileName) -> FileName {
+        let origin = "resizable_shared_memory::DynamicMemory::management_segment_name()";
+        let msg = "Unable to construct management segment name";
+        let mut adjusted_name = *base_name;
+        fatal_panic!(from origin, when adjusted_name.push_bytes(SEGMENT_ID_SEPARATOR),
+            "This should never happen! {msg} since it would result in an invalid file name.");
+        fatal_panic!(from origin, when adjusted_name.push_bytes(MANAGEMENT_SUFFIX),
+            "This should never happen! {msg} since it would result in an invalid file name.");
+        adjusted_name
+    }
+
+    fn extract_name_from_management_segment(name: &FileName) -> Option<FileName> {
+        let mut name = *name;
+        if let Ok(true) = name.strip_suffix(MANAGEMENT_SUFFIX) {
+            if let Ok(true) = name.strip_suffix(SEGMENT_ID_SEPARATOR) {
+                return Some(name);
+            }
+        }
+
+        None
+    }
+
+    fn extract_name_and_segment_id(name: &FileName) -> Option<(FileName, SegmentId)> {
+        let origin = "resizable_shared_memory::DynamicMemory::extract_name_and_segment_id()";
+        let msg = "Unable to extract name and segment id";
+        if let Some(pos) = name.rfind(SEGMENT_ID_SEPARATOR) {
+            let segment_id_start_pos = pos + SEGMENT_ID_SEPARATOR.len();
+            if name.len() <= segment_id_start_pos {
+                return None;
+            }
+
+            let number_of_segment_id_digits =
+                SegmentId::max_segment_id().checked_ilog10().unwrap_or(0) + 1;
+
+            if name.len() > segment_id_start_pos + number_of_segment_id_digits as usize {
+                return None;
+            }
+
+            let mut raw_segment_id = *name.as_string();
+            raw_segment_id.remove_range(0, segment_id_start_pos);
+
+            // the suffix must consist of digits only
+            for byte in raw_segment_id.as_bytes() {
+                if !byte.is_ascii_digit() {
+                    return None;
+                }
+            }
+
+            let segment_id_value = fatal_panic!(from origin,
+                when String::from_utf8_lossy(raw_segment_id.as_bytes()).parse::<u64>(),
+                "This should never happen! {msg} since the segment_id raw value is not an unsigned integer.");
+
+            if segment_id_value > SegmentId::max_segment_id() as u64 {
+                return None;
+            }
+
+            let mut name = *name;
+            fatal_panic!(from origin,
+                when name.remove_range(pos, name.len() - pos),
+                "This should never happen! {msg} since the shared memory segment is an invalid file name without the segment id suffix.");
+
+            return Some((name, SegmentId::new(segment_id_value as u8)));
+        }
+
+        None
+    }
+
+    #[allow(clippy::mut_from_ref)] // internal convenience function
+    fn state_mut(&self) -> &mut InternalState<Allocator, Shm> {
+        unsafe { &mut *self.state.get() }
+    }
+
+    fn state(&self) -> &InternalState<Allocator, Shm> {
+        unsafe { &*self.state.get() }
+    }
+
+    fn create_segment(
+        config: &MemoryConfig<Allocator, Shm>,
+        segment_id: SegmentId,
+        payload_size: usize,
+    ) -> Result<Shm, SharedMemoryCreateError> {
+        Self::segment_builder(&config.base_name, &config.shm, segment_id)
+            .has_ownership(true)
+            .size(payload_size)
+            .create(&config.allocator_config_hint)
+    }
+
+    fn open_segment(
+        config: &ViewConfig<Allocator, Shm>,
+        segment_id: SegmentId,
+    ) -> Result<Shm, SharedMemoryOpenError> {
+        Self::segment_builder(&config.base_name, &config.shm, segment_id)
+            .has_ownership(false)
+            .timeout(config.shm_builder_timeout)
+            .open()
+    }
+
+    fn segment_builder(
+        base_name: &FileName,
+        config: &Shm::Configuration,
+        segment_id: SegmentId,
+    ) -> Shm::Builder {
+        let msg = "This should never happen! Unable to create additional shared memory segment since it would result in an invalid shared memory name.";
+        let mut adjusted_name = *base_name;
+        fatal_panic!(from config, when adjusted_name.push_bytes(SEGMENT_ID_SEPARATOR), "{msg}");
+        fatal_panic!(from config, when adjusted_name.push_bytes(segment_id.value().to_string().as_bytes()), "{msg}");
+        Shm::Builder::new(&adjusted_name).config(config)
+    }
+
+    fn create_resized_segment(
+        &self,
+        shm: &Shm,
+        layout: Layout,
+    ) -> Result<(), ResizableShmAllocationError> {
+        let msg = "Unable to create resized segment for";
+        let state = self.state_mut();
+        let adjusted_segment_setup = shm
+            .allocator()
+            .resize_hint(layout, state.shared_state.allocation_strategy);
+        let new_number_of_reallocations = state.current_idx.value() + 1;
+        let segment_id = if new_number_of_reallocations < MAX_NUMBER_OF_REALLOCATIONS {
+            SlotMapKey::new(new_number_of_reallocations)
+        } else {
+            fail!(from self, with ResizableShmAllocationError::MaxReallocationsReached,
+                "{msg} {:?} since it would exceed the maximum amount of reallocations of {}. With a better configuration hint, this issue can be avoided.",
+                layout, Self::max_number_of_reallocations());
+        };
+
+        state.builder_config.allocator_config_hint = adjusted_segment_setup.config;
+        let shm = Self::create_segment(
+            &state.builder_config,
+            SegmentId::new(segment_id.value() as u8),
+            adjusted_segment_setup.payload_size,
+        )?;
+
+        match state.shared_memory_map.get(state.current_idx) {
+            Some(segment) => {
+                if segment.chunk_count.load(Ordering::Relaxed) == 0 {
+                    state.shared_memory_map.remove(state.current_idx);
+                }
+            }
+            None => {
+                fatal_panic!(from self,
+                    "This should never happen! {msg} {:?} since the current segment id is unavailable.",
+                    layout)
+            }
+        }
+
+        state
+            .shared_memory_map
+            .insert_at(segment_id, ShmEntry::new(shm));
+        state.current_idx = segment_id;
+
+        Ok(())
+    }
+
+    fn handle_reallocation(
+        &self,
+        e: ShmAllocationError,
+        state: &InternalState<Allocator, Shm>,
+        layout: Layout,
+        shm: &Shm,
+    ) -> Result<(), ResizableShmAllocationError> {
+        let msg = "Unable to allocate memory";
+        if e == ShmAllocationError::AllocationError(AllocationError::OutOfMemory)
+            || e == ShmAllocationError::ExceedsMaxSupportedAlignment
+            || e == ShmAllocationError::AllocationError(AllocationError::SizeTooLarge)
+        {
+            if state.shared_state.allocation_strategy == AllocationStrategy::Static {
+                fail!(from self, with e.into(),
+                    "{msg} since there is not enough memory left ({:?}) and the allocation strategy {:?} forbids reallocation.",
+                    e, state.shared_state.allocation_strategy);
+            } else {
+                self.create_resized_segment(shm, layout)?;
+                Ok(())
+            }
+        } else {
+            fail!(from self, with e.into(), "{msg} due to {:?}.", e);
+        }
+    }
+}
+
+impl<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> ResizableSharedMemory<Allocator, Shm>
+    for DynamicMemory<Allocator, Shm>
+where
+    Shm::Builder: Debug,
+{
+    type ViewBuilder = DynamicViewBuilder<Allocator, Shm>;
+    type MemoryBuilder = DynamicMemoryBuilder<Allocator, Shm>;
+    type View = DynamicView<Allocator, Shm>;
+
+    fn max_number_of_reallocations() -> usize {
+        MAX_NUMBER_OF_REALLOCATIONS
+    }
+
+    fn number_of_active_segments(&self) -> usize {
+        self.state().shared_memory_map.len()
+    }
+
+    fn allocate(&self, layout: Layout) -> Result<ShmPointer, ResizableShmAllocationError> {
+        let msg = "Unable to allocate memory";
+        let state = self.state_mut();
+
+        loop {
+            match state.shared_memory_map.get(state.current_idx) {
+                Some(entry) => match entry.shm.allocate(layout) {
+                    Ok(mut ptr) => {
+                        entry.register_offset();
+                        ptr.offset
+                            .set_segment_id(SegmentId::new(state.current_idx.value() as u8));
+                        return Ok(ptr);
+                    }
+                    Err(e) => self.handle_reallocation(e, state, layout, &entry.shm)?,
+                },
+                None => fatal_panic!(from self,
+                    "This should never happen! {msg} since the current shared memory segment is not available!"),
+            }
+        }
+    }
+
+    unsafe fn deallocate(&self, offset: PointerOffset, layout: Layout) {
+        let segment_id = SlotMapKey::new(offset.segment_id().value() as usize);
+        let state = self.state_mut();
+        match state.shared_memory_map.get(segment_id) {
+            Some(entry) => {
+                entry.shm.deallocate(offset, layout);
+                if entry.unregister_offset() == ShmEntryState::Empty
+                    && segment_id != state.current_idx
+                {
+                    state.shared_memory_map.remove(segment_id);
+                }
+            }
+            None => fatal_panic!(from self,
+                "This should never happen! Unable to deallocate {:?} since the corresponding shared memory segment is not available!", offset),
+        }
+    }
+}
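To make the naming scheme concrete: every payload segment and the management segment are ordinary `SharedMemory` concepts whose names are derived from the user-supplied base name plus the `__` separator, exactly as `segment_builder()` and `management_segment_name()` above compose them. A simplified sketch with plain `String`s instead of `FileName` (helper names are hypothetical, not part of the crate's API):

```rust
// Payload segments append the SegmentId (0..=255, since it is a u8);
// the management segment appends the "mgmt" suffix.
fn segment_name(base: &str, segment_id: u8) -> String {
    format!("{base}__{segment_id}")
}

fn management_segment_name(base: &str) -> String {
    format!("{base}__mgmt")
}

fn main() {
    assert_eq!(segment_name("my_memory", 0), "my_memory__0");
    assert_eq!(management_segment_name("my_memory"), "my_memory__mgmt");
    // extract_name_and_segment_id() reverses this via rfind(b"__"), so only
    // the last separator occurrence is significant when parsing.
}
```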
diff --git a/iceoryx2-cal/src/resizable_shared_memory/mod.rs b/iceoryx2-cal/src/resizable_shared_memory/mod.rs
new file mode 100644
index 000000000..710ebb8ae
--- /dev/null
+++ b/iceoryx2-cal/src/resizable_shared_memory/mod.rs
@@ -0,0 +1,240 @@
+// Copyright (c) 2024 Contributors to the Eclipse Foundation
+//
+// See the NOTICE file(s) distributed with this work for additional
+// information regarding copyright ownership.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Apache Software License 2.0 which is available at
+// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
+// which is available at https://opensource.org/licenses/MIT.
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A [`ResizableSharedMemory`] is identified by a name and allows multiple processes to share
+//! memory between them (inter-process memory). One process owns the [`ResizableSharedMemory`],
+//! which can be created via the [`ResizableSharedMemoryBuilder`], and many processes can have a
+//! [`ResizableSharedMemoryView`] that can be constructed via the
+//! [`ResizableSharedMemoryViewBuilder`].
+//!
+//! The [`ResizableSharedMemoryView`] never owns the [`ResizableSharedMemory`] and has only
+//! read-only access to it, while the [`ResizableSharedMemory`] can use
+//! [`ResizableSharedMemory::allocate()`] to acquire memory and distribute it between the
+//! [`ResizableSharedMemoryView`]s.
+//!
+//! Whenever the [`ResizableSharedMemoryView`] receives an offset it must be registered via
+//! [`ResizableSharedMemoryView::register_and_translate_offset()`] and unregistered via
+//! [`ResizableSharedMemoryView::unregister_offset()`]. As soon as the [`ResizableSharedMemory`]
+//! calls [`ResizableSharedMemory::deallocate()`], unused [`SharedMemory`] segments may be
+//! recycled.
+//!
+//! # Example
+//!
+//! ```
+//! // owner of the ResizableSharedMemory
+//! use iceoryx2_cal::resizable_shared_memory::*;
+//! use iceoryx2_bb_system_types::file_name::FileName;
+//! use iceoryx2_cal::shm_allocator::ShmAllocator;
+//! use iceoryx2_cal::shared_memory::SharedMemory;
+//! use iceoryx2_cal::named_concept::*;
+//! use std::alloc::Layout;
+//! use std::time::Duration;
+//!
+//! fn example<
+//!     Allocator: ShmAllocator,
+//!     Shm: SharedMemory<Allocator>,
+//!     Memory: ResizableSharedMemory<Allocator, Shm>,
+//! >(
+//!     name: &FileName
+//! ) {
+//!     // owner process creates a new memory
+//!     let memory = Memory::MemoryBuilder::new(name)
+//!         // hint to the underlying allocator that we need up to 128 chunks of memory
+//!         //
+//!         // note: as soon as there are more than 128 chunks requested a new resized segment is
+//!         //       created
+//!         .max_number_of_chunks_hint(128)
+//!         // hint to the underlying allocator that no chunk exceeds the [`Layout`] of
+//!         // [`u16`]
+//!         //
+//!         // note: as soon as there is a chunk requested that exceeds the provided layout hint a
+//!         //       new resized segment is created
+//!         .max_chunk_layout_hint(Layout::new::<u16>())
+//!         // defines the strategy how segments are resized
+//!         .allocation_strategy(AllocationStrategy::PowerOfTwo)
+//!         .create().unwrap();
+//!
+//!     let chunk = memory.allocate(Layout::new::<u16>()).unwrap();
+//!     // store the value 123 in the newly allocated chunk
+//!     unsafe { (chunk.data_ptr as *mut u16).write(123) };
+//!
+//!     // since this exceeds the chunk layout hint, the underlying segment is resized
+//!     // following the provided allocation strategy
+//!     let another_chunk = memory.allocate(Layout::new::<u64>()).unwrap();
+//!     // release the allocated chunk
+//!     unsafe { memory.deallocate(another_chunk.offset, Layout::new::<u64>()) };
+//!
+//!     let view = Memory::ViewBuilder::new(name)
+//!         // defines how long the builder shall wait to open the corresponding segments when
+//!         // the creator is concurrently creating them.
+//!         .timeout(Duration::from_secs(1))
+//!         .open().unwrap();
+//!
+//!     // before we can consume the received offset we need to translate it into our local
+//!     // process space.
+//!     // this operation also maps unmapped segments into the process space if required.
+//!     let ptr = unsafe { view.register_and_translate_offset(chunk.offset).unwrap() };
+//!     println!("received {}", unsafe { *(ptr as *const u16) });
+//!
+//!     // when we are finished consuming the memory we need to unregister the offset. this
+//!     // unmaps segments that are no longer used.
+//!     unsafe { view.unregister_offset(chunk.offset) };
+//! }
+//! ```
+
+pub mod dynamic;
+
+pub use crate::shm_allocator::AllocationStrategy;
+
+use std::alloc::Layout;
+use std::fmt::Debug;
+use std::time::Duration;
+
+use iceoryx2_bb_elementary::enum_gen;
+
+use crate::named_concept::*;
+use crate::shared_memory::{
+    SharedMemory, SharedMemoryCreateError, SharedMemoryOpenError, ShmPointer,
+};
+use crate::shm_allocator::{PointerOffset, ShmAllocationError, ShmAllocator};
+
+enum_gen! {
+/// Defines all errors that can occur when calling [`ResizableSharedMemory::allocate()`].
+///
+/// The [`ResizableSharedMemory`] cannot be resized indefinitely. If the resize limit is hit
+/// this error will be returned. It can be mitigated by providing a better
+/// [`ResizableSharedMemoryBuilder::max_number_of_chunks_hint()`] or
+/// [`ResizableSharedMemoryBuilder::max_chunk_layout_hint()`].
+    ResizableShmAllocationError
+  entry:
+    MaxReallocationsReached
+  mapping:
+    ShmAllocationError,
+    SharedMemoryCreateError
+}
+
+/// Creates a [`ResizableSharedMemoryView`] to an existing [`ResizableSharedMemory`] and maps the
+/// [`ResizableSharedMemory`] read-only into the process space.
+pub trait ResizableSharedMemoryViewBuilder<
+    Allocator: ShmAllocator,
+    Shm: SharedMemory<Allocator>,
+    ResizableShm: ResizableSharedMemory<Allocator, Shm>,
+    ResizableShmView: ResizableSharedMemoryView<Allocator, Shm>,
+>: NamedConceptBuilder<ResizableShm>
+{
+    /// The timeout defines how long the
+    /// [`SharedMemoryBuilder`](crate::shared_memory::SharedMemoryBuilder) should wait for
+    /// [`SharedMemoryBuilder::create()`](crate::shared_memory::SharedMemoryBuilder::create()) to finalize
+    /// the initialization. This is required when the [`SharedMemory`] is created and initialized
+    /// concurrently from another process. By default it is set to [`Duration::ZERO`] for no
+    /// timeout.
+    fn timeout(self, value: Duration) -> Self;
+
+    /// Opens an already existing [`ResizableSharedMemory`]. If it does not exist or the
+    /// initialization is not yet finished, the method will fail.
+    fn open(self) -> Result<ResizableShmView, SharedMemoryOpenError>;
+}
+
+/// Creates a new [`ResizableSharedMemory`] which the process will own. As soon as the
+/// corresponding object goes out-of-scope the underlying [`SharedMemory`] resources will be
+/// removed.
+pub trait ResizableSharedMemoryBuilder<
+    Allocator: ShmAllocator,
+    Shm: SharedMemory<Allocator>,
+    ResizableShm: ResizableSharedMemory<Allocator, Shm>,
+>: NamedConceptBuilder<ResizableShm>
+{
+    /// Provides an initial hint to the underlying [`ShmAllocator`] on how large the largest chunk
+    /// will be. If a chunk exceeds the hinted [`Layout`], a new [`SharedMemory`] segment is
+    /// acquired to satisfy the memory needs.
+    fn max_chunk_layout_hint(self, value: Layout) -> Self;
+
+    /// Provides an initial hint to the underlying [`ShmAllocator`] on how many chunks at most will
+    /// be used in parallel. If the number of chunks exceeds the hint, a new [`SharedMemory`]
+    /// segment is acquired to satisfy the memory needs.
+    fn max_number_of_chunks_hint(self, value: usize) -> Self;
+
+    /// Defines the [`AllocationStrategy`] that is pursued when a new [`SharedMemory`] segment is
+    /// acquired.
+    fn allocation_strategy(self, value: AllocationStrategy) -> Self;
+
+    /// Creates a new [`ResizableSharedMemory`]. If it already exists, the method will fail.
+    fn create(self) -> Result<ResizableShm, SharedMemoryCreateError>;
+}
+
+/// A read-only view to a [`ResizableSharedMemory`]. Can be created by arbitrarily many processes.
+pub trait ResizableSharedMemoryView<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>> {
+    /// Registers a received [`PointerOffset`] at the [`ResizableSharedMemoryView`] and returns the
+    /// absolute pointer to the data. If the segment of the received [`PointerOffset`] was not yet
+    /// mapped into the process space, it will be opened and mapped. If this fails a
+    /// [`SharedMemoryOpenError`] is returned.
+    ///
+    /// # Safety
+    ///
+    /// * This function shall be called exactly once for a received [`PointerOffset`]
+    unsafe fn register_and_translate_offset(
+        &self,
+        offset: PointerOffset,
+    ) -> Result<*const u8, SharedMemoryOpenError>;
+
+    /// Unregisters a received [`PointerOffset`] that was previously registered.
+    ///
+    /// # Safety
+    ///
+    /// * [`ResizableSharedMemoryView::register_and_translate_offset()`] must have been called
+    ///   with the same [`PointerOffset`] before calling this function.
+    /// * This function must be called before a registered [`PointerOffset`] goes out-of-scope.
+    /// * This function must be called at most once for any received [`PointerOffset`]
+    unsafe fn unregister_offset(&self, offset: PointerOffset);
+
+    /// Returns the number of active [`SharedMemory`] segments.
+    fn number_of_active_segments(&self) -> usize;
+}
+
+/// The [`ResizableSharedMemory`] can only be owned by exactly one process that is allowed to
+/// [`ResizableSharedMemory::allocate()`] memory and distribute the memory to all
+/// [`ResizableSharedMemoryView`]s.
+pub trait ResizableSharedMemory<Allocator: ShmAllocator, Shm: SharedMemory<Allocator>>:
+    Sized + NamedConcept + NamedConceptMgmt + Debug
+{
+    /// Type alias to the [`ResizableSharedMemoryViewBuilder`] to open a
+    /// [`ResizableSharedMemoryView`] to an existing [`ResizableSharedMemory`].
+    type ViewBuilder: ResizableSharedMemoryViewBuilder<Allocator, Shm, Self, Self::View>;
+
+    /// Type alias to the [`ResizableSharedMemoryBuilder`] to create a new [`ResizableSharedMemory`].
+    type MemoryBuilder: ResizableSharedMemoryBuilder<Allocator, Shm, Self>;
+
+    /// Type alias to the [`ResizableSharedMemoryView`] to open an existing
+    /// [`ResizableSharedMemory`] as read-only.
+    type View: ResizableSharedMemoryView<Allocator, Shm>;
+
+    /// Returns how many reallocations the [`ResizableSharedMemory`] supports. If the number is
+    /// exceeded, any call to [`ResizableSharedMemory::allocate()`] that requires a resize of the
+    /// underlying [`SharedMemory`] segments will fail.
+    fn max_number_of_reallocations() -> usize;
+
+    /// Returns the number of active [`SharedMemory`] segments.
+    fn number_of_active_segments(&self) -> usize;
+
+    /// Allocates a new piece of [`SharedMemory`]. If the provided [`Layout`] exceeds the
+    /// currently supported [`Layout`], the segment would run out of memory, or the number of
+    /// chunks exceeds the currently supported amount of chunks, a new [`SharedMemory`] segment
+    /// will be created. If this fails a [`SharedMemoryCreateError`] will be returned.
+    fn allocate(
+        &self,
+        layout: std::alloc::Layout,
+    ) -> Result<ShmPointer, ResizableShmAllocationError>;
+
+    /// Releases previously allocated memory
+    ///
+    /// # Safety
+    ///
+    /// * the offset must be acquired with [`ResizableSharedMemory::allocate()`] - extracted from
+    ///   the [`ShmPointer`]
+    /// * the layout must be identical to the one used in [`ResizableSharedMemory::allocate()`]
+    unsafe fn deallocate(&self, offset: PointerOffset, layout: std::alloc::Layout);
+}
diff --git a/iceoryx2-cal/src/shared_memory/common.rs b/iceoryx2-cal/src/shared_memory/common.rs
index 38545aff8..959bbde5f 100644
--- a/iceoryx2-cal/src/shared_memory/common.rs
+++ b/iceoryx2-cal/src/shared_memory/common.rs
@@ -383,6 +383,15 @@ pub mod details {
         }
     }
 
+    impl<Allocator: ShmAllocator, Storage: DynamicStorage<AllocatorDetails<Allocator>>>
+        crate::shared_memory::details::SharedMemoryLowLevelAPI<Allocator>
+        for Memory<Allocator, Storage>
+    {
+        fn allocator(&self) -> &Allocator {
+            unsafe { self.storage.get().allocator.assume_init_ref() }
+        }
+    }
+
     impl<Allocator: ShmAllocator, Storage: DynamicStorage<AllocatorDetails<Allocator>>>
         crate::shared_memory::SharedMemory<Allocator> for Memory<Allocator, Storage>
     {
@@ -418,7 +427,7 @@
 
             Ok(ShmPointer {
                 offset,
-                data_ptr: (offset.value() + self.payload_start_address) as *mut u8,
+                data_ptr: (offset.offset() + self.payload_start_address) as *mut u8,
             })
         }
 
diff --git a/iceoryx2-cal/src/shared_memory/mod.rs b/iceoryx2-cal/src/shared_memory/mod.rs
index 80b0441b4..24edf1a32 100644
--- a/iceoryx2-cal/src/shared_memory/mod.rs
+++ b/iceoryx2-cal/src/shared_memory/mod.rs
@@ -95,6 +95,15 @@ pub struct ShmPointer {
     pub data_ptr: *mut u8,
 }
 
+#[doc(hidden)]
+pub(crate) mod details {
+    use super::*;
+
+    pub trait SharedMemoryLowLevelAPI<Allocator: ShmAllocator> {
+        fn allocator(&self) -> &Allocator;
+    }
+}
+
 /// Creates [`SharedMemory`].
 pub trait SharedMemoryBuilder<Allocator: ShmAllocator, SharedMemory: self::SharedMemory<Allocator>>:
     NamedConceptBuilder<SharedMemory>
@@ -126,7 +135,7 @@
 pub trait SharedMemory<Allocator: ShmAllocator>:
-    Sized + Debug + NamedConcept + NamedConceptMgmt
+    Sized + Debug + NamedConcept + NamedConceptMgmt + details::SharedMemoryLowLevelAPI<Allocator>
 {
     type Builder: SharedMemoryBuilder<Allocator, Self>;
 
diff --git a/iceoryx2-cal/src/shared_memory_directory/file.rs b/iceoryx2-cal/src/shared_memory_directory/file.rs
index 4de009872..e598e058f 100644
--- a/iceoryx2-cal/src/shared_memory_directory/file.rs
+++ b/iceoryx2-cal/src/shared_memory_directory/file.rs
@@ -98,7 +98,7 @@ impl<'a> FileCreator<'a> {
     ) -> Result<File<'a>, SharedMemoryDirectoryCreateFileError> {
         let id = fail!(from self, when self.set.insert(
             name,
-            self.memory.offset.value(),
+            self.memory.offset.offset(),
             self.layout.size(),
             self.is_persistent,
         ),
diff --git a/iceoryx2-cal/src/shm_allocator/bump_allocator.rs b/iceoryx2-cal/src/shm_allocator/bump_allocator.rs
index a8c5d7813..47952d9db 100644
--- a/iceoryx2-cal/src/shm_allocator/bump_allocator.rs
+++ b/iceoryx2-cal/src/shm_allocator/bump_allocator.rs
@@ -13,7 +13,7 @@
 use crate::shm_allocator::*;
 use iceoryx2_bb_log::fail;
 
-#[derive(Default, Clone, Copy)]
+#[derive(Default, Clone, Copy, Debug)]
 pub struct Config {}
 
 impl ShmAllocatorConfig for Config {}
@@ -25,9 +25,52 @@ pub struct BumpAllocator {
     max_supported_alignment_by_memory: usize,
 }
 
+impl BumpAllocator {
+    pub fn total_space(&self) -> usize {
+        self.allocator.total_space()
+    }
+}
+
 impl ShmAllocator for BumpAllocator {
     type Configuration = Config;
 
+    fn resize_hint(
+        &self,
+        layout: Layout,
+        strategy: AllocationStrategy,
+    ) -> SharedMemorySetupHint<Self::Configuration> {
+        let current_payload_size = self.allocator.total_space();
+        if layout.size() < self.allocator.free_space() {
+            return SharedMemorySetupHint {
+                payload_size: current_payload_size,
+                config: Self::Configuration::default(),
+            };
+        }
+
+        let payload_size = match strategy {
+            AllocationStrategy::BestFit => current_payload_size + layout.size(),
+            AllocationStrategy::PowerOfTwo => {
+                (current_payload_size + layout.size()).next_power_of_two()
+            }
+            AllocationStrategy::Static => current_payload_size,
+        };
+
+        SharedMemorySetupHint {
+            payload_size,
+            config: Self::Configuration::default(),
+        }
+    }
+
+    fn initial_setup_hint(
+        max_chunk_layout: Layout,
+        max_number_of_chunks: usize,
+    ) -> SharedMemorySetupHint<Self::Configuration> {
+        SharedMemorySetupHint {
+            config: Self::Configuration::default(),
+            payload_size: max_chunk_layout.size() * max_number_of_chunks,
+        }
+    }
+
     fn management_size(_memory_size: usize, _config: &Self::Configuration) -> usize {
         0
     }
@@ -90,7 +133,7 @@
 
     unsafe fn deallocate(&self, offset: PointerOffset, layout: Layout) {
         self.allocator.deallocate(
-            NonNull::new_unchecked((offset.0 + self.base_address) as *mut u8),
+            NonNull::new_unchecked((offset.offset() + self.base_address) as *mut u8),
             layout,
         );
     }
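The effect of the three strategies in `resize_hint()` above is easiest to see with numbers; a small worked sketch for a bump allocator whose payload currently spans 4096 bytes and which receives a 100-byte allocation that no longer fits (values are illustrative):

```rust
fn main() {
    let current_payload_size: usize = 4096;
    let requested = 100;

    // BestFit: grow to exactly what is needed.
    assert_eq!(current_payload_size + requested, 4196);
    // PowerOfTwo: round the grown size up to the next power of two, trading
    // memory overhead for fewer future reallocations.
    assert_eq!((current_payload_size + requested).next_power_of_two(), 8192);
    // Static: keep the current size; the caller sees an out-of-memory error
    // instead of a resize.
    assert_eq!(current_payload_size, 4096);
}
```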
diff --git a/iceoryx2-cal/src/shm_allocator/mod.rs b/iceoryx2-cal/src/shm_allocator/mod.rs
index fa803a5af..ed5905c66 100644
--- a/iceoryx2-cal/src/shm_allocator/mod.rs
+++ b/iceoryx2-cal/src/shm_allocator/mod.rs
@@ -11,46 +11,82 @@
 // SPDX-License-Identifier: Apache-2.0 OR MIT
 
 pub mod bump_allocator;
+pub mod pointer_offset;
 pub mod pool_allocator;
 
-use std::{alloc::Layout, ptr::NonNull};
+use std::{alloc::Layout, fmt::Debug, ptr::NonNull};
 
 pub use iceoryx2_bb_elementary::allocator::AllocationError;
 use iceoryx2_bb_elementary::{allocator::BaseAllocator, enum_gen};
+pub use pointer_offset::*;
 
-pub trait ShmAllocatorConfig: Copy + Default {}
+/// Trait that identifies a configuration of a [`ShmAllocator`].
+pub trait ShmAllocatorConfig: Copy + Default + Debug {}
 
-#[derive(Clone, Copy, Eq, PartialEq, Debug)]
-pub struct PointerOffset(usize);
-
-impl PointerOffset {
-    pub fn new(value: usize) -> PointerOffset {
-        Self(value)
-    }
-
-    pub fn value(&self) -> usize {
-        self.0
-    }
-}
-
-enum_gen! { ShmAllocationError
+enum_gen! {
+/// Describes the errors that can occur when [`ShmAllocator::allocate()`] is called.
+    ShmAllocationError
   entry:
     ExceedsMaxSupportedAlignment
   mapping:
     AllocationError
 }
 
+/// Describes generically an [`AllocationStrategy`], meaning how the memory is increased when the
+/// available memory is insufficient.
+#[derive(Clone, Copy, Eq, PartialEq, Debug, Default)]
+pub enum AllocationStrategy {
+    /// Increases the memory so that it perfectly fits the new size requirements. This may lead
+    /// to a lot of reallocations but has the benefit that no byte is wasted.
+    BestFit,
+    /// Increases the memory by rounding the increased memory size up to the next power of two.
+    /// Reduces reallocations a lot at the cost of increased memory usage.
+    PowerOfTwo,
+    /// The memory is not increased. This may lead to an out-of-memory error when allocating.
+    #[default]
+    Static,
+}
+
+/// Describes the errors that may occur when a [`ShmAllocator`] is initialized.
 #[derive(Clone, Copy, Eq, PartialEq, Debug)]
 pub enum ShmAllocatorInitError {
+    /// The [`SharedMemory`](crate::shared_memory::SharedMemory) max supported alignment does not
+    /// satisfy the required alignment of the [`ShmAllocator`].
     MaxSupportedMemoryAlignmentInsufficient,
+    /// The [`ShmAllocator`] requires more memory to initialize than available.
     AllocationFailed,
 }
 
+/// Returned by [`ShmAllocator::resize_hint()`] and [`ShmAllocator::initial_setup_hint()`].
+/// It contains a payload size and a [`ShmAllocator`] configuration suggestion for the given
+/// parameters.
+pub struct SharedMemorySetupHint<Config: ShmAllocatorConfig> {
+    /// The payload size of the [`SharedMemory`](crate::shared_memory::SharedMemory)
+    pub payload_size: usize,
+    /// The [`ShmAllocatorConfig`] that shall be used for the
+    /// [`SharedMemory`](crate::shared_memory::SharedMemory)
+    pub config: Config,
+}
+
 /// Every allocator implementation must be relocatable. The allocator itself must be stored either
 /// in the same shared memory segment or in a separate shared memory segment of a different type
 /// but accessible by all participating processes.
-pub trait ShmAllocator: Send + Sync + 'static {
+pub trait ShmAllocator: Debug + Send + Sync + 'static {
     type Configuration: ShmAllocatorConfig;
 
+    /// Suggests a new payload size by considering the current allocation state in combination
+    /// with a provided [`AllocationStrategy`] and a `layout` that shall be allocatable.
+    fn resize_hint(
+        &self,
+        layout: Layout,
+        strategy: AllocationStrategy,
+    ) -> SharedMemorySetupHint<Self::Configuration>;
+
+    /// Suggests a managed payload size under the provided configuration, assuming that at most
+    /// `max_number_of_chunks` chunks of memory are in use in parallel.
+    fn initial_setup_hint(
+        max_chunk_layout: Layout,
+        max_number_of_chunks: usize,
+    ) -> SharedMemorySetupHint<Self::Configuration>;
 
     /// Returns the required memory size of the additional dynamic part of the allocator that is
     /// allocated in [`ShmAllocator::init()`].
diff --git a/iceoryx2-cal/src/shm_allocator/pointer_offset.rs b/iceoryx2-cal/src/shm_allocator/pointer_offset.rs
new file mode 100644
index 000000000..da8eb1b10
--- /dev/null
+++ b/iceoryx2-cal/src/shm_allocator/pointer_offset.rs
@@ -0,0 +1,81 @@
+// Copyright (c) 2023 Contributors to the Eclipse Foundation
+//
+// See the NOTICE file(s) distributed with this work for additional
+// information regarding copyright ownership.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Apache Software License 2.0 which is available at
+// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
+// which is available at https://opensource.org/licenses/MIT.
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use std::fmt::Debug;
+
+pub type SegmentIdUnderlyingType = u8;
+
+/// Defines the [`SegmentId`] of a [`SharedMemory`](crate::shared_memory::SharedMemory)
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct SegmentId(SegmentIdUnderlyingType);
+
+impl SegmentId {
+    /// Creates a new [`SegmentId`] from a given value.
+    pub const fn new(value: SegmentIdUnderlyingType) -> Self {
+        Self(value)
+    }
+
+    /// Returns the underlying value of the [`SegmentId`].
+    pub const fn value(&self) -> SegmentIdUnderlyingType {
+        self.0
+    }
+
+    /// Returns the maximum value the [`SegmentId`] supports.
+    pub const fn max_segment_id() -> SegmentIdUnderlyingType {
+        SegmentIdUnderlyingType::MAX
+    }
+}
+
+/// An offset to a [`SharedMemory`](crate::shared_memory::SharedMemory) address. It requires the
+/// [`SharedMemory::payload_start_address()`](crate::shared_memory::SharedMemory::payload_start_address())
+/// of the corresponding [`SharedMemory`](crate::shared_memory::SharedMemory) to be converted into
+/// an actual pointer.
+///
+/// Contains the offset and the corresponding [`SegmentId`].
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub struct PointerOffset(u64);
+
+impl PointerOffset {
+    /// Creates a new [`PointerOffset`] from the given offset value with the [`SegmentId`] == 0.
+    pub const fn new(offset: usize) -> PointerOffset {
+        const SEGMENT_ID: u64 = 0;
+        Self(((offset as u64) << SegmentIdUnderlyingType::BITS) | SEGMENT_ID)
+    }
+
+    /// Sets the [`SegmentId`] of the [`PointerOffset`].
+    pub fn set_segment_id(&mut self, value: SegmentId) {
+        self.0 &= !((1u64 << SegmentIdUnderlyingType::BITS) - 1);
+        self.0 |= value.0 as u64;
+    }
+
+    /// Returns the offset.
+    pub const fn offset(&self) -> usize {
+        (self.0 >> SegmentIdUnderlyingType::BITS) as usize
+    }
+
+    /// Returns the [`SegmentId`].
+    pub const fn segment_id(&self) -> SegmentId {
+        SegmentId((self.0 & ((1u64 << SegmentIdUnderlyingType::BITS) - 1)) as u8)
+    }
+}
+
+impl Debug for PointerOffset {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "PointerOffset {{ offset: {}, segment_id: {:?} }}",
+            self.offset(),
+            self.segment_id()
+        )
+    }
+}
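`PointerOffset` packs both values into a single `u64`: the payload offset occupies the upper 56 bits, the `SegmentId` the lowest 8 (`SegmentIdUnderlyingType::BITS`). A short round-trip illustration with hypothetical values:

```rust
use iceoryx2_cal::shm_allocator::{PointerOffset, SegmentId};

fn main() {
    // raw representation: (0x1234 << 8) | 7 == 0x12_34_07
    let mut sut = PointerOffset::new(0x1234);
    sut.set_segment_id(SegmentId::new(7));

    assert_eq!(sut.offset(), 0x1234);
    assert_eq!(sut.segment_id(), SegmentId::new(7));
}
```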
align) + }, + } + } else { + current_layout + }; + + Self::initial_setup_hint(adjusted_layout, adjusted_number_of_buckets as usize) + } + + fn initial_setup_hint( + max_chunk_layout: Layout, + max_number_of_chunks: usize, + ) -> SharedMemorySetupHint { + SharedMemorySetupHint { + payload_size: max_chunk_layout.size() * max_number_of_chunks, + config: Self::Configuration { + bucket_layout: max_chunk_layout, + }, + } + } + fn management_size(memory_size: usize, config: &Self::Configuration) -> usize { iceoryx2_bb_memory::pool_allocator::PoolAllocator::memory_size( config.bucket_layout, @@ -80,6 +155,7 @@ impl ShmAllocator for PoolAllocator { ), base_address: (managed_memory.as_ptr() as *mut u8) as usize, max_supported_alignment_by_memory, + number_of_used_buckets: IoxAtomicUsize::new(0), } } @@ -117,14 +193,16 @@ impl ShmAllocator for PoolAllocator { } let chunk = fail!(from self, when self.allocator.allocate(layout), "{}.", msg); + self.number_of_used_buckets.fetch_add(1, Ordering::Relaxed); Ok(PointerOffset::new( (chunk.as_ptr() as *const u8) as usize - self.allocator.start_address(), )) } unsafe fn deallocate(&self, offset: PointerOffset, layout: Layout) { + self.number_of_used_buckets.fetch_sub(1, Ordering::Relaxed); self.allocator.deallocate( - NonNull::new_unchecked((offset.value() + self.allocator.start_address()) as *mut u8), + NonNull::new_unchecked((offset.offset() + self.allocator.start_address()) as *mut u8), layout, ); } diff --git a/iceoryx2-cal/src/zero_copy_connection/common.rs b/iceoryx2-cal/src/zero_copy_connection/common.rs index 1a48a6ed1..d9a151b00 100644 --- a/iceoryx2-cal/src/zero_copy_connection/common.rs +++ b/iceoryx2-cal/src/zero_copy_connection/common.rs @@ -499,13 +499,13 @@ pub mod details { .storage .get() .used_chunk_list - .insert(ptr.value() / self.storage.get().sample_size) + .insert(ptr.offset() / self.storage.get().sample_size) { fail!(from self, with ZeroCopySendError::UsedChunkListFull, "{} since the used chunk list is full.", msg); } - match unsafe { self.storage.get().submission_channel.push(ptr.value()) } { + match unsafe { self.storage.get().submission_channel.push(ptr.offset()) } { Some(v) => { if !self .storage @@ -636,7 +636,7 @@ pub mod details { } fn release(&self, ptr: PointerOffset) -> Result<(), ZeroCopyReleaseError> { - match unsafe { self.storage.get().completion_channel.push(ptr.value()) } { + match unsafe { self.storage.get().completion_channel.push(ptr.offset()) } { true => { *self.borrow_counter() -= 1; Ok(()) diff --git a/iceoryx2-cal/tests/pointer_offset_tests.rs b/iceoryx2-cal/tests/pointer_offset_tests.rs new file mode 100644 index 000000000..3781e5071 --- /dev/null +++ b/iceoryx2-cal/tests/pointer_offset_tests.rs @@ -0,0 +1,50 @@ +// Copyright (c) 2024 Contributors to the Eclipse Foundation +// +// See the NOTICE file(s) distributed with this work for additional +// information regarding copyright ownership. +// +// This program and the accompanying materials are made available under the +// terms of the Apache Software License 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license +// which is available at https://opensource.org/licenses/MIT. 
+// +// SPDX-License-Identifier: Apache-2.0 OR MIT + +mod pointer_offset { + use iceoryx2_bb_testing::assert_that; + use iceoryx2_cal::shm_allocator::{PointerOffset, SegmentId}; + + #[test] + fn new_works() { + const TEST_OFFSET: usize = 123914; + let sut = PointerOffset::new(TEST_OFFSET); + + assert_that!(sut.offset(), eq TEST_OFFSET); + assert_that!(sut.segment_id(), eq SegmentId::new(0)); + } + + #[test] + fn set_segment_id_works() { + const TEST_OFFSET: usize = 123914; + const SEGMENT_ID: SegmentId = SegmentId::new(7); + let mut sut = PointerOffset::new(TEST_OFFSET); + sut.set_segment_id(SEGMENT_ID); + + assert_that!(sut.offset(), eq TEST_OFFSET); + assert_that!(sut.segment_id(), eq SEGMENT_ID); + } + + #[test] + fn set_segment_id_multiple_times_works() { + const TEST_OFFSET: usize = 123914; + const SEGMENT_ID_1: SegmentId = SegmentId::new(7); + const SEGMENT_ID_2: SegmentId = SegmentId::new(8); + let mut sut = PointerOffset::new(TEST_OFFSET); + + sut.set_segment_id(SEGMENT_ID_1); + assert_that!(sut.segment_id(), eq SEGMENT_ID_1); + + sut.set_segment_id(SEGMENT_ID_2); + assert_that!(sut.segment_id(), eq SEGMENT_ID_2); + } +} diff --git a/iceoryx2-cal/tests/resizable_shared_memory_tests.rs b/iceoryx2-cal/tests/resizable_shared_memory_tests.rs new file mode 100644 index 000000000..0a1c3ce37 --- /dev/null +++ b/iceoryx2-cal/tests/resizable_shared_memory_tests.rs @@ -0,0 +1,954 @@ +// Copyright (c) 2024 Contributors to the Eclipse Foundation +// +// See the NOTICE file(s) distributed with this work for additional +// information regarding copyright ownership. +// +// This program and the accompanying materials are made available under the +// terms of the Apache Software License 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license +// which is available at https://opensource.org/licenses/MIT. 
+// +// SPDX-License-Identifier: Apache-2.0 OR MIT + +#[generic_tests::define] +mod resizable_shared_memory { + use std::alloc::Layout; + + use iceoryx2_bb_testing::assert_that; + use iceoryx2_cal::named_concept::*; + use iceoryx2_cal::resizable_shared_memory::{self, *}; + use iceoryx2_cal::shm_allocator::{AllocationError, AllocationStrategy, ShmAllocationError}; + use iceoryx2_cal::testing::*; + use iceoryx2_cal::{shared_memory::SharedMemory, shm_allocator::pool_allocator::PoolAllocator}; + + type DefaultAllocator = PoolAllocator; + + #[test] + fn create_and_open_works< + Shm: SharedMemory, + Sut: ResizableSharedMemory, + >() { + let storage_name = generate_name(); + let config = generate_isolated_config::(); + + let sut_creator = Sut::MemoryBuilder::new(&storage_name) + .config(&config) + .max_number_of_chunks_hint(1) + .allocation_strategy(AllocationStrategy::PowerOfTwo) + .create() + .unwrap(); + let sut_viewer = Sut::ViewBuilder::new(&storage_name) + .config(&config) + .open() + .unwrap(); + + let test_value_1 = 189273771; + let test_value_2 = 90912638975; + let ptr_creator = sut_creator.allocate(Layout::new::()).unwrap(); + + unsafe { (ptr_creator.data_ptr as *mut u64).write(test_value_1) }; + + let ptr_view = unsafe { + sut_viewer + .register_and_translate_offset(ptr_creator.offset) + .unwrap() as *const u64 + }; + + assert_that!(unsafe{ *ptr_view }, eq test_value_1); + unsafe { (ptr_creator.data_ptr as *mut u64).write(test_value_2) }; + assert_that!(unsafe{ *ptr_view }, eq test_value_2); + } + + #[test] + fn allocate_more_layout_than_hinted_when_no_other_chunks_are_in_use_releases_smaller_segment< + Shm: SharedMemory, + Sut: ResizableSharedMemory, + >() { + let storage_name = generate_name(); + let config = generate_isolated_config::(); + + let sut = Sut::MemoryBuilder::new(&storage_name) + .config(&config) + .max_chunk_layout_hint(Layout::new::()) + .max_number_of_chunks_hint(128) + .allocation_strategy(AllocationStrategy::PowerOfTwo) + .create() + .unwrap(); + + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 1); + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 2); + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 3); + } + + #[test] + fn allocate_more_layout_than_hinted_when_other_chunks_are_in_use_does_not_releases_smaller_segment< + Shm: SharedMemory, + Sut: ResizableSharedMemory, + >() { + let storage_name = generate_name(); + let config = generate_isolated_config::(); + + let sut = Sut::MemoryBuilder::new(&storage_name) + .config(&config) + .max_chunk_layout_hint(Layout::new::()) + .max_number_of_chunks_hint(128) + .allocation_strategy(AllocationStrategy::PowerOfTwo) + .create() + .unwrap(); + + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 1); + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 2); + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 3); + sut.allocate(Layout::new::()).unwrap(); + assert_that!(sut.number_of_active_segments(), eq 4); + } + + #[test] + fn allocate_more_than_hinted_works< + Shm: SharedMemory, + Sut: ResizableSharedMemory, + >() { + let storage_name = generate_name(); + let config = generate_isolated_config::(); + + let sut_creator = Sut::MemoryBuilder::new(&storage_name) + .config(&config) + .max_number_of_chunks_hint(1) + .allocation_strategy(AllocationStrategy::PowerOfTwo) + .create() + 
+            .unwrap();
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let ptr_creator_1 = sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        assert_that!(sut_creator.number_of_active_segments(), eq 1);
+        let ptr_creator_2 = sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        assert_that!(sut_creator.number_of_active_segments(), eq 2);
+
+        let test_value_1 = 109875896345234897;
+        let test_value_2 = 412384034975234569;
+
+        unsafe { (ptr_creator_1.data_ptr as *mut u64).write(test_value_1) };
+        unsafe { (ptr_creator_2.data_ptr as *mut u64).write(test_value_2) };
+
+        let ptr_view_1 = unsafe {
+            sut_viewer
+                .register_and_translate_offset(ptr_creator_1.offset)
+                .unwrap() as *const u64
+        };
+        let ptr_view_2 = unsafe {
+            sut_viewer
+                .register_and_translate_offset(ptr_creator_2.offset)
+                .unwrap() as *const u64
+        };
+
+        assert_that!(unsafe{ *ptr_view_1 }, eq test_value_1);
+        assert_that!(unsafe{ *ptr_view_2 }, eq test_value_2);
+    }
+
+    #[test]
+    fn deallocate_removes_unused_segments_on_creator_side<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(AllocationStrategy::PowerOfTwo)
+            .create()
+            .unwrap();
+
+        let ptr_creator_1 = sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        assert_that!(sut_creator.number_of_active_segments(), eq 1);
+
+        let _ptr_creator_2 = sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        assert_that!(sut_creator.number_of_active_segments(), eq 2);
+
+        unsafe { sut_creator.deallocate(ptr_creator_1.offset, Layout::new::<u64>()) };
+        assert_that!(sut_creator.number_of_active_segments(), eq 1);
+    }
+
+    #[test]
+    fn unregister_removes_unused_segments_on_viewer_side<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(AllocationStrategy::PowerOfTwo)
+            .create()
+            .unwrap();
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let ptr_creator_1 = sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        assert_that!(sut_creator.number_of_active_segments(), eq 1);
+
+        let ptr_creator_2 = sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        assert_that!(sut_creator.number_of_active_segments(), eq 2);
+
+        unsafe {
+            sut_viewer
+                .register_and_translate_offset(ptr_creator_1.offset)
+                .unwrap()
+        };
+        unsafe {
+            sut_viewer
+                .register_and_translate_offset(ptr_creator_2.offset)
+                .unwrap()
+        };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 2);
+
+        unsafe { sut_viewer.unregister_offset(ptr_creator_1.offset) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 1);
+    }
+
+    fn allocate_more_than_hinted_with_increasing_chunk_size_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >(
+        strategy: AllocationStrategy,
+    ) {
+        const NUMBER_OF_ITERATIONS: usize = 128;
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(strategy)
+            .create()
+            .unwrap();
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let mut ptrs = vec![];
+        for i in 0..NUMBER_OF_ITERATIONS {
+            let size = 2 + i;
+            let layout = unsafe { Layout::from_size_align_unchecked(size, 1) };
+            let ptr = sut_creator.allocate(layout).unwrap();
+
+            for n in 0..size {
+                unsafe { ptr.data_ptr.add(n).write(i as u8) };
+            }
+            ptrs.push(ptr);
+        }
+
+        for i in 0..NUMBER_OF_ITERATIONS {
+            let size = 2 + i;
+            let ptr_view = unsafe {
+                sut_viewer
+                    .register_and_translate_offset(ptrs[i].offset)
+                    .unwrap()
+            };
+
+            for n in 0..size {
+                assert_that!(unsafe{ *ptr_view.add(n) }, eq i as u8);
+            }
+        }
+    }
+
+    #[test]
+    fn allocate_more_than_hinted_with_increasing_chunk_size_and_best_fit_strategy_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        allocate_more_than_hinted_with_increasing_chunk_size_works::<Shm, Sut>(
+            AllocationStrategy::BestFit,
+        );
+    }
+
+    #[test]
+    fn allocate_more_than_hinted_with_increasing_chunk_size_and_power_of_two_strategy_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        allocate_more_than_hinted_with_increasing_chunk_size_works::<Shm, Sut>(
+            AllocationStrategy::PowerOfTwo,
+        );
+    }
+
+    fn allocate_with_sufficient_chunk_hint_and_increasing_size<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >(
+        strategy: AllocationStrategy,
+    ) {
+        const NUMBER_OF_REALLOCATIONS: usize = 128;
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(NUMBER_OF_REALLOCATIONS)
+            .allocation_strategy(strategy)
+            .create()
+            .unwrap();
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let mut ptrs = vec![];
+        for i in 0..NUMBER_OF_REALLOCATIONS {
+            let size = 2 + i;
+            let layout = unsafe { Layout::from_size_align_unchecked(size, 1) };
+            let ptr = sut_creator.allocate(layout).unwrap();
+
+            for n in 0..size {
+                unsafe { ptr.data_ptr.add(n).write(2 * i as u8) };
+            }
+            ptrs.push(ptr);
+        }
+
+        for i in 0..NUMBER_OF_REALLOCATIONS {
+            let size = 2 + i;
+            let ptr_view = unsafe {
+                sut_viewer
+                    .register_and_translate_offset(ptrs[i].offset)
+                    .unwrap()
+            };
+
+            for n in 0..size {
+                assert_that!(unsafe{ *ptr_view.add(n) }, eq 2*i as u8);
+            }
+        }
+    }
+
+    #[test]
+    fn allocate_with_sufficient_chunk_hint_and_increasing_size_strategy_power_of_two<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        allocate_with_sufficient_chunk_hint_and_increasing_size::<Shm, Sut>(
+            AllocationStrategy::PowerOfTwo,
+        )
+    }
+
+    #[test]
+    fn allocate_with_sufficient_chunk_hint_and_increasing_size_strategy_best_fit<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        allocate_with_sufficient_chunk_hint_and_increasing_size::<Shm, Sut>(
+            AllocationStrategy::BestFit,
+        )
+    }
+
+    fn allocate_with_sufficient_chunk_hint_and_increasing_alignment<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >(
+        strategy: AllocationStrategy,
+    ) {
+        const NUMBER_OF_REALLOCATIONS: usize = 6;
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+        let size = 1024;
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::from_size_align(size, 1).unwrap())
+            .max_number_of_chunks_hint(NUMBER_OF_REALLOCATIONS)
+            .allocation_strategy(strategy)
+            .create()
+            .unwrap();
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let mut ptrs = vec![];
+        for i in 0..NUMBER_OF_REALLOCATIONS {
+            let layout = unsafe {
+                Layout::from_size_align_unchecked(size, 2_i32.pow(i as u32 + 1) as usize)
+            };
+            let ptr = sut_creator.allocate(layout).unwrap();
+
+            for n in 0..size {
+                unsafe { ptr.data_ptr.add(n).write(2 * i as u8) };
+            }
+            ptrs.push(ptr);
+        }
+
+        for i in 0..NUMBER_OF_REALLOCATIONS {
+            let size = 2 + i;
+            let ptr_view = unsafe {
+                sut_viewer
+                    .register_and_translate_offset(ptrs[i].offset)
+                    .unwrap()
+            };
+
+            for n in 0..size {
+                assert_that!(unsafe{ *ptr_view.add(n) }, eq 2*i as u8);
+            }
+        }
+    }
+
+    #[test]
+    fn allocate_with_sufficient_chunk_hint_and_increasing_alignment_strategy_power_of_two<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        allocate_with_sufficient_chunk_hint_and_increasing_alignment::<Shm, Sut>(
+            AllocationStrategy::PowerOfTwo,
+        )
+    }
+
+    #[test]
+    fn allocate_with_sufficient_chunk_hint_and_increasing_alignment_strategy_best_fit<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        allocate_with_sufficient_chunk_hint_and_increasing_alignment::<Shm, Sut>(
+            AllocationStrategy::BestFit,
+        )
+    }
+
+    #[test]
+    fn deallocate_last_segment_does_not_release_it<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .allocation_strategy(AllocationStrategy::Static)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .create()
+            .unwrap();
+
+        assert_that!(sut.allocate(Layout::new::<u8>()), is_ok);
+
+        let result = sut.allocate(Layout::new::<u8>());
+        assert_that!(result, is_err);
+        assert_that!(result.err().unwrap(), eq ResizableShmAllocationError::ShmAllocationError(ShmAllocationError::AllocationError(AllocationError::OutOfMemory)));
+    }
+
+    #[test]
+    fn static_allocation_strategy_does_not_resize_available_chunks<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .allocation_strategy(AllocationStrategy::Static)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(8)
+            .create()
+            .unwrap();
+
+        let result = sut.allocate(Layout::from_size_align(8, 1).unwrap());
+        assert_that!(result, is_err);
+    }
+
+    #[test]
+    fn static_allocation_strategy_does_not_increase_available_chunks<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let storage_name = generate_name();
+        let config = generate_isolated_config::<Sut>();
+
+        let sut = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .allocation_strategy(AllocationStrategy::Static)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .create()
+            .unwrap();
+
+        let result = sut.allocate(Layout::new::<u8>());
+        assert_that!(result, is_ok);
+        let result = sut.allocate(Layout::new::<u8>());
+        assert_that!(result, is_err);
+    }
+
+    #[test]
+    fn list_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const NUMBER_OF_STORAGES: usize = 28;
+        let config = generate_isolated_config::<Sut>();
+
+        let mut suts = vec![];
+        let mut names = vec![];
+
+        for _ in 0..NUMBER_OF_STORAGES {
+            let storage_name = generate_name();
+            let sut = Sut::MemoryBuilder::new(&storage_name)
+                .config(&config)
+                .create()
+                .unwrap();
+            names.push(storage_name);
+            suts.push(sut);
+        }
+
+        let list_suts = Sut::list_cfg(&config).unwrap();
+        assert_that!(list_suts, len names.len());
+        for name in names {
+            assert_that!(list_suts, contains name);
+        }
+    }
+
+    #[test]
+    fn list_works_when_the_start_segment_is_no_longer_used<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const NUMBER_OF_STORAGES: usize = 33;
+        let config = generate_isolated_config::<Sut>();
+
+        let mut suts = vec![];
+        let mut names = vec![];
+
+        for _ in 0..NUMBER_OF_STORAGES {
+            let storage_name = generate_name();
+            let sut = Sut::MemoryBuilder::new(&storage_name)
+                .config(&config)
+                .max_chunk_layout_hint(Layout::new::<u8>())
+                .allocation_strategy(AllocationStrategy::BestFit)
+                .create()
+                .unwrap();
+
+            // this allocates a new segment and releases the original one
+            sut.allocate(Layout::new::<u16>()).unwrap();
+            assert_that!(sut.number_of_active_segments(), eq 1);
+
+            // this allocates a new segment
+            sut.allocate(Layout::new::<u32>()).unwrap();
+            assert_that!(sut.number_of_active_segments(), eq 2);
+
+            names.push(storage_name);
+            suts.push(sut);
+        }
+
+        let list_suts = Sut::list_cfg(&config).unwrap();
+        assert_that!(list_suts, len names.len());
+        for name in names {
+            assert_that!(list_suts, contains name);
+        }
+    }
+
+    #[test]
+    fn does_exist_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const NUMBER_OF_STORAGES: usize = 1;
+        let config = generate_isolated_config::<Sut>();
+
+        let mut suts = vec![];
+        let mut names = vec![];
+
+        for _ in 0..NUMBER_OF_STORAGES {
+            let storage_name = generate_name();
+            let sut = Sut::MemoryBuilder::new(&storage_name)
+                .config(&config)
+                .create()
+                .unwrap();
+            names.push(storage_name);
+            suts.push(sut);
+        }
+
+        for name in names {
+            assert_that!(Sut::does_exist_cfg(&name, &config), eq Ok(true));
+        }
+    }
+
+    #[test]
+    fn does_exist_works_when_the_start_segment_is_no_longer_used<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const NUMBER_OF_STORAGES: usize = 25;
+        let config = generate_isolated_config::<Sut>();
+
+        let mut suts = vec![];
+        let mut names = vec![];
+
+        for _ in 0..NUMBER_OF_STORAGES {
+            let storage_name = generate_name();
+            let sut = Sut::MemoryBuilder::new(&storage_name)
+                .config(&config)
+                .max_chunk_layout_hint(Layout::new::<u8>())
+                .allocation_strategy(AllocationStrategy::BestFit)
+                .create()
+                .unwrap();
+
+            // this allocates a new segment and releases the original one
+            sut.allocate(Layout::new::<u16>()).unwrap();
+            assert_that!(sut.number_of_active_segments(), eq 1);
+
+            // this allocates a new segment
+            sut.allocate(Layout::new::<u32>()).unwrap();
+            assert_that!(sut.number_of_active_segments(), eq 2);
+
+            names.push(storage_name);
+            suts.push(sut);
+        }
+
+        for name in names {
+            assert_that!(Sut::does_exist_cfg(&name, &config), eq Ok(true));
+        }
+    }
+
+    #[test]
+    fn remove_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const NUMBER_OF_STORAGES: usize = 26;
+        let config = generate_isolated_config::<Sut>();
+
+        let mut names = vec![];
+
+        for _ in 0..NUMBER_OF_STORAGES {
+            let storage_name = generate_name();
+            assert_that!(unsafe { Sut::remove_cfg(&storage_name, &config) }, eq Ok(false));
+            let sut = Sut::MemoryBuilder::new(&storage_name)
+                .config(&config)
+                .create()
+                .unwrap();
+            core::mem::forget(sut);
+            names.push(storage_name);
+        }
+
+        for name in names {
+            assert_that!(Sut::does_exist_cfg(&name, &config), eq Ok(true));
+            assert_that!(unsafe { Sut::remove_cfg(&name, &config) }, eq Ok(true));
+            assert_that!(unsafe { Sut::remove_cfg(&name, &config) }, eq Ok(false));
+            assert_that!(Sut::does_exist_cfg(&name, &config), eq Ok(false));
+        }
+    }
+
+    #[test]
+    fn remove_with_multiple_segments_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const NUMBER_OF_STORAGES: usize = 26;
+        let config = generate_isolated_config::<Sut>();
+
+        let mut names = vec![];
+
+        for _ in 0..NUMBER_OF_STORAGES {
+            let storage_name = generate_name();
+            assert_that!(unsafe { Sut::remove_cfg(&storage_name, &config) }, eq Ok(false));
+            let sut = Sut::MemoryBuilder::new(&storage_name)
+                .config(&config)
+                .max_chunk_layout_hint(Layout::new::<u8>())
+                .max_number_of_chunks_hint(123)
+                .allocation_strategy(AllocationStrategy::BestFit)
+                .create()
+                .unwrap();
+
+            sut.allocate(Layout::new::<u8>()).unwrap();
+            sut.allocate(Layout::new::<u16>()).unwrap();
+            sut.allocate(Layout::new::<u32>()).unwrap();
+            sut.allocate(Layout::new::<u64>()).unwrap();
+            assert_that!(sut.number_of_active_segments(), eq 4);
+
+            core::mem::forget(sut);
+            names.push(storage_name);
+        }
+
+        for name in names {
+            assert_that!(Sut::does_exist_cfg(&name, &config), eq Ok(true));
+            assert_that!(unsafe { Sut::remove_cfg(&name, &config) }, eq Ok(true));
+            assert_that!(unsafe { Sut::remove_cfg(&name, &config) }, eq Ok(false));
+            assert_that!(Sut::does_exist_cfg(&name, &config), eq Ok(false));
+        }
+    }
+
+    #[test]
+    fn open_when_zero_segment_not_available_works<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        const TEST_VALUE: u32 = 89123523;
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(123)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        let chunk = sut_creator.allocate(Layout::new::<u8>()).unwrap();
+        unsafe { sut_creator.deallocate(chunk.offset, Layout::new::<u8>()) };
+        let chunk = sut_creator.allocate(Layout::new::<u32>()).unwrap();
+        sut_creator.allocate(Layout::new::<u64>()).unwrap();
+        unsafe { (chunk.data_ptr as *mut u32).write(TEST_VALUE) };
+        assert_that!(sut_creator.number_of_active_segments(), eq 2);
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let translated_chunk = unsafe {
+            sut_viewer
+                .register_and_translate_offset(chunk.offset)
+                .unwrap()
+        };
+        assert_that!(unsafe { *(translated_chunk as *const u32) }, eq TEST_VALUE);
+    }
+
+    #[test]
+    fn creator_releases_resizable_shared_memory_when_it_goes_out_of_scope<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(123)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        assert_that!(Sut::does_exist_cfg(&storage_name, &config), eq Ok(true));
+        drop(sut_creator);
+        assert_that!(Sut::does_exist_cfg(&storage_name, &config), eq Ok(false));
+    }
+
+    #[test]
+    fn view_does_not_releases_resizable_shared_memory_when_it_goes_out_of_scope<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(123)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        let sut_view = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        assert_that!(Sut::does_exist_cfg(&storage_name, &config), eq Ok(true));
+        drop(sut_view);
+        assert_that!(Sut::does_exist_cfg(&storage_name, &config), eq Ok(true));
+        drop(sut_creator);
+        assert_that!(Sut::does_exist_cfg(&storage_name, &config), eq Ok(false));
+    }
+
+    #[test]
+    fn when_max_number_of_reallocations_is_exceeded_another_allocation_fails<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+
+        let sut_creator = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        for n in 0..Sut::max_number_of_reallocations() {
+            assert_that!(
+                sut_creator.allocate(Layout::from_size_align(n + 1, 1).unwrap()),
+                is_ok
+            );
+            assert_that!(sut_creator.number_of_active_segments(), eq n + 1);
+        }
+
+        let result = sut_creator.allocate(Layout::from_size_align(1024, 1).unwrap());
+        assert_that!(result, is_err);
+        assert_that!(
+            result.err().unwrap(), eq
+            ResizableShmAllocationError::MaxReallocationsReached
+        );
+    }
+
+    #[test]
+    fn register_offset_in_view_maps_required_segments<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+        let value_1 = 123;
+        let value_2 = 2345;
+        let value_3 = 345678;
+        let value_4 = 456789012345;
+
+        let sut = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        let chunk_1 = sut.allocate(Layout::new::<u8>()).unwrap();
+        let chunk_2 = sut.allocate(Layout::new::<u16>()).unwrap();
+        let chunk_3 = sut.allocate(Layout::new::<u32>()).unwrap();
+        let chunk_4 = sut.allocate(Layout::new::<u64>()).unwrap();
+
+        unsafe { (chunk_1.data_ptr as *mut u8).write(value_1) };
+        unsafe { (chunk_2.data_ptr as *mut u16).write(value_2) };
+        unsafe { (chunk_3.data_ptr as *mut u32).write(value_3) };
+        unsafe { (chunk_4.data_ptr as *mut u64).write(value_4) };
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        let tr_chunk_1 = unsafe {
+            sut_viewer
+                .register_and_translate_offset(chunk_1.offset)
+                .unwrap()
+        };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 1);
+        let tr_chunk_2 = unsafe {
+            sut_viewer
+                .register_and_translate_offset(chunk_2.offset)
+                .unwrap()
+        };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 2);
+        let tr_chunk_3 = unsafe {
+            sut_viewer
+                .register_and_translate_offset(chunk_3.offset)
+                .unwrap()
+        };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 3);
+        let tr_chunk_4 = unsafe {
+            sut_viewer
+                .register_and_translate_offset(chunk_4.offset)
+                .unwrap()
+        };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 4);
+
+        assert_that!(unsafe { *(tr_chunk_1 as *mut u8) }, eq value_1);
+        assert_that!(unsafe { *(tr_chunk_2 as *mut u16) }, eq value_2);
+        assert_that!(unsafe { *(tr_chunk_3 as *mut u32) }, eq value_3);
+        assert_that!(unsafe { *(tr_chunk_4 as *mut u64) }, eq value_4);
+    }
+
+    #[test]
+    fn unregister_offset_in_view_releases_unused_segments<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+
+        let sut = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        let chunk_1 = sut.allocate(Layout::new::<u8>()).unwrap().offset;
+        let chunk_2 = sut.allocate(Layout::new::<u16>()).unwrap().offset;
+        let chunk_3 = sut.allocate(Layout::new::<u32>()).unwrap().offset;
+        let chunk_4 = sut.allocate(Layout::new::<u64>()).unwrap().offset;
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        unsafe { sut_viewer.register_and_translate_offset(chunk_1).unwrap() };
+        unsafe { sut_viewer.register_and_translate_offset(chunk_2).unwrap() };
+        unsafe { sut_viewer.register_and_translate_offset(chunk_3).unwrap() };
+        unsafe { sut_viewer.register_and_translate_offset(chunk_4).unwrap() };
+
+        assert_that!(sut_viewer.number_of_active_segments(), eq 4);
+
+        unsafe { sut_viewer.unregister_offset(chunk_1) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 3);
+        unsafe { sut_viewer.unregister_offset(chunk_2) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 2);
+        unsafe { sut_viewer.unregister_offset(chunk_3) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 1);
+        unsafe { sut_viewer.unregister_offset(chunk_4) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 1);
+    }
+
+    #[test]
+    fn unregister_offset_in_reverse_order_in_view_releases_unused_segments<
+        Shm: SharedMemory<DefaultAllocator>,
+        Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
+    >() {
+        let config = generate_isolated_config::<Sut>();
+        let storage_name = generate_name();
+
+        let sut = Sut::MemoryBuilder::new(&storage_name)
+            .config(&config)
+            .max_chunk_layout_hint(Layout::new::<u8>())
+            .max_number_of_chunks_hint(1)
+            .allocation_strategy(AllocationStrategy::BestFit)
+            .create()
+            .unwrap();
+
+        let chunk_1 = sut.allocate(Layout::new::<u8>()).unwrap().offset;
+        let chunk_2 = sut.allocate(Layout::new::<u16>()).unwrap().offset;
+        let chunk_3 = sut.allocate(Layout::new::<u32>()).unwrap().offset;
+        let chunk_4 = sut.allocate(Layout::new::<u64>()).unwrap().offset;
+
+        let sut_viewer = Sut::ViewBuilder::new(&storage_name)
+            .config(&config)
+            .open()
+            .unwrap();
+
+        unsafe { sut_viewer.register_and_translate_offset(chunk_1).unwrap() };
+        unsafe { sut_viewer.register_and_translate_offset(chunk_2).unwrap() };
+        unsafe { sut_viewer.register_and_translate_offset(chunk_3).unwrap() };
+        unsafe { sut_viewer.register_and_translate_offset(chunk_4).unwrap() };
+
+        assert_that!(sut_viewer.number_of_active_segments(), eq 4);
+
+        unsafe { sut_viewer.unregister_offset(chunk_4) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 4);
+        unsafe { sut_viewer.unregister_offset(chunk_3) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 3);
+        unsafe { sut_viewer.unregister_offset(chunk_2) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 2);
+        unsafe { sut_viewer.unregister_offset(chunk_1) };
+        assert_that!(sut_viewer.number_of_active_segments(), eq 1);
+    }
+
+    #[instantiate_tests(<iceoryx2_cal::shared_memory::posix::Memory<DefaultAllocator>, resizable_shared_memory::dynamic::DynamicMemory<DefaultAllocator, iceoryx2_cal::shared_memory::posix::Memory<DefaultAllocator>>>)]
+    mod posix {}
+
+    #[instantiate_tests(<iceoryx2_cal::shared_memory::process_local::Memory<DefaultAllocator>, resizable_shared_memory::dynamic::DynamicMemory<DefaultAllocator, iceoryx2_cal::shared_memory::process_local::Memory<DefaultAllocator>>>)]
+    mod process_local {}
+}
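For orientation between the two test files: the creator/viewer round trip that the resizable-shared-memory tests above keep repeating boils down to the sketch below. It is illustrative only and not part of the patch; it reuses nothing beyond names that appear in the test code itself (the builder calls, `allocate`, `register_and_translate_offset`, and the `generate_*` test helpers).

fn round_trip_sketch<
    Shm: SharedMemory<DefaultAllocator>,
    Sut: ResizableSharedMemory<DefaultAllocator, Shm>,
>() {
    let storage_name = generate_name();
    let config = generate_isolated_config::<Sut>();

    // creator side: map the first segment and hand out a chunk
    let creator = Sut::MemoryBuilder::new(&storage_name)
        .config(&config)
        .allocation_strategy(AllocationStrategy::PowerOfTwo)
        .create()
        .unwrap();
    let chunk = creator.allocate(Layout::new::<u64>()).unwrap();
    unsafe { (chunk.data_ptr as *mut u64).write(42) };

    // viewer side: translate the (segment id, offset) pair into the local mapping
    let viewer = Sut::ViewBuilder::new(&storage_name)
        .config(&config)
        .open()
        .unwrap();
    let ptr = unsafe { viewer.register_and_translate_offset(chunk.offset).unwrap() };
    assert_eq!(unsafe { *(ptr as *const u64) }, 42);
}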
diff --git a/iceoryx2-cal/tests/shm_allocator_bump_allocator_tests.rs b/iceoryx2-cal/tests/shm_allocator_bump_allocator_tests.rs
new file mode 100644
index 000000000..37a8670a8
--- /dev/null
+++ b/iceoryx2-cal/tests/shm_allocator_bump_allocator_tests.rs
@@ -0,0 +1,106 @@
+// Copyright (c) 2024 Contributors to the Eclipse Foundation
+//
+// See the NOTICE file(s) distributed with this work for additional
+// information regarding copyright ownership.
+//
+// This program and the accompanying materials are made available under the
+// terms of the Apache Software License 2.0 which is available at
+// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
+// which is available at https://opensource.org/licenses/MIT.
+//
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+mod shm_allocator_bump_allocator {
+    use std::{alloc::Layout, ptr::NonNull};
+
+    use iceoryx2_bb_testing::assert_that;
+    use iceoryx2_cal::shm_allocator::{bump_allocator::*, AllocationStrategy, ShmAllocator};
+
+    const MAX_SUPPORTED_ALIGNMENT: usize = 4096;
+    const MEM_SIZE: usize = 16384 * 10;
+    const PAYLOAD_SIZE: usize = 8192;
+
+    struct TestContext {
+        _payload_memory: Box<[u8; MEM_SIZE]>,
+        _base_address: NonNull<[u8]>,
+        sut: Box<BumpAllocator>,
+    }
+
+    impl TestContext {
+        fn new() -> Self {
+            let mut payload_memory = Box::new([0u8; MEM_SIZE]);
+            let base_address =
+                unsafe { NonNull::<[u8]>::new_unchecked(&mut payload_memory[0..PAYLOAD_SIZE]) };
+            let allocator = iceoryx2_bb_memory::bump_allocator::BumpAllocator::new(
+                unsafe { NonNull::new_unchecked(payload_memory[PAYLOAD_SIZE..].as_mut_ptr()) },
+                MEM_SIZE,
+            );
+            let config = Config::default();
+            let mut sut = Box::new(unsafe {
+                BumpAllocator::new_uninit(MAX_SUPPORTED_ALIGNMENT, base_address, &config)
+            });
+
+            unsafe { sut.init(&allocator).unwrap() };
+
+            Self {
+                _payload_memory: payload_memory,
+                _base_address: base_address,
+                sut,
+            }
+        }
+    }
+
+    #[test]
+    fn initial_setup_hint_is_layout_times_number_of_chunks() {
+        let layout = Layout::from_size_align(64, 2).unwrap();
+        let max_number_of_chunks = 54;
+        let hint = BumpAllocator::initial_setup_hint(layout, max_number_of_chunks);
+
+        assert_that!(hint.payload_size, eq layout.size() * max_number_of_chunks);
+    }
+
+    fn no_new_resize_hint_when_there_is_memory_available(strategy: AllocationStrategy) {
+        let test_context = TestContext::new();
+        let hint = test_context
+            .sut
+            .resize_hint(Layout::from_size_align(8, 2).unwrap(), strategy);
+
+        assert_that!(hint.payload_size, eq test_context.sut.total_space());
+    }
+
+    #[test]
+    fn no_new_resize_hint_with_power_of_two_when_there_is_memory_available() {
+        no_new_resize_hint_when_there_is_memory_available(AllocationStrategy::PowerOfTwo)
+    }
+
+    #[test]
+    fn no_new_resize_hint_with_best_fit_when_there_is_memory_available() {
+        no_new_resize_hint_when_there_is_memory_available(AllocationStrategy::BestFit)
+    }
+
+    #[test]
+    fn new_resize_hint_with_power_of_two_when_there_is_not_enough_memory_available() {
+        let test_context = TestContext::new();
+        let layout = Layout::from_size_align(test_context.sut.total_space() + 1, 1).unwrap();
+        let hint = test_context
+            .sut
+            .resize_hint(layout, AllocationStrategy::PowerOfTwo);
+        assert_that!(
+            hint.payload_size,
+            eq(test_context.sut.total_space() + layout.size()).next_power_of_two()
+        );
+    }
+
+    #[test]
+    fn new_resize_hint_with_best_fit_when_there_is_not_enough_memory_available() {
+        let test_context = TestContext::new();
+        let layout = Layout::from_size_align(test_context.sut.total_space() + 1, 1).unwrap();
+        let hint = test_context
+            .sut
+            .resize_hint(layout, AllocationStrategy::BestFit);
+        assert_that!(
+            hint.payload_size,
+            eq(test_context.sut.total_space() + layout.size())
+        );
+    }
+}
diff --git a/iceoryx2-cal/tests/shm_allocator_pool_allocator_tests.rs b/iceoryx2-cal/tests/shm_allocator_pool_allocator_tests.rs
index 90e2fba65..37087f91a 100644
--- a/iceoryx2-cal/tests/shm_allocator_pool_allocator_tests.rs
+++ b/iceoryx2-cal/tests/shm_allocator_pool_allocator_tests.rs
@@ -17,7 +17,7 @@ mod shm_allocator_pool_allocator {
     use iceoryx2_bb_memory::bump_allocator::BumpAllocator;
     use iceoryx2_bb_testing::assert_that;
     use iceoryx2_cal::{
-        shm_allocator::{pool_allocator::*, ShmAllocationError, ShmAllocator},
+        shm_allocator::{pool_allocator::*, AllocationStrategy, ShmAllocationError, ShmAllocator},
         zero_copy_connection::PointerOffset,
     };
@@ -69,6 +69,115 @@ mod shm_allocator_pool_allocator {
         assert_that!(test_context.sut.max_alignment(), eq BUCKET_CONFIG.align());
     }
 
+    #[test]
+    fn initial_setup_hint_is_layout_times_number_of_chunks() {
+        let layout = Layout::from_size_align(64, 2).unwrap();
+        let max_number_of_chunks = 54;
+        let hint = PoolAllocator::initial_setup_hint(layout, max_number_of_chunks);
+
+        assert_that!(hint.config.bucket_layout, eq layout);
+        assert_that!(hint.payload_size, eq layout.size() * max_number_of_chunks);
+    }
+
+    fn no_new_resize_hint_when_layout_is_smaller_and_buckets_are_available(
+        strategy: AllocationStrategy,
+    ) {
+        let initial_layout = Layout::from_size_align(12, 4).unwrap();
+        let test_context = TestContext::new(initial_layout);
+        let hint = test_context
+            .sut
+            .resize_hint(Layout::from_size_align(8, 2).unwrap(), strategy);
+
+        assert_that!(hint.config.bucket_layout, eq initial_layout);
+        assert_that!(hint.payload_size, eq initial_layout.size() * test_context.sut.number_of_buckets() as usize);
+    }
+
+    #[test]
+    fn no_new_resize_hint_with_power_of_two_when_layout_is_smaller_and_buckets_are_available() {
+        no_new_resize_hint_when_layout_is_smaller_and_buckets_are_available(
+            AllocationStrategy::PowerOfTwo,
+        )
+    }
+
+    #[test]
+    fn no_new_resize_hint_with_best_fit_when_layout_is_smaller_and_buckets_are_available() {
+        no_new_resize_hint_when_layout_is_smaller_and_buckets_are_available(
+            AllocationStrategy::BestFit,
+        )
+    }
+
+    #[test]
+    fn new_resize_hint_with_power_of_two_when_layout_is_greater() {
+        let initial_layout = Layout::from_size_align(12, 4).unwrap();
+        let increased_layout = Layout::from_size_align(28, 2).unwrap();
+        let test_context = TestContext::new(initial_layout);
+        let hint = test_context
+            .sut
+            .resize_hint(increased_layout, AllocationStrategy::PowerOfTwo);
+        assert_that!(hint.config.bucket_layout.size(), eq increased_layout.size().next_power_of_two());
+        assert_that!(hint.config.bucket_layout.align(), eq initial_layout.align());
+        assert_that!(hint.payload_size, eq increased_layout.size().next_power_of_two() * test_context.sut.number_of_buckets() as usize);
+    }
+
+    #[test]
+    fn new_resize_hint_with_best_fit_when_layout_is_greater() {
+        let initial_layout = Layout::from_size_align(12, 4).unwrap();
+        let increased_layout = Layout::from_size_align(28, 2).unwrap();
+        let test_context = TestContext::new(initial_layout);
+        let hint = test_context
+            .sut
+            .resize_hint(increased_layout, AllocationStrategy::BestFit);
+        assert_that!(hint.config.bucket_layout.size(), eq increased_layout.size());
+        assert_that!(hint.config.bucket_layout.align(), eq initial_layout.align());
+        assert_that!(hint.payload_size, eq increased_layout.size() * test_context.sut.number_of_buckets() as usize);
+    }
+
+    #[test]
+    fn new_resize_hint_with_power_of_two_when_buckets_are_exhausted() {
+        let initial_layout = Layout::from_size_align(12, 4).unwrap();
+        let increased_layout = Layout::from_size_align(14, 8).unwrap();
+        let test_context = TestContext::new(initial_layout);
+
+        for _ in 0..test_context.sut.number_of_buckets() {
+            assert_that!(unsafe { test_context.sut.allocate(initial_layout) }, is_ok);
+        }
+
+        assert_that!(
+            unsafe { test_context.sut.allocate(increased_layout) },
+            is_err
+        );
+
+        let hint = test_context
+            .sut
+            .resize_hint(increased_layout, AllocationStrategy::PowerOfTwo);
+        assert_that!(hint.config.bucket_layout.size(), eq increased_layout.size().next_power_of_two());
+        assert_that!(hint.config.bucket_layout.align(), eq increased_layout.align());
+        assert_that!(hint.payload_size, eq increased_layout.size().next_power_of_two() * (test_context.sut.number_of_buckets() + 1).next_power_of_two() as usize);
+    }
+
+    #[test]
+    fn new_resize_hint_with_best_fit_when_buckets_are_exhausted() {
+        let initial_layout = Layout::from_size_align(12, 4).unwrap();
+        let increased_layout = Layout::from_size_align(16, 8).unwrap();
+        let test_context = TestContext::new(initial_layout);
+
+        for _ in 0..test_context.sut.number_of_buckets() {
+            assert_that!(unsafe { test_context.sut.allocate(initial_layout) }, is_ok);
+        }
+
+        assert_that!(
+            unsafe { test_context.sut.allocate(increased_layout) },
+            is_err
+        );
+
+        let hint = test_context
+            .sut
+            .resize_hint(increased_layout, AllocationStrategy::BestFit);
+        assert_that!(hint.config.bucket_layout.size(), eq increased_layout.size());
+        assert_that!(hint.config.bucket_layout.align(), eq increased_layout.align());
+        assert_that!(hint.payload_size, eq increased_layout.size() * (test_context.sut.number_of_buckets() + 1) as usize);
+    }
+
     #[test]
     fn allocate_and_release_all_buckets_works() {
         const REPETITIONS: usize = 10;
@@ -79,8 +188,8 @@ mod shm_allocator_pool_allocator {
         for _ in 0..test_context.sut.number_of_buckets() {
             let memory = unsafe { test_context.sut.allocate(BUCKET_CONFIG).unwrap() };
             // the returned offset must be a multiple of the bucket size
-            assert_that!((memory.value() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
-            assert_that!(mem_set.insert(memory.value()), eq true);
+            assert_that!((memory.offset() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
+            assert_that!(mem_set.insert(memory.offset()), eq true);
         }
 
         assert_that!(unsafe { test_context.sut.allocate(BUCKET_CONFIG) }, eq Err(ShmAllocationError::AllocationError(AllocationError::OutOfMemory)));
@@ -105,12 +214,12 @@ mod shm_allocator_pool_allocator {
         for _ in 0..(test_context.sut.number_of_buckets() - 1) {
             let memory_1 = unsafe { test_context.sut.allocate(BUCKET_CONFIG).unwrap() };
             // the returned offset must be a multiple of the bucket size
-            assert_that!((memory_1.value() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
+            assert_that!((memory_1.offset() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
 
             let memory_2 = unsafe { test_context.sut.allocate(BUCKET_CONFIG).unwrap() };
             // the returned offset must be a multiple of the bucket size
-            assert_that!((memory_2.value() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
-            assert_that!(mem_set.insert(memory_2.value()), eq true);
+            assert_that!((memory_2.offset() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
+            assert_that!(mem_set.insert(memory_2.offset()), eq true);
 
             unsafe {
                 test_context.sut.deallocate(memory_1, BUCKET_CONFIG);
@@ -119,8 +228,8 @@ mod shm_allocator_pool_allocator {
         let memory = unsafe { test_context.sut.allocate(BUCKET_CONFIG).unwrap() };
         // the returned offset must be a multiple of the bucket size
-        assert_that!((memory.value() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
-        assert_that!(mem_set.insert(memory.value()), eq true);
+        assert_that!((memory.offset() - test_context.sut.relative_start_address()) % BUCKET_CONFIG.size(), eq 0);
+        assert_that!(mem_set.insert(memory.offset()), eq true);
 
         assert_that!(unsafe { test_context.sut.allocate(BUCKET_CONFIG) }, eq Err(ShmAllocationError::AllocationError(AllocationError::OutOfMemory)));
@@ -145,7 +254,7 @@ mod shm_allocator_pool_allocator {
                 Layout::from_size_align(128.min(2_usize.pow(i)), 2_usize.pow(n)).unwrap();
             let mut counter = 0;
             while let Ok(memory) = unsafe { test_context.sut.allocate(mem_layout) } {
-                assert_that!(memory.value() % mem_layout.align(), eq 0 );
+                assert_that!(memory.offset() % mem_layout.align(), eq 0 );
                 counter += 1;
             }
 
@@ -169,7 +278,7 @@ mod shm_allocator_pool_allocator {
                 Layout::from_size_align(128.min(2_usize.pow(i)), 2_usize.pow(n)).unwrap();
 
             if let Ok(memory) = unsafe { test_context.sut.allocate(mem_layout) } {
-                assert_that!(memory.value() % mem_layout.align(), eq 0 );
+                assert_that!(memory.offset() % mem_layout.align(), eq 0 );
                 counter += 1;
             } else {
                 keep_running = false;
diff --git a/iceoryx2-cal/tests/shm_allocator_trait_tests.rs b/iceoryx2-cal/tests/shm_allocator_trait_tests.rs
index ddb2489ee..b65fc3466 100644
--- a/iceoryx2-cal/tests/shm_allocator_trait_tests.rs
+++ b/iceoryx2-cal/tests/shm_allocator_trait_tests.rs
@@ -105,7 +105,7 @@ mod shm_allocator {
         let layout = unsafe { Layout::from_size_align_unchecked(CHUNK_SIZE, 1) };
         let distance = unsafe { test.sut().allocate(layout).unwrap() };
 
-        assert_that!(distance.value(), eq 0);
+        assert_that!(distance.offset(), eq 0);
 
         unsafe { test.sut().deallocate(distance, layout) };
     }
diff --git a/iceoryx2-cal/tests/zero_copy_connection_trait_tests.rs b/iceoryx2-cal/tests/zero_copy_connection_trait_tests.rs
index a749899e2..37534a54b 100644
--- a/iceoryx2-cal/tests/zero_copy_connection_trait_tests.rs
+++ b/iceoryx2-cal/tests/zero_copy_connection_trait_tests.rs
@@ -350,12 +350,12 @@ mod zero_copy_connection {
         );
         let sample = sut_receiver.receive().unwrap();
         assert_that!(sample, is_some);
-        assert_that!(sample.as_ref().unwrap().value(), eq sample_offset);
+        assert_that!(sample.as_ref().unwrap().offset(), eq sample_offset);
 
         assert_that!(sut_receiver.release(sample.unwrap()), is_ok);
         let retrieval = sut_sender.reclaim().unwrap();
         assert_that!(retrieval, is_some);
-        assert_that!(retrieval.as_ref().unwrap().value(), eq sample_offset);
+        assert_that!(retrieval.as_ref().unwrap().offset(), eq sample_offset);
 
         let retrieval = sut_sender.reclaim().unwrap();
         assert_that!(retrieval, is_none);
@@ -440,7 +440,7 @@ mod zero_copy_connection {
             let sample_offset = SAMPLE_SIZE * (BUFFER_SIZE + i);
             let result = sut_sender.try_send(PointerOffset::new(sample_offset));
             assert_that!(result, is_ok);
-            assert_that!(result.ok().unwrap().unwrap().value(), eq overflow_sample_offset);
+            assert_that!(result.ok().unwrap().unwrap().offset(), eq overflow_sample_offset);
         }
     }
 
@@ -477,7 +477,7 @@ mod zero_copy_connection {
             let sample = receiver.receive();
             let sample_offset = SAMPLE_SIZE * i;
             assert_that!(sample, is_ok);
-            assert_that!(sample.ok().unwrap().unwrap().value(), eq sample_offset);
+            assert_that!(sample.ok().unwrap().unwrap().offset(), eq sample_offset);
         }
     }
 
@@ -590,8 +590,8 @@ mod zero_copy_connection {
             std::thread::sleep(TIMEOUT);
             let sample_2 = receive_sample();
 
-            assert_that!(sample_1.value(), eq sample_offset_1);
-            assert_that!(sample_2.value(), eq sample_offset_2);
+            assert_that!(sample_1.offset(), eq sample_offset_1);
+            assert_that!(sample_2.offset(), eq sample_offset_2);
         });
 
         barrier.wait();
@@ -642,7 +642,7 @@ mod zero_copy_connection {
         for _ in 0..BUFFER_SIZE {
             unsafe {
                 sut_sender.acquire_used_offsets(|offset| {
-                    assert_that!(offsets.remove(&offset.value()), eq true);
+                    assert_that!(offsets.remove(&offset.offset()), eq true);
                 })
             };
         }
@@ -692,7 +692,7 @@ mod zero_copy_connection {
         for _ in 0..BUFFER_SIZE {
             unsafe {
                 sut_sender.acquire_used_offsets(|offset| {
-                    assert_that!(offsets.remove(&offset.value()), eq true);
+                    assert_that!(offsets.remove(&offset.offset()), eq true);
                 })
             };
         }
@@ -785,7 +785,7 @@ mod zero_copy_connection {
         for _ in 0..BUFFER_SIZE {
             unsafe {
                 sut_sender.acquire_used_offsets(|offset| {
-                    assert_that!(offsets.remove(&offset.value()), eq true);
+                    assert_that!(offsets.remove(&offset.offset()), eq true);
                 })
            };
        }
diff --git a/iceoryx2/src/port/publisher.rs b/iceoryx2/src/port/publisher.rs
index e7e4b7b30..c8a88ae91 100644
--- a/iceoryx2/src/port/publisher.rs
+++ b/iceoryx2/src/port/publisher.rs
@@ -265,7 +265,7 @@ impl DataSegment {
         let msg = "Unable to allocate Sample";
         let ptr = self.memory.allocate(layout)?;
 
-        if self.sample_reference_counter[self.sample_index(ptr.offset.value())]
+        if self.sample_reference_counter[self.sample_index(ptr.offset.offset())]
             .fetch_add(1, Ordering::Relaxed)
             != 0
         {
@@ -282,7 +282,7 @@ impl DataSegment {
     }
 
     fn release_sample(&self, distance_to_chunk: PointerOffset) {
-        if self.sample_reference_counter[self.sample_index(distance_to_chunk.value())]
+        if self.sample_reference_counter[self.sample_index(distance_to_chunk.offset())]
            .fetch_sub(1, Ordering::Relaxed)
            == 1
        {
diff --git a/iceoryx2/src/port/subscriber.rs b/iceoryx2/src/port/subscriber.rs
index e1767eb63..b894b9eb5 100644
--- a/iceoryx2/src/port/subscriber.rs
+++ b/iceoryx2/src/port/subscriber.rs
@@ -306,7 +306,7 @@ impl
             None => Ok(None),
             Some(offset) => {
                 let absolute_address =
-                    offset.value() + connection.data_segment.payload_start_address();
+                    offset.offset() + connection.data_segment.payload_start_address();
                 let details = SampleDetails {
                     publisher_connection: connection.clone(),
diff --git a/iceoryx2/src/sample_mut.rs b/iceoryx2/src/sample_mut.rs
index f8a31b88c..de4b29404 100644
--- a/iceoryx2/src/sample_mut.rs
+++ b/iceoryx2/src/sample_mut.rs
@@ -288,6 +288,6 @@ impl<
     /// # }
     /// ```
     pub fn send(self) -> Result {
-        self.data_segment.send_sample(self.offset_to_chunk.value())
+        self.data_segment.send_sample(self.offset_to_chunk.offset())
     }
 }
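The mechanical change running through these last hunks is the rename of `PointerOffset::value()` to `PointerOffset::offset()`; the pointer_offset tests at the top of this patch show that the type now also carries a `SegmentId`. A minimal sketch of the resulting API, assuming only what those tests exercise (including that `SegmentId` supports equality comparison, as the test assertions imply):

use iceoryx2_cal::shm_allocator::{PointerOffset, SegmentId};

fn main() {
    // was: chunk.value() before this patch; now the offset and the owning
    // segment are addressed separately
    let mut chunk = PointerOffset::new(1024);
    chunk.set_segment_id(SegmentId::new(3));

    assert_eq!(chunk.offset(), 1024);
    assert_eq!(chunk.segment_id(), SegmentId::new(3));
}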