refactor(aarch64): migrate to free-list crate
Signed-off-by: Martin Kröning <[email protected]>
mkroening committed May 8, 2024
1 parent 3dd321d commit 25e1adb
Showing 3 changed files with 47 additions and 34 deletions.
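
Summary of the change: the aarch64 memory managers stop using the kernel's in-tree mm::freelist module and move to the external free-list crate. The free lists get a fixed region capacity (FreeList<16>), allocation requests are described by a PageLayout instead of a (size, Option<align>) pair, memory is registered and freed as a PageRange through the now-unsafe deallocate, and the free lists are logged via their Display output instead of a print_information helper. The stand-alone sketch below is not part of the commit; it only exercises the crate calls that appear in the diff, and the exact crate version is an assumption.

// Stand-alone sketch (not part of the commit): exercises the free-list crate API
// that the diff below migrates to. Assumed Cargo.toml dependency: free-list = "*".
use free_list::{FreeList, PageLayout, PageRange};

fn main() {
    // A free list that can track up to 16 non-contiguous regions, matching the
    // FreeList<16> statics introduced below.
    let mut free_list = FreeList::<16>::new();

    // Hand a page-aligned region to the allocator. `deallocate` is unsafe because
    // the caller must guarantee that the range is really unused.
    let range = PageRange::from_start_len(0x10_0000, 0x10_0000).unwrap();
    unsafe {
        free_list.deallocate(range).unwrap();
    }

    // Requests are described by a PageLayout instead of (size, Option<align>).
    let layout = PageLayout::from_size_align(0x4000, 0x1000).unwrap();
    let allocation = free_list.allocate(layout).unwrap();
    println!("allocated at {:#x}", allocation.start());
}
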
37 changes: 21 additions & 16 deletions src/arch/aarch64/mm/physicalmem.rs
@@ -1,15 +1,14 @@
-use core::alloc::AllocError;
 use core::sync::atomic::{AtomicUsize, Ordering};
 
+use free_list::{AllocError, FreeList, PageLayout, PageRange};
 use hermit_sync::InterruptTicketMutex;
 
 use crate::arch::aarch64::kernel::get_limit;
 use crate::arch::aarch64::mm::paging::{BasePageSize, PageSize};
 use crate::arch::aarch64::mm::PhysAddr;
 use crate::mm;
-use crate::mm::freelist::{FreeList, FreeListEntry};
 
-static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList> =
+static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
     InterruptTicketMutex::new(FreeList::new());
 static TOTAL_MEMORY: AtomicUsize = AtomicUsize::new(0);
 
@@ -19,15 +18,14 @@ fn detect_from_limits() -> Result<(), ()> {
         return Err(());
     }
 
-    let entry = FreeListEntry {
-        start: mm::kernel_end_address().as_usize(),
-        end: limit,
-    };
+    let range = PageRange::new(mm::kernel_end_address().as_usize(), limit).unwrap();
     TOTAL_MEMORY.store(
         limit - mm::kernel_end_address().as_usize(),
         Ordering::SeqCst,
     );
-    PHYSICAL_FREE_LIST.lock().push(entry);
+    unsafe {
+        PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
+    }
 
     Ok(())
 }
@@ -52,10 +50,13 @@ pub fn allocate(size: usize) -> Result<PhysAddr, AllocError> {
         BasePageSize::SIZE
     );
 
+    let layout = PageLayout::from_size(size).unwrap();
+
     Ok(PhysAddr(
         PHYSICAL_FREE_LIST
             .lock()
-            .allocate(size, None)?
+            .allocate(layout)?
+            .start()
             .try_into()
             .unwrap(),
     ))
@@ -77,10 +78,13 @@ pub fn allocate_aligned(size: usize, align: usize) -> Result<PhysAddr, AllocError> {
         BasePageSize::SIZE
     );
 
+    let layout = PageLayout::from_size_align(size, align).unwrap();
+
     Ok(PhysAddr(
         PHYSICAL_FREE_LIST
             .lock()
-            .allocate(size, Some(align))?
+            .allocate(layout)?
+            .start()
             .try_into()
             .unwrap(),
     ))
@@ -102,13 +106,14 @@ pub fn deallocate(physical_address: PhysAddr, size: usize) {
         BasePageSize::SIZE
     );
 
-    PHYSICAL_FREE_LIST
-        .lock()
-        .deallocate(physical_address.as_usize(), size);
+    let range = PageRange::from_start_len(physical_address.as_usize(), size).unwrap();
+
+    unsafe {
+        PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
+    }
 }
 
 pub fn print_information() {
-    PHYSICAL_FREE_LIST
-        .lock()
-        .print_information(" PHYSICAL MEMORY FREE LIST ");
+    let free_list = PHYSICAL_FREE_LIST.lock();
+    info!("Physical memory free list:\n{free_list}");
 }
42 changes: 25 additions & 17 deletions src/arch/aarch64/mm/virtualmem.rs
@@ -1,25 +1,26 @@
-use core::alloc::AllocError;
-
+use free_list::{AllocError, FreeList, PageLayout, PageRange};
 use hermit_sync::InterruptTicketMutex;
 
 use crate::arch::aarch64::mm::paging::{BasePageSize, PageSize};
 use crate::arch::aarch64::mm::VirtAddr;
 use crate::mm;
-use crate::mm::freelist::{FreeList, FreeListEntry};
 
-static KERNEL_FREE_LIST: InterruptTicketMutex<FreeList> =
+static KERNEL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
     InterruptTicketMutex::new(FreeList::new());
 
 /// End of the virtual memory address space reserved for kernel memory (4 GiB).
 /// This also marks the start of the virtual memory address space reserved for the task heap.
 const KERNEL_VIRTUAL_MEMORY_END: VirtAddr = VirtAddr(0x1_0000_0000);
 
 pub fn init() {
-    let entry = FreeListEntry {
-        start: mm::kernel_end_address().as_usize(),
-        end: KERNEL_VIRTUAL_MEMORY_END.as_usize(),
-    };
-    KERNEL_FREE_LIST.lock().push(entry);
+    let range = PageRange::new(
+        mm::kernel_end_address().as_usize(),
+        KERNEL_VIRTUAL_MEMORY_END.as_usize(),
+    )
+    .unwrap();
+    unsafe {
+        KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
+    }
 }
 
 pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> {
@@ -32,10 +33,13 @@ pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> {
         BasePageSize::SIZE
     );
 
+    let layout = PageLayout::from_size(size).unwrap();
+
     Ok(VirtAddr(
         KERNEL_FREE_LIST
             .lock()
-            .allocate(size, None)?
+            .allocate(layout)?
+            .start()
             .try_into()
             .unwrap(),
     ))
Expand All @@ -57,10 +61,13 @@ pub fn allocate_aligned(size: usize, align: usize) -> Result<VirtAddr, AllocErro
BasePageSize::SIZE
);

let layout = PageLayout::from_size_align(size, align).unwrap();

Ok(VirtAddr(
KERNEL_FREE_LIST
.lock()
.allocate(size, Some(align))?
.allocate(layout)?
.start()
.try_into()
.unwrap(),
))
@@ -91,9 +98,11 @@ pub fn deallocate(virtual_address: VirtAddr, size: usize) {
         BasePageSize::SIZE
     );
 
-    KERNEL_FREE_LIST
-        .lock()
-        .deallocate(virtual_address.as_usize(), size);
+    let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
+
+    unsafe {
+        KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
+    }
 }
 
 /*pub fn reserve(virtual_address: VirtAddr, size: usize) {
@@ -133,7 +142,6 @@ pub fn deallocate(virtual_address: VirtAddr, size: usize) {
 }*/
 
 pub fn print_information() {
-    KERNEL_FREE_LIST
-        .lock()
-        .print_information(" KERNEL VIRTUAL MEMORY FREE LIST ");
+    let free_list = KERNEL_FREE_LIST.lock();
+    info!("Virtual memory free list:\n{free_list}");
 }
2 changes: 1 addition & 1 deletion src/mm/mod.rs
@@ -1,6 +1,6 @@
 pub mod allocator;
 pub mod device_alloc;
-#[cfg(not(target_arch = "x86_64"))]
+#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
 pub mod freelist;
 
 use core::mem;
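
As a companion to the sketch above, the following stand-alone example (again not part of the commit, and assuming the crate behaves the same outside the kernel) shows the two PageRange constructors used in the diff, PageRange::new(start, end) during initialization and PageRange::from_start_len(start, len) in deallocate(), together with the Display formatting that the new print_information implementations rely on.

// Stand-alone sketch (assumption: the free-list crate used outside the kernel):
// the two PageRange constructors used by the commit and Display-based logging.
use free_list::{FreeList, PageRange};

fn main() {
    let mut free_list = FreeList::<16>::new();

    // Bounds-style constructor, as used when registering the initial regions.
    let by_bounds = PageRange::new(0x100_0000, 0x200_0000).unwrap();
    // Start-plus-length constructor, as used by the deallocate() functions.
    let by_len = PageRange::from_start_len(0x4000_0000, 0x10_0000).unwrap();

    unsafe {
        free_list.deallocate(by_bounds).unwrap();
        free_list.deallocate(by_len).unwrap();
    }

    // The free list is expected to implement Display here, which is what the
    // info!("...\n{free_list}") logging in the diff above relies on.
    println!("Free list:\n{free_list}");
}
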
