refactor(x86_64): migrate to free-list crate
Signed-off-by: Martin Kröning <[email protected]>
mkroening committed May 8, 2024
1 parent c221155 commit c2d6abc
Showing 5 changed files with 90 additions and 47 deletions.
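
This commit replaces the kernel's in-tree `mm::freelist` (`FreeList`/`FreeListEntry` with `push`, `allocate(size, align)`, `deallocate(addr, size)`, and `reserve`) with the external `free-list` crate. Going by the calls that appear in the diff below, the new API is built around `FreeList<const N: usize>`, `PageRange`, and `PageLayout`. The following is a minimal sketch of that usage; the addresses are made up and the method signatures are inferred from this diff rather than checked against the crate's documentation:

```rust
use free_list::{AllocError, FreeList, PageLayout, PageRange};
use hermit_sync::InterruptTicketMutex;

// A free list with inline storage for up to 16 ranges, mirroring the
// `FreeList<16>` statics introduced in this commit.
static FREE_LIST: InterruptTicketMutex<FreeList<16>> =
    InterruptTicketMutex::new(FreeList::new());

fn sketch() -> Result<(), AllocError> {
    // Hand a region to the allocator. `deallocate` is unsafe because the
    // caller must guarantee that the range is valid and really unused.
    let region = PageRange::new(0x40_0000, 0x80_0000).unwrap();
    unsafe {
        FREE_LIST.lock().deallocate(region).unwrap();
    }

    // Allocate by size and alignment; `start()` of the returned range is
    // the address that the wrappers in this commit hand back to callers.
    let layout = PageLayout::from_size_align(0x1000, 0x1000).unwrap();
    let range = FREE_LIST.lock().allocate(layout)?;
    let addr = range.start();

    // Return the pages...
    unsafe {
        FREE_LIST.lock().deallocate(range).unwrap();
    }

    // ...and claim the exact range back, which is what `reserve()` now
    // does via `allocate_at` (its errors are deliberately ignored there).
    let fixed = PageRange::from_start_len(addr, 0x1000).unwrap();
    FREE_LIST.lock().allocate_at(fixed).ok();

    Ok(())
}
```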
21 changes: 19 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -80,6 +80,7 @@ cfg-if = "1"
crossbeam-utils = { version = "0.8", default-features = false }
dyn-clone = "1.0"
fdt = "0.1"
free-list = "0.3"
hashbrown = { version = "0.14", default-features = false }
hermit-entry = { version = "0.10", features = ["kernel"] }
hermit-sync = "0.1"
76 changes: 46 additions & 30 deletions src/arch/x86_64/mm/physicalmem.rs
@@ -1,17 +1,16 @@
use core::alloc::AllocError;
use core::sync::atomic::{AtomicUsize, Ordering};

use ::x86_64::structures::paging::{FrameAllocator, PhysFrame};
use free_list::{AllocError, FreeList, PageLayout, PageRange};
use hermit_sync::InterruptTicketMutex;
use multiboot::information::{MemoryType, Multiboot};

use crate::arch::x86_64::kernel::{get_fdt, get_limit, get_mbinfo};
use crate::arch::x86_64::mm::paging::{BasePageSize, PageSize};
use crate::arch::x86_64::mm::{MultibootMemory, PhysAddr, VirtAddr};
use crate::mm;
use crate::mm::freelist::{FreeList, FreeListEntry};

static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList> =
static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
InterruptTicketMutex::new(FreeList::new());
static TOTAL_MEMORY: AtomicUsize = AtomicUsize::new(0);

@@ -45,12 +44,14 @@ fn detect_from_fdt() -> Result<(), ()> {
VirtAddr(start_address)
};

let entry = FreeListEntry::new(start_address.as_usize(), end_address as usize);
let range = PageRange::new(start_address.as_usize(), end_address as usize).unwrap();
let _ = TOTAL_MEMORY.fetch_add(
(end_address - start_address.as_u64()) as usize,
Ordering::SeqCst,
);
PHYSICAL_FREE_LIST.lock().push(entry);
unsafe {
PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
}
}

assert!(
@@ -84,15 +85,18 @@ fn detect_from_multiboot_info() -> Result<(), ()> {
VirtAddr(m.base_address())
};

let entry = FreeListEntry::new(
let range = PageRange::new(
start_address.as_usize(),
(m.base_address() + m.length()) as usize,
);
)
.unwrap();
let _ = TOTAL_MEMORY.fetch_add(
(m.base_address() + m.length() - start_address.as_u64()) as usize,
Ordering::SeqCst,
);
PHYSICAL_FREE_LIST.lock().push(entry);
unsafe {
PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
}
}

assert!(
@@ -111,18 +115,25 @@ fn detect_from_limits() -> Result<(), ()> {

// add gap for the APIC
if limit > KVM_32BIT_GAP_START {
let entry = FreeListEntry::new(mm::kernel_end_address().as_usize(), KVM_32BIT_GAP_START);
PHYSICAL_FREE_LIST.lock().push(entry);
let range =
PageRange::new(mm::kernel_end_address().as_usize(), KVM_32BIT_GAP_START).unwrap();
unsafe {
PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
}
if limit > KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE {
let entry = FreeListEntry::new(KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE, limit);
PHYSICAL_FREE_LIST.lock().push(entry);
let range = PageRange::new(KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE, limit).unwrap();
unsafe {
PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
}
TOTAL_MEMORY.store(limit - KVM_32BIT_GAP_SIZE, Ordering::SeqCst);
} else {
TOTAL_MEMORY.store(KVM_32BIT_GAP_START, Ordering::SeqCst);
}
} else {
let entry = FreeListEntry::new(mm::kernel_end_address().as_usize(), limit);
PHYSICAL_FREE_LIST.lock().push(entry);
let range = PageRange::new(mm::kernel_end_address().as_usize(), limit).unwrap();
unsafe {
PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
}
TOTAL_MEMORY.store(limit, Ordering::SeqCst);
}

@@ -150,10 +161,13 @@ pub fn allocate(size: usize) -> Result<PhysAddr, AllocError> {
BasePageSize::SIZE
);

let layout = PageLayout::from_size(size).unwrap();

Ok(PhysAddr(
PHYSICAL_FREE_LIST
.lock()
.allocate(size, None)?
.allocate(layout)?
.start()
.try_into()
.unwrap(),
))
@@ -163,10 +177,8 @@ pub struct FrameAlloc;

unsafe impl<S: x86_64::structures::paging::PageSize> FrameAllocator<S> for FrameAlloc {
fn allocate_frame(&mut self) -> Option<PhysFrame<S>> {
let addr = PHYSICAL_FREE_LIST
.lock()
.allocate(S::SIZE as usize, Some(S::SIZE as usize))
.ok()? as u64;
let layout = PageLayout::from_size_align(S::SIZE as usize, S::SIZE as usize).unwrap();
let addr = PHYSICAL_FREE_LIST.lock().allocate(layout).ok()?.start() as u64;
Some(PhysFrame::from_start_address(x86_64::PhysAddr::new(addr)).unwrap())
}
}
@@ -187,10 +199,13 @@ pub fn allocate_aligned(size: usize, align: usize) -> Result<PhysAddr, AllocError> {
BasePageSize::SIZE
);

let layout = PageLayout::from_size_align(size, align).unwrap();

Ok(PhysAddr(
PHYSICAL_FREE_LIST
.lock()
.allocate(size, Some(align))?
.allocate(layout)?
.start()
.try_into()
.unwrap(),
))
@@ -212,9 +227,11 @@ pub fn deallocate(physical_address: PhysAddr, size: usize) {
BasePageSize::SIZE
);

PHYSICAL_FREE_LIST
.lock()
.deallocate(physical_address.as_usize(), size);
let range = PageRange::from_start_len(physical_address.as_usize(), size).unwrap();

unsafe {
PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
}
}

#[allow(dead_code)]
@@ -236,14 +253,13 @@ pub fn reserve(physical_address: PhysAddr, size: usize) {
BasePageSize::SIZE
);

// we are able to ignore errors because it could be already reserved
let _ = PHYSICAL_FREE_LIST
.lock()
.reserve(physical_address.as_usize(), size);
let range = PageRange::from_start_len(physical_address.as_usize(), size).unwrap();

// FIXME: Don't ignore errors anymore
PHYSICAL_FREE_LIST.lock().allocate_at(range).ok();
}

pub fn print_information() {
PHYSICAL_FREE_LIST
.lock()
.print_information(" PHYSICAL MEMORY FREE LIST ");
let free_list = PHYSICAL_FREE_LIST.lock();
info!("Physical memory free list:\n{free_list}");
}
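
Note that the module's public wrappers keep their existing signatures (`allocate`, `allocate_aligned`, `deallocate`, `reserve`, `print_information`), so callers elsewhere in the kernel should be unaffected; only the backing free-list type and the error handling inside the module change. A hedged sketch of how those wrappers are consumed (the call site is illustrative, not part of this commit):

```rust
use crate::arch::x86_64::mm::paging::{BasePageSize, PageSize};
use crate::arch::x86_64::mm::{physicalmem, PhysAddr};

fn physical_round_trip() {
    // Sizes must stay multiples of BasePageSize::SIZE; the asserts in the
    // module check this before touching the free list.
    let size = BasePageSize::SIZE as usize;

    let frame: PhysAddr = physicalmem::allocate(size).expect("out of physical memory");

    // Giving the page back ends in an unsafe `FreeList::deallocate`, so the
    // address/size pair must describe exactly one previously allocated range.
    physicalmem::deallocate(frame, size);
}
```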
38 changes: 23 additions & 15 deletions src/arch/x86_64/mm/virtualmem.rs
@@ -1,21 +1,22 @@
use core::alloc::AllocError;

use free_list::{AllocError, FreeList, PageLayout, PageRange};
use hermit_sync::InterruptTicketMutex;

use crate::arch::x86_64::mm::paging::{BasePageSize, PageSize};
use crate::arch::x86_64::mm::VirtAddr;
use crate::mm;
use crate::mm::freelist::{FreeList, FreeListEntry};

static KERNEL_FREE_LIST: InterruptTicketMutex<FreeList> =
static KERNEL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
InterruptTicketMutex::new(FreeList::new());

pub fn init() {
let entry = FreeListEntry::new(
let range = PageRange::new(
mm::kernel_end_address().as_usize(),
kernel_heap_end().as_usize(),
);
KERNEL_FREE_LIST.lock().push(entry);
)
.unwrap();
unsafe {
KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
}
}

pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> {
@@ -28,10 +29,13 @@ pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> {
BasePageSize::SIZE
);

let layout = PageLayout::from_size(size).unwrap();

Ok(VirtAddr(
KERNEL_FREE_LIST
.lock()
.allocate(size, None)?
.allocate(layout)?
.start()
.try_into()
.unwrap(),
))
@@ -54,10 +58,13 @@ pub fn allocate_aligned(size: usize, align: usize) -> Result<VirtAddr, AllocError> {
BasePageSize::SIZE
);

let layout = PageLayout::from_size_align(size, align).unwrap();

Ok(VirtAddr(
KERNEL_FREE_LIST
.lock()
.allocate(size, Some(align))?
.allocate(layout)?
.start()
.try_into()
.unwrap(),
))
@@ -88,9 +95,11 @@ pub fn deallocate(virtual_address: VirtAddr, size: usize) {
BasePageSize::SIZE
);

KERNEL_FREE_LIST
.lock()
.deallocate(virtual_address.as_usize(), size);
let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();

unsafe {
KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
}
}

/*pub fn reserve(virtual_address: VirtAddr, size: usize) {
@@ -132,9 +141,8 @@ pub fn deallocate(virtual_address: VirtAddr, size: usize) {
}*/

pub fn print_information() {
KERNEL_FREE_LIST
.lock()
.print_information(" KERNEL VIRTUAL MEMORY FREE LIST ");
let free_list = KERNEL_FREE_LIST.lock();
info!("Virtual memory free list:\n{free_list}");
}

/// End of the virtual memory address space reserved for kernel memory.
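
Both `print_information` rewrites format the locked free list directly, which assumes that the crate's `FreeList` implements `core::fmt::Display` (the old in-tree type exposed a dedicated `print_information` method instead). Under that assumption, dumping a free list reduces to ordinary logging, roughly:

```rust
// Assumes `free_list::FreeList<16>` implements `core::fmt::Display`, which is
// what the `info!("...\n{free_list}")` calls in this diff rely on. `info!` is
// the kernel's logging macro, as used in the code above.
fn dump(free_list: &free_list::FreeList<16>) {
    info!("Kernel virtual memory free list:\n{free_list}");
}
```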
1 change: 1 addition & 0 deletions src/mm/mod.rs
@@ -1,5 +1,6 @@
pub mod allocator;
pub mod device_alloc;
#[cfg(not(target_arch = "x86_64"))]
pub mod freelist;

use core::mem;
