Skip to content

Commit

Permalink
Merge pull request #1160 from hermit-os/phys-alloc
Browse files Browse the repository at this point in the history
feat(mm): add `DeviceAlloc` for communicating with devices
  • Loading branch information
mkroening authored May 2, 2024
2 parents 9bbf71b + e0799ad commit 377f440
Show file tree
Hide file tree
Showing 7 changed files with 71 additions and 19 deletions.
11 changes: 7 additions & 4 deletions src/arch/x86_64/kernel/apic.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
use alloc::alloc::alloc;
use alloc::vec::Vec;
use core::alloc::Layout;
#[cfg(feature = "smp")]
use core::arch::x86_64::_mm_mfence;
#[cfg(feature = "acpi")]
Expand Down Expand Up @@ -27,7 +29,7 @@ use crate::arch::x86_64::mm::{paging, virtualmem, PhysAddr, VirtAddr};
use crate::arch::x86_64::swapgs;
use crate::config::*;
use crate::scheduler::CoreId;
use crate::{arch, env, mm, scheduler};
use crate::{arch, env, scheduler};

const MP_FLT_SIGNATURE: u32 = 0x5f504d5f;
const MP_CONFIG_SIGNATURE: u32 = 0x504d4350;
Expand Down Expand Up @@ -684,9 +686,10 @@ pub fn init_x2apic() {
/// Initialize the required _start variables for the next CPU to be booted.
pub fn init_next_processor_variables() {
// Allocate stack for the CPU and pass the addresses.
// Keep the stack executable to possibly support dynamically generated code on the stack (see https://security.stackexchange.com/a/47825).
let stack = mm::allocate(KERNEL_STACK_SIZE, true);
CURRENT_STACK_ADDRESS.store(stack.as_u64(), Ordering::Relaxed);
let layout = Layout::from_size_align(KERNEL_STACK_SIZE, BasePageSize::SIZE as usize).unwrap();
let stack = unsafe { alloc(layout) };
assert!(!stack.is_null());
CURRENT_STACK_ADDRESS.store(stack, Ordering::Relaxed);
}

/// Boot all Application Processors
Expand Down
4 changes: 2 additions & 2 deletions src/arch/x86_64/kernel/core_local.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ pub(crate) struct CoreLocal {
/// Task State Segment (TSS) allocated for this CPU Core.
pub tss: Cell<*mut TaskStateSegment>,
/// start address of the kernel stack
pub kernel_stack: Cell<u64>,
pub kernel_stack: Cell<*mut u8>,
/// Interface to the interrupt counters
irq_statistics: &'static IrqStatistics,
/// Queue of async tasks
Expand Down Expand Up @@ -56,7 +56,7 @@ impl CoreLocal {
core_id,
scheduler: Cell::new(ptr::null_mut()),
tss: Cell::new(ptr::null_mut()),
kernel_stack: Cell::new(0),
kernel_stack: Cell::new(ptr::null_mut()),
irq_statistics,
async_tasks: RefCell::new(Vec::new()),
#[cfg(feature = "smp")]
Expand Down
16 changes: 10 additions & 6 deletions src/arch/x86_64/kernel/gdt.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
use alloc::alloc::alloc;
use alloc::boxed::Box;
use core::alloc::Layout;
use core::sync::atomic::Ordering;

use x86_64::instructions::tables;
Expand Down Expand Up @@ -33,9 +35,9 @@ pub fn add_current_core() {

// Every task later gets its own stack, so this boot stack is only used by the Idle task on each core.
// When switching to another task on this core, this entry is replaced.
let rsp = CURRENT_STACK_ADDRESS.load(Ordering::Relaxed) + KERNEL_STACK_SIZE as u64
- TaskStacks::MARKER_SIZE as u64;
tss.privilege_stack_table[0] = VirtAddr::new(rsp);
let rsp = CURRENT_STACK_ADDRESS.load(Ordering::Relaxed);
let rsp = unsafe { rsp.add(KERNEL_STACK_SIZE - TaskStacks::MARKER_SIZE) };
tss.privilege_stack_table[0] = VirtAddr::from_ptr(rsp);
CoreLocal::get().kernel_stack.set(rsp);

// Allocate all ISTs for this core.
Expand All @@ -47,9 +49,11 @@ pub fn add_current_core() {
BasePageSize::SIZE as usize
};

let ist = crate::mm::allocate(sz, true);
let ist_start = ist.as_u64() + sz as u64 - TaskStacks::MARKER_SIZE as u64;
tss.interrupt_stack_table[i] = VirtAddr::new(ist_start);
let layout = Layout::from_size_align(sz, BasePageSize::SIZE as usize).unwrap();
let ist = unsafe { alloc(layout) };
assert!(!ist.is_null());
let ist_start = unsafe { ist.add(sz - TaskStacks::MARKER_SIZE) };
tss.interrupt_stack_table[i] = VirtAddr::from_ptr(ist_start);
}

CoreLocal::get().tss.set(tss);
Expand Down
5 changes: 3 additions & 2 deletions src/arch/x86_64/kernel/mod.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
#[cfg(feature = "common-os")]
use core::arch::asm;
use core::num::NonZeroU64;
use core::ptr;
#[cfg(feature = "newlib")]
use core::slice;
use core::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use core::sync::atomic::{AtomicPtr, AtomicU32, Ordering};

use hermit_entry::boot_info::{BootInfo, PlatformInfo, RawBootInfo};
use hermit_sync::InterruptSpinMutex;
Expand Down Expand Up @@ -246,7 +247,7 @@ pub fn print_statistics() {
/// It also synchronizes initialization of CPU cores.
pub static CPU_ONLINE: AtomicU32 = AtomicU32::new(0);

pub static CURRENT_STACK_ADDRESS: AtomicU64 = AtomicU64::new(0);
pub static CURRENT_STACK_ADDRESS: AtomicPtr<u8> = AtomicPtr::new(ptr::null_mut());

#[cfg(target_os = "none")]
#[inline(never)]
Expand Down
28 changes: 28 additions & 0 deletions src/mm/device_alloc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
use core::alloc::{AllocError, Allocator, Layout};
use core::ptr::{self, NonNull};

use align_address::Align;

use crate::arch::mm::paging::{BasePageSize, PageSize};

/// An [`Allocator`] for memory that is used to communicate with devices.
///
/// Allocations from this allocator always correspond to contiguous physical memory.
///
/// Allocations are page-granular: sizes are rounded up to a multiple of
/// [`BasePageSize::SIZE`], and layouts with an alignment larger than
/// [`BasePageSize::SIZE`] are not supported.
///
/// This is a zero-sized handle; the actual bookkeeping is done by the
/// kernel's physical/virtual memory managers that back `mm::allocate` /
/// `mm::deallocate`.
pub struct DeviceAlloc;

unsafe impl Allocator for DeviceAlloc {
	/// Allocates a page-aligned, page-granular block of device memory.
	///
	/// The returned slice may be larger than `layout.size()` (it is rounded
	/// up to whole base pages), which the [`Allocator`] contract explicitly
	/// permits.
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		// Allocations are only ever base-page-aligned, so a stricter
		// alignment cannot be honored. Per the `Allocator` documentation,
		// report failure via `Err` rather than panicking.
		if layout.align() > BasePageSize::SIZE as usize {
			return Err(AllocError);
		}
		// Round the requested size up to whole pages; this is also the size
		// that `deallocate` recomputes from the same layout.
		let size = layout.size().align_up(BasePageSize::SIZE as usize);
		let ptr = super::allocate(size, true).as_mut_ptr::<u8>();
		let slice = ptr::slice_from_raw_parts_mut(ptr, size);
		NonNull::new(slice).ok_or(AllocError)
	}

	/// Deallocates a block previously returned by [`Self::allocate`].
	///
	/// `deallocate` has no way to report errors, so an unsupported layout
	/// here is a caller bug and asserted instead.
	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		assert!(layout.align() <= BasePageSize::SIZE as usize);
		// Recompute the page-aligned size that `allocate` actually reserved.
		let size = layout.size().align_up(BasePageSize::SIZE as usize);
		// The pointer is converted to a plain address for the address-based
		// deallocator; its provenance is deliberately exposed and discarded.
		let addr = ptr.as_ptr().expose_provenance().into();
		super::deallocate(addr, size);
	}
}
22 changes: 19 additions & 3 deletions src/mm/mod.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
pub mod allocator;
pub mod device_alloc;
pub mod freelist;

use core::mem;
Expand Down Expand Up @@ -120,7 +121,22 @@ pub(crate) fn init() {
let kernel_heap_size = 10 * LargePageSize::SIZE as usize;

unsafe {
let start = allocate(kernel_heap_size, true);
let start = {
let physical_address = arch::mm::physicalmem::allocate(kernel_heap_size).unwrap();
let virtual_address = arch::mm::virtualmem::allocate(kernel_heap_size).unwrap();

let count = kernel_heap_size / BasePageSize::SIZE as usize;
let mut flags = PageTableEntryFlags::empty();
flags.normal().writable().execute_disable();
arch::mm::paging::map::<BasePageSize>(
virtual_address,
physical_address,
count,
flags,
);

virtual_address
};
ALLOCATOR.init(start.as_mut_ptr(), kernel_heap_size);

info!("Kernel heap starts at {:#x}", start);
Expand Down Expand Up @@ -297,7 +313,7 @@ pub(crate) fn print_information() {
arch::mm::virtualmem::print_information();
}

#[allow(dead_code)]
/// Soft-deprecated in favor of `DeviceAlloc`
pub(crate) fn allocate(sz: usize, no_execution: bool) -> VirtAddr {
let size = sz.align_up(BasePageSize::SIZE as usize);
let physical_address = arch::mm::physicalmem::allocate(size).unwrap();
Expand All @@ -314,7 +330,7 @@ pub(crate) fn allocate(sz: usize, no_execution: bool) -> VirtAddr {
virtual_address
}

#[allow(dead_code)]
/// Soft-deprecated in favor of `DeviceAlloc`
pub(crate) fn deallocate(virtual_address: VirtAddr, sz: usize) {
let size = sz.align_up(BasePageSize::SIZE as usize);

Expand Down
4 changes: 2 additions & 2 deletions src/scheduler/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -637,8 +637,8 @@ impl PerCoreScheduler {
let rsp = (current_task_borrowed.stacks.get_kernel_stack()
+ current_task_borrowed.stacks.get_kernel_stack_size()
- TaskStacks::MARKER_SIZE)
.as_u64();
tss.privilege_stack_table[0] = VirtAddr::new(rsp);
.as_mut_ptr();
tss.privilege_stack_table[0] = VirtAddr::from_ptr(rsp);
CoreLocal::get().kernel_stack.set(rsp);
let ist_start = (current_task_borrowed.stacks.get_interrupt_stack()
+ current_task_borrowed.stacks.get_interrupt_stack_size()
Expand Down

0 comments on commit 377f440

Please sign in to comment.