From 22d19e0e73628ed1b77958f9d7c3619572df05a8 Mon Sep 17 00:00:00 2001 From: Andrew Walbran Date: Tue, 24 Sep 2024 14:35:36 +0100 Subject: [PATCH] Use different Attributes types for each translation regime. --- src/idmap.rs | 162 ++++++++++++++++++-------------- src/lib.rs | 84 +++++++++++------ src/linearmap.rs | 125 ++++++++++++++---------- src/paging.rs | 198 ++++++++++++++++++++++++--------------- src/paging/attributes.rs | 166 ++++++++++++++++++++++++++++++-- src/target.rs | 67 +++++++------ 6 files changed, 536 insertions(+), 266 deletions(-) diff --git a/src/idmap.rs b/src/idmap.rs index 6cccf87..3cb5d94 100644 --- a/src/idmap.rs +++ b/src/idmap.rs @@ -25,8 +25,8 @@ impl IdTranslation { } } -impl Translation for IdTranslation { - fn allocate_table(&mut self) -> (NonNull, PhysicalAddress) { +impl Translation for IdTranslation { + fn allocate_table(&mut self) -> (NonNull>, PhysicalAddress) { let table = PageTable::new(); // Physical address is the same as the virtual address because we are using identity mapping @@ -34,7 +34,7 @@ impl Translation for IdTranslation { (table, PhysicalAddress(table.as_ptr() as usize)) } - unsafe fn deallocate_table(&mut self, page_table: NonNull) { + unsafe fn deallocate_table(&mut self, page_table: NonNull>) { // SAFETY: Our caller promises that the memory was allocated by `allocate_table` on this // `IdTranslation` and not yet deallocated. `allocate_table` used the global allocator and // appropriate layout by calling `PageTable::new()`. @@ -43,8 +43,8 @@ impl Translation for IdTranslation { } } - fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull { - NonNull::new(pa.0 as *mut PageTable).expect("Got physical address 0 for pagetable") + fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull> { + NonNull::new(pa.0 as *mut PageTable).expect("Got physical address 0 for pagetable") } } @@ -65,20 +65,26 @@ impl Translation for IdTranslation { /// ```no_run /// use aarch64_paging::{ /// idmap::IdMap, -/// paging::{attributes::Attributes, MemoryRegion, TranslationRegime}, +/// paging::{attributes::AttributesEl1, MemoryRegion, TranslationRegime}, /// }; /// /// const ASID: usize = 1; /// const ROOT_LEVEL: usize = 1; -/// const NORMAL_CACHEABLE: Attributes = Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE); +/// const NORMAL_CACHEABLE: AttributesEl1 = +/// AttributesEl1::ATTRIBUTE_INDEX_1.union(AttributesEl1::INNER_SHAREABLE); /// /// // Create a new EL1 page table with identity mapping. /// let mut idmap = IdMap::new(ASID, ROOT_LEVEL, TranslationRegime::El1And0); /// // Map a 2 MiB region of memory as read-write. -/// idmap.map_range( -/// &MemoryRegion::new(0x80200000, 0x80400000), -/// NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED, -/// ).unwrap(); +/// idmap +/// .map_range( +/// &MemoryRegion::new(0x80200000, 0x80400000), +/// NORMAL_CACHEABLE +/// | AttributesEl1::NON_GLOBAL +/// | AttributesEl1::VALID +/// | AttributesEl1::ACCESSED, +/// ) +/// .unwrap(); /// // SAFETY: Everything the program uses is within the 2 MiB region mapped above. /// unsafe { /// // Set `TTBR0_EL1` to activate the page table. @@ -94,22 +100,27 @@ impl Translation for IdTranslation { /// idmap.deactivate(); /// } /// // Now change the mapping to read-only and executable. 
-/// idmap.map_range( -/// &MemoryRegion::new(0x80200000, 0x80400000), -/// NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID -/// | Attributes::ACCESSED, -/// ).unwrap(); +/// idmap +/// .map_range( +/// &MemoryRegion::new(0x80200000, 0x80400000), +/// NORMAL_CACHEABLE +/// | AttributesEl1::NON_GLOBAL +/// | AttributesEl1::READ_ONLY +/// | AttributesEl1::VALID +/// | AttributesEl1::ACCESSED, +/// ) +/// .unwrap(); /// // SAFETY: Everything the program will used is mapped in by this page table. /// unsafe { /// idmap.activate(); /// } /// ``` #[derive(Debug)] -pub struct IdMap { - mapping: Mapping, +pub struct IdMap { + mapping: Mapping, } -impl IdMap { +impl IdMap { /// Creates a new identity-mapping page table with the given ASID and root level. pub fn new(asid: usize, rootlevel: usize, translation_regime: TranslationRegime) -> Self { Self { @@ -192,7 +203,7 @@ impl IdMap { /// /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings, /// and modifying those would violate architectural break-before-make (BBM) requirements. - pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> { + pub fn map_range(&mut self, range: &MemoryRegion, flags: A) -> Result<(), MapError> { self.map_range_with_constraints(range, flags, Constraints::empty()) } @@ -219,9 +230,9 @@ impl IdMap { pub fn map_range_with_constraints( &mut self, range: &MemoryRegion, - flags: Attributes, + flags: A, constraints: Constraints, - ) -> Result<(), MapError> { + ) -> Result<(), MapError> { let pa = IdTranslation::virtual_to_physical(range.start()); self.mapping.map_range(range, pa, flags, constraints) } @@ -263,9 +274,9 @@ impl IdMap { /// /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings, /// and modifying those would violate architectural break-before-make (BBM) requirements. - pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> + pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> where - F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, + F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, { self.mapping.modify_range(range, f) } @@ -293,9 +304,9 @@ impl IdMap { /// /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the /// largest virtual address covered by the page table given its root level. 
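///
/// A minimal sketch of using `walk_range` to count live descriptors (the
/// region and the counter are illustrative, not part of the crate):
///
/// ```no_run
/// use aarch64_paging::{
///     idmap::IdMap,
///     paging::{attributes::AttributesEl1, MemoryRegion, TranslationRegime},
/// };
///
/// let idmap = IdMap::<AttributesEl1>::new(1, 1, TranslationRegime::El1And0);
/// let mut live = 0;
/// idmap
///     .walk_range(&MemoryRegion::new(0, 0x4000), &mut |_range, desc, _level| {
///         if desc.is_valid() {
///             live += 1;
///         }
///         Ok(())
///     })
///     .unwrap();
/// ```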
- pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> + pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> where - F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, + F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, { self.mapping.walk_range(range, f) } @@ -335,12 +346,12 @@ impl IdMap { #[cfg(test)] mod tests { use super::*; - use crate::paging::{BITS_PER_LEVEL, PAGE_SIZE}; + use crate::paging::{attributes::AttributesEl1, BITS_PER_LEVEL, PAGE_SIZE}; const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39; - const DEVICE_NGNRE: Attributes = Attributes::ATTRIBUTE_INDEX_0; - const NORMAL_CACHEABLE: Attributes = - Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE); + const DEVICE_NGNRE: AttributesEl1 = AttributesEl1::ATTRIBUTE_INDEX_0; + const NORMAL_CACHEABLE: AttributesEl1 = + AttributesEl1::ATTRIBUTE_INDEX_1.union(AttributesEl1::INNER_SHAREABLE); #[test] fn map_valid() { @@ -354,7 +365,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, 1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -369,7 +380,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, PAGE_SIZE * 2), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -387,7 +398,7 @@ mod tests { MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1, MAX_ADDRESS_FOR_ROOT_LEVEL_1 ), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -402,7 +413,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -417,7 +428,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -430,7 +441,7 @@ mod tests { idmap .map_range_with_constraints( &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, Constraints::NO_BLOCK_MAPPINGS, ) .unwrap(); @@ -444,7 +455,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ), Ok(()) ); @@ -453,7 +464,7 @@ mod tests { idmap .map_range( &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ) .ok(); // SAFETY: This doesn't actually activate the page table in tests, it just treats it as @@ -467,7 +478,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ), Ok(()) ); @@ -476,7 +487,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + 
NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ), Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new( BLOCK_SIZE, @@ -489,7 +500,10 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED | Attributes::READ_ONLY, + NORMAL_CACHEABLE + | AttributesEl1::VALID + | AttributesEl1::ACCESSED + | AttributesEl1::READ_ONLY, ), Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new( BLOCK_SIZE, @@ -499,7 +513,10 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, BLOCK_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED | Attributes::READ_ONLY, + NORMAL_CACHEABLE + | AttributesEl1::VALID + | AttributesEl1::ACCESSED + | AttributesEl1::READ_ONLY, ), Ok(()) ); @@ -508,7 +525,10 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, BLOCK_SIZE), - DEVICE_NGNRE | Attributes::VALID | Attributes::ACCESSED | Attributes::NON_GLOBAL, + DEVICE_NGNRE + | AttributesEl1::VALID + | AttributesEl1::ACCESSED + | AttributesEl1::NON_GLOBAL, ), Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new( 0, PAGE_SIZE @@ -535,7 +555,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, 2 * PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ), Ok(()) ); @@ -545,9 +565,9 @@ mod tests { idmap.map_range( &MemoryRegion::new(0, PAGE_SIZE), NORMAL_CACHEABLE - | Attributes::VALID - | Attributes::ACCESSED - | Attributes::NON_GLOBAL, + | AttributesEl1::VALID + | AttributesEl1::ACCESSED + | AttributesEl1::NON_GLOBAL, ), Ok(()) ); @@ -556,7 +576,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ), Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new( 0, PAGE_SIZE @@ -572,7 +592,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ), Ok(()) ); @@ -589,7 +609,7 @@ mod tests { MAX_ADDRESS_FOR_ROOT_LEVEL_1, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1, ), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Err(MapError::AddressRange(VirtualAddress( MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE @@ -600,7 +620,7 @@ mod tests { assert_eq!( idmap.map_range( &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Err(MapError::AddressRange(VirtualAddress( MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE @@ -608,16 +628,16 @@ mod tests { ); } - fn make_map() -> IdMap { + fn make_map() -> IdMap { let mut idmap = IdMap::new(1, 1, TranslationRegime::El1And0); idmap .map_range( &MemoryRegion::new(0, PAGE_SIZE * 2), NORMAL_CACHEABLE - | Attributes::NON_GLOBAL - | Attributes::READ_ONLY - | Attributes::VALID - | Attributes::ACCESSED, + | AttributesEl1::NON_GLOBAL + | AttributesEl1::READ_ONLY + | AttributesEl1::VALID + | AttributesEl1::ACCESSED, ) .unwrap(); // SAFETY: This doesn't actually activate the page table in tests, it just treats it as @@ -635,8 +655,10 @@ mod tests { .modify_range( &MemoryRegion::new(PAGE_SIZE * 2, 1), &|_range, entry, _level| { - entry - 
.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap()); + entry.modify_flags( + AttributesEl1::SWFLAG_0, + AttributesEl1::from_bits(0usize).unwrap(), + ); Ok(()) }, ) @@ -649,7 +671,7 @@ mod tests { assert!(idmap .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| { if level == 3 || !entry.is_table_or_page() { - entry.modify_flags(Attributes::SWFLAG_0, Attributes::NON_GLOBAL); + entry.modify_flags(AttributesEl1::SWFLAG_0, AttributesEl1::NON_GLOBAL); } Ok(()) }) @@ -657,8 +679,10 @@ mod tests { idmap .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| { if level == 3 || !entry.is_table_or_page() { - entry - .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap()); + entry.modify_flags( + AttributesEl1::SWFLAG_0, + AttributesEl1::from_bits(0usize).unwrap(), + ); } Ok(()) }) @@ -666,7 +690,7 @@ mod tests { idmap .modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| { if level == 3 || !entry.is_table_or_page() { - assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0)); + assert!(entry.flags().unwrap().contains(AttributesEl1::SWFLAG_0)); assert_eq!(range.end() - range.start(), PAGE_SIZE); } Ok(()) @@ -686,16 +710,16 @@ mod tests { idmap .map_range( &MemoryRegion::new(0, BLOCK_RANGE), - NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::SWFLAG_0, + NORMAL_CACHEABLE | AttributesEl1::NON_GLOBAL | AttributesEl1::SWFLAG_0, ) .unwrap(); idmap .map_range( &MemoryRegion::new(0, PAGE_SIZE), NORMAL_CACHEABLE - | Attributes::NON_GLOBAL - | Attributes::VALID - | Attributes::ACCESSED, + | AttributesEl1::NON_GLOBAL + | AttributesEl1::VALID + | AttributesEl1::ACCESSED, ) .unwrap(); idmap @@ -703,7 +727,7 @@ mod tests { &MemoryRegion::new(0, BLOCK_RANGE), &|range, entry, level| { if level == 3 { - let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0); + let has_swflag = entry.flags().unwrap().contains(AttributesEl1::SWFLAG_0); let is_first_page = range.start().0 == 0usize; assert!(has_swflag != is_first_page); } @@ -721,7 +745,7 @@ mod tests { idmap .map_range( &MemoryRegion::new(0, PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ) .unwrap(); idmap @@ -729,7 +753,7 @@ mod tests { &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 20), &mut |_, descriptor, _| { assert!(!descriptor.is_valid()); - assert_eq!(descriptor.flags(), Some(Attributes::empty())); + assert_eq!(descriptor.flags(), Some(AttributesEl1::empty())); assert_eq!(descriptor.output_address(), PhysicalAddress(0)); Ok(()) }, diff --git a/src/lib.rs b/src/lib.rs index 46755f4..d7a0db3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,20 +19,26 @@ //! # #[cfg(feature = "alloc")] { //! use aarch64_paging::{ //! idmap::IdMap, -//! paging::{attributes::Attributes, MemoryRegion, TranslationRegime}, +//! paging::{attributes::AttributesEl1, MemoryRegion, TranslationRegime}, //! }; //! //! const ASID: usize = 1; //! const ROOT_LEVEL: usize = 1; -//! const NORMAL_CACHEABLE: Attributes = Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE); +//! const NORMAL_CACHEABLE: AttributesEl1 = +//! AttributesEl1::ATTRIBUTE_INDEX_1.union(AttributesEl1::INNER_SHAREABLE); //! //! // Create a new EL1 page table with identity mapping. //! let mut idmap = IdMap::new(ASID, ROOT_LEVEL, TranslationRegime::El1And0); //! // Map a 2 MiB region of memory as read-write. -//! idmap.map_range( -//! &MemoryRegion::new(0x80200000, 0x80400000), -//! 
NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED, -//! ).unwrap(); +//! idmap +//! .map_range( +//! &MemoryRegion::new(0x80200000, 0x80400000), +//! NORMAL_CACHEABLE +//! | AttributesEl1::NON_GLOBAL +//! | AttributesEl1::VALID +//! | AttributesEl1::ACCESSED, +//! ) +//! .unwrap(); //! // SAFETY: Everything the program uses is within the 2 MiB region mapped above. //! unsafe { //! // Set `TTBR0_EL1` to activate the page table. @@ -61,13 +67,14 @@ extern crate alloc; use core::arch::asm; use core::fmt::{self, Display, Formatter}; use paging::{ - attributes::Attributes, Constraints, Descriptor, MemoryRegion, PhysicalAddress, RootTable, - Translation, TranslationRegime, VaRange, VirtualAddress, + attributes::{Attributes, CommonAttributes}, + Constraints, Descriptor, MemoryRegion, PhysicalAddress, RootTable, Translation, + TranslationRegime, VaRange, VirtualAddress, }; /// An error attempting to map some range in the page table. #[derive(Clone, Debug, Eq, PartialEq)] -pub enum MapError { +pub enum MapError { /// The address requested to be mapped was out of the range supported by the page table /// configuration. AddressRange(VirtualAddress), @@ -76,14 +83,14 @@ pub enum MapError { /// The end of the memory region is before the start. RegionBackwards(MemoryRegion), /// There was an error while updating a page table entry. - PteUpdateFault(Descriptor), + PteUpdateFault(Descriptor), /// The requested flags are not supported for this mapping - InvalidFlags(Attributes), + InvalidFlags(A), /// Updating the range violates break-before-make rules and the mapping is live BreakBeforeMakeViolation(MemoryRegion), } -impl Display for MapError { +impl Display for MapError { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Self::AddressRange(va) => write!(f, "Virtual address {} out of range", va), @@ -114,15 +121,15 @@ impl Display for MapError { /// switch back to a previous static page table, and then `activate` again after making the desired /// changes. #[derive(Debug)] -pub struct Mapping { - root: RootTable, +pub struct Mapping, A: Attributes> { + root: RootTable, #[allow(unused)] asid: usize, #[allow(unused)] previous_ttbr: Option, } -impl Mapping { +impl, A: Attributes> Mapping { /// Creates a new page table with the given ASID, root level and translation mapping. pub fn new( translation: T, @@ -317,13 +324,13 @@ impl Mapping { /// Checks whether the given range can be mapped or updated while the translation is live, /// without violating architectural break-before-make (BBM) requirements. 
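///
/// For example (a sketch based on this crate's tests; `idmap` and `range`
/// are assumed to exist, with `range` already mapped live as normal
/// cacheable memory): remapping it with a different attribute index would
/// change its memory type, so the update is rejected instead of being
/// applied in place.
///
/// ```ignore
/// idmap
///     .map_range(
///         &range,
///         AttributesEl1::ATTRIBUTE_INDEX_0 // device memory in the tests' MAIR layout
///             | AttributesEl1::VALID
///             | AttributesEl1::ACCESSED,
///     )
///     .expect_err("changing the memory type of a live mapping violates BBM");
/// ```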
- fn check_range_bbm(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError> + fn check_range_bbm(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError> where - F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, + F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, { self.root.visit_range( range, - &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| { + &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| { if d.is_valid() { let err = MapError::BreakBeforeMakeViolation(mr.clone()); @@ -340,7 +347,7 @@ impl Mapping { (dd.flags().ok_or(err.clone())?, dd.output_address()) }; - if !flags.contains(Attributes::VALID) { + if !flags.contains(CommonAttributes::VALID.into()) { // Removing the valid bit is always ok return Ok(()); } @@ -353,13 +360,15 @@ impl Mapping { let desc_flags = d.flags().unwrap(); if (desc_flags ^ flags).intersects( - Attributes::ATTRIBUTE_INDEX_MASK | Attributes::SHAREABILITY_MASK, + (CommonAttributes::ATTRIBUTE_INDEX_MASK + | CommonAttributes::SHAREABILITY_MASK) + .into(), ) { // Cannot change memory type return Err(err); } - if (desc_flags - flags).contains(Attributes::NON_GLOBAL) { + if (desc_flags - flags).contains(CommonAttributes::NON_GLOBAL.into()) { // Cannot convert from non-global to global return Err(err); } @@ -393,11 +402,11 @@ impl Mapping { &mut self, range: &MemoryRegion, pa: PhysicalAddress, - flags: Attributes, + flags: A, constraints: Constraints, - ) -> Result<(), MapError> { + ) -> Result<(), MapError> { if self.active() { - let c = |mr: &MemoryRegion, d: &mut Descriptor, lvl: usize| { + let c = |mr: &MemoryRegion, d: &mut Descriptor, lvl: usize| { let mask = !(paging::granularity_at_level(lvl) - 1); let pa = (mr.start() - range.start() + pa.0) & mask; d.set(PhysicalAddress(pa), flags); @@ -437,9 +446,9 @@ impl Mapping { /// /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings, /// and modifying those would violate architectural break-before-make (BBM) requirements. - pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> + pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> where - F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, + F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, { if self.active() { self.check_range_bbm(range, f)?; @@ -466,9 +475,9 @@ impl Mapping { /// /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the /// largest virtual address covered by the page table given its root level. 
- pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> + pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> where - F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, + F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, { self.root.walk_range(range, f) } @@ -505,7 +514,7 @@ impl Mapping { } } -impl Drop for Mapping { +impl, A: Attributes> Drop for Mapping { fn drop(&mut self) { if self.previous_ttbr.is_some() { #[cfg(target_arch = "aarch64")] @@ -524,18 +533,31 @@ mod tests { use self::idmap::IdTranslation; #[cfg(feature = "alloc")] use super::*; + use paging::attributes::{AttributesEl2, AttributesEl3}; #[cfg(feature = "alloc")] #[test] #[should_panic] fn no_el2_asid() { - Mapping::new(IdTranslation, 1, 1, TranslationRegime::El2, VaRange::Lower); + Mapping::::new( + IdTranslation, + 1, + 1, + TranslationRegime::El2, + VaRange::Lower, + ); } #[cfg(feature = "alloc")] #[test] #[should_panic] fn no_el3_asid() { - Mapping::new(IdTranslation, 1, 1, TranslationRegime::El3, VaRange::Lower); + Mapping::::new( + IdTranslation, + 1, + 1, + TranslationRegime::El3, + VaRange::Lower, + ); } } diff --git a/src/linearmap.rs b/src/linearmap.rs index e5ed242..0172b33 100644 --- a/src/linearmap.rs +++ b/src/linearmap.rs @@ -39,7 +39,10 @@ impl LinearTranslation { Self { offset } } - fn virtual_to_physical(&self, va: VirtualAddress) -> Result { + fn virtual_to_physical( + &self, + va: VirtualAddress, + ) -> Result> { if let Some(pa) = checked_add_to_unsigned(va.0 as isize, self.offset) { Ok(PhysicalAddress(pa)) } else { @@ -48,19 +51,19 @@ impl LinearTranslation { } } -impl Translation for LinearTranslation { - fn allocate_table(&mut self) -> (NonNull, PhysicalAddress) { +impl Translation for LinearTranslation { + fn allocate_table(&mut self) -> (NonNull>, PhysicalAddress) { let table = PageTable::new(); // Assume that the same linear mapping is used everywhere. let va = VirtualAddress(table.as_ptr() as usize); - let pa = self.virtual_to_physical(va).expect( + let pa = self.virtual_to_physical::(va).expect( "Allocated subtable with virtual address which doesn't correspond to any physical address." ); (table, pa) } - unsafe fn deallocate_table(&mut self, page_table: NonNull) { + unsafe fn deallocate_table(&mut self, page_table: NonNull>) { // SAFETY: Our caller promises that the memory was allocated by `allocate_table` on this // `LinearTranslation` and not yet deallocated. `allocate_table` used the global allocator // and appropriate layout by calling `PageTable::new()`. @@ -69,13 +72,13 @@ impl Translation for LinearTranslation { } } - fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull { + fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull> { let signed_pa = pa.0 as isize; if signed_pa < 0 { panic!("Invalid physical address {} for pagetable", pa); } if let Some(va) = signed_pa.checked_sub(self.offset) { - if let Some(ptr) = NonNull::new(va as *mut PageTable) { + if let Some(ptr) = NonNull::new(va as *mut PageTable) { ptr } else { panic!( @@ -100,11 +103,11 @@ fn checked_add_to_unsigned(a: isize, b: isize) -> Option { /// This assumes that the same linear mapping is used both for the page table being managed, and for /// code that is managing it. 
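///
/// A minimal sketch of the offset arithmetic (illustrative values, mirroring
/// this module's tests): with `offset = 4096`, VA range `0x0..0x2000` is
/// mapped to PA range `0x1000..0x3000`.
///
/// ```no_run
/// use aarch64_paging::{
///     linearmap::LinearMap,
///     paging::{attributes::AttributesEl1, MemoryRegion, TranslationRegime, VaRange, PAGE_SIZE},
/// };
///
/// let mut lmap =
///     LinearMap::<AttributesEl1>::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower);
/// lmap.map_range(
///     &MemoryRegion::new(0, PAGE_SIZE * 2),
///     AttributesEl1::ATTRIBUTE_INDEX_1
///         | AttributesEl1::INNER_SHAREABLE
///         | AttributesEl1::VALID
///         | AttributesEl1::ACCESSED,
/// )
/// .unwrap();
/// ```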
#[derive(Debug)] -pub struct LinearMap { - mapping: Mapping, +pub struct LinearMap { + mapping: Mapping, } -impl LinearMap { +impl LinearMap { /// Creates a new identity-mapping page table with the given ASID, root level and offset, for /// use in the given TTBR. /// @@ -202,7 +205,7 @@ impl LinearMap { /// /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings, /// and modifying those would violate architectural break-before-make (BBM) requirements. - pub fn map_range(&mut self, range: &MemoryRegion, flags: Attributes) -> Result<(), MapError> { + pub fn map_range(&mut self, range: &MemoryRegion, flags: A) -> Result<(), MapError> { self.map_range_with_constraints(range, flags, Constraints::empty()) } @@ -232,9 +235,9 @@ impl LinearMap { pub fn map_range_with_constraints( &mut self, range: &MemoryRegion, - flags: Attributes, + flags: A, constraints: Constraints, - ) -> Result<(), MapError> { + ) -> Result<(), MapError> { let pa = self .mapping .translation() @@ -279,9 +282,9 @@ impl LinearMap { /// /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings, /// and modifying those would violate architectural break-before-make (BBM) requirements. - pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> + pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> where - F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, + F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, { self.mapping.modify_range(range, f) } @@ -309,9 +312,9 @@ impl LinearMap { /// /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the /// largest virtual address covered by the page table given its root level. 
- pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> + pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> where - F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, + F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, { self.mapping.walk_range(range, f) } @@ -351,13 +354,13 @@ impl LinearMap { #[cfg(test)] mod tests { use super::*; - use crate::paging::BITS_PER_LEVEL; + use crate::paging::{attributes::AttributesEl1, BITS_PER_LEVEL}; const MAX_ADDRESS_FOR_ROOT_LEVEL_1: usize = 1 << 39; const GIB_512_S: isize = 512 * 1024 * 1024 * 1024; const GIB_512: usize = 512 * 1024 * 1024 * 1024; - const NORMAL_CACHEABLE: Attributes = - Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE); + const NORMAL_CACHEABLE: AttributesEl1 = + AttributesEl1::ATTRIBUTE_INDEX_1.union(AttributesEl1::INNER_SHAREABLE); #[test] fn map_valid() { @@ -366,7 +369,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(0, 1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -376,7 +379,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(0, PAGE_SIZE * 2), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -389,7 +392,7 @@ mod tests { MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1, MAX_ADDRESS_FOR_ROOT_LEVEL_1 ), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -407,7 +410,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -426,7 +429,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE + 1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -442,7 +445,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(PAGE_SIZE, PAGE_SIZE * 3), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -461,7 +464,7 @@ mod tests { MAX_ADDRESS_FOR_ROOT_LEVEL_1 - 1, MAX_ADDRESS_FOR_ROOT_LEVEL_1 ), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -479,7 +482,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(LEVEL_2_BLOCK_SIZE, MAX_ADDRESS_FOR_ROOT_LEVEL_1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Ok(()) ); @@ -496,7 +499,7 @@ mod tests { MAX_ADDRESS_FOR_ROOT_LEVEL_1, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1, ), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Err(MapError::AddressRange(VirtualAddress( MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE @@ -507,7 +510,7 @@ mod tests { assert_eq!( pagetable.map_range( &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1 + 1), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED ), Err(MapError::AddressRange(VirtualAddress( 
MAX_ADDRESS_FOR_ROOT_LEVEL_1 + PAGE_SIZE @@ -531,11 +534,11 @@ mod tests { let translation = LinearTranslation::new(4096); assert_eq!( translation.physical_to_virtual(PhysicalAddress(8192)), - NonNull::new(4096 as *mut PageTable).unwrap(), + NonNull::new(4096 as *mut PageTable).unwrap(), ); assert_eq!( translation.physical_to_virtual(PhysicalAddress(GIB_512 + 4096)), - NonNull::new(GIB_512 as *mut PageTable).unwrap(), + NonNull::new(GIB_512 as *mut PageTable).unwrap(), ); } @@ -543,14 +546,20 @@ mod tests { #[should_panic] fn physical_address_to_zero_ttbr0() { let translation = LinearTranslation::new(4096); - translation.physical_to_virtual(PhysicalAddress(4096)); + >::physical_to_virtual( + &translation, + PhysicalAddress(4096), + ); } #[test] #[should_panic] fn physical_address_out_of_range_ttbr0() { let translation = LinearTranslation::new(4096); - translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize)); + >::physical_to_virtual( + &translation, + PhysicalAddress(-4096_isize as usize), + ); } #[test] @@ -560,11 +569,11 @@ mod tests { let translation = LinearTranslation::new(GIB_512_S + 4096); assert_eq!( translation.physical_to_virtual(PhysicalAddress(8192)), - NonNull::new((4096 - GIB_512_S) as *mut PageTable).unwrap(), + NonNull::new((4096 - GIB_512_S) as *mut PageTable).unwrap(), ); assert_eq!( translation.physical_to_virtual(PhysicalAddress(GIB_512)), - NonNull::new(-4096_isize as *mut PageTable).unwrap(), + NonNull::new(-4096_isize as *mut PageTable).unwrap(), ); } @@ -574,7 +583,10 @@ mod tests { // Map the 512 GiB region at the top of virtual address space to the bottom of physical // address space. let translation = LinearTranslation::new(GIB_512_S); - translation.physical_to_virtual(PhysicalAddress(GIB_512)); + >::physical_to_virtual( + &translation, + PhysicalAddress(GIB_512), + ); } #[test] @@ -583,7 +595,10 @@ mod tests { // Map the 512 GiB region at the top of virtual address space to the bottom of physical // address space. let translation = LinearTranslation::new(GIB_512_S); - translation.physical_to_virtual(PhysicalAddress(-4096_isize as usize)); + >::physical_to_virtual( + &translation, + PhysicalAddress(-4096_isize as usize), + ); } #[test] @@ -591,7 +606,7 @@ mod tests { let translation = LinearTranslation::new(-4096); let va = VirtualAddress(1024); assert_eq!( - translation.virtual_to_physical(va), + translation.virtual_to_physical::(va), Err(MapError::InvalidVirtualAddress(va)) ) } @@ -604,12 +619,12 @@ mod tests { // The first page in the region covered by TTBR1. assert_eq!( - translation.virtual_to_physical(VirtualAddress(0xffff_ff80_0000_0000)), + translation.virtual_to_physical::(VirtualAddress(0xffff_ff80_0000_0000)), Ok(PhysicalAddress(0)) ); // The last page in the region covered by TTBR1. 
assert_eq!( - translation.virtual_to_physical(VirtualAddress(0xffff_ffff_ffff_f000)), + translation.virtual_to_physical::(VirtualAddress(0xffff_ffff_ffff_f000)), Ok(PhysicalAddress(0x7f_ffff_f000)) ); } @@ -622,7 +637,7 @@ mod tests { pagetable .map_range( &MemoryRegion::new(0, 1 << 30), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ) .unwrap(); assert_eq!( @@ -636,7 +651,7 @@ mod tests { pagetable .map_range( &MemoryRegion::new(0, 1 << 30), - NORMAL_CACHEABLE | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE | AttributesEl1::VALID | AttributesEl1::ACCESSED, ) .unwrap(); assert_eq!( @@ -645,7 +660,7 @@ mod tests { ); } - fn make_map() -> LinearMap { + fn make_map() -> LinearMap { let mut lmap = LinearMap::new(1, 1, 4096, TranslationRegime::El1And0, VaRange::Lower); // Mapping VA range 0x0 - 0x2000 to PA range 0x1000 - 0x3000 lmap.map_range(&MemoryRegion::new(0, PAGE_SIZE * 2), NORMAL_CACHEABLE) @@ -660,8 +675,10 @@ mod tests { .modify_range( &MemoryRegion::new(PAGE_SIZE * 2, 1), &|_range, entry, _level| { - entry - .modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap()); + entry.modify_flags( + AttributesEl1::SWFLAG_0, + AttributesEl1::from_bits(0usize).unwrap(), + ); Ok(()) }, ) @@ -673,14 +690,17 @@ mod tests { let mut lmap = make_map(); lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|_range, entry, level| { if level == 3 || !entry.is_table_or_page() { - entry.modify_flags(Attributes::SWFLAG_0, Attributes::from_bits(0usize).unwrap()); + entry.modify_flags( + AttributesEl1::SWFLAG_0, + AttributesEl1::from_bits(0usize).unwrap(), + ); } Ok(()) }) .unwrap(); lmap.modify_range(&MemoryRegion::new(1, PAGE_SIZE), &|range, entry, level| { if level == 3 || !entry.is_table_or_page() { - assert!(entry.flags().unwrap().contains(Attributes::SWFLAG_0)); + assert!(entry.flags().unwrap().contains(AttributesEl1::SWFLAG_0)); assert_eq!(range.end() - range.start(), PAGE_SIZE); } Ok(()) @@ -695,19 +715,22 @@ mod tests { let mut lmap = LinearMap::new(1, 1, 0x1000, TranslationRegime::El1And0, VaRange::Lower); lmap.map_range( &MemoryRegion::new(0, BLOCK_RANGE), - NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::SWFLAG_0, + NORMAL_CACHEABLE | AttributesEl1::NON_GLOBAL | AttributesEl1::SWFLAG_0, ) .unwrap(); lmap.map_range( &MemoryRegion::new(0, PAGE_SIZE), - NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED, + NORMAL_CACHEABLE + | AttributesEl1::NON_GLOBAL + | AttributesEl1::VALID + | AttributesEl1::ACCESSED, ) .unwrap(); lmap.modify_range( &MemoryRegion::new(0, BLOCK_RANGE), &|range, entry, level| { if level == 3 { - let has_swflag = entry.flags().unwrap().contains(Attributes::SWFLAG_0); + let has_swflag = entry.flags().unwrap().contains(AttributesEl1::SWFLAG_0); let is_first_page = range.start().0 == 0usize; assert!(has_swflag != is_first_page); } diff --git a/src/paging.rs b/src/paging.rs index 805bba5..5d63627 100644 --- a/src/paging.rs +++ b/src/paging.rs @@ -10,7 +10,7 @@ pub mod attributes; use crate::MapError; #[cfg(feature = "alloc")] use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error, Layout}; -use attributes::Attributes; +use attributes::{Attributes, CommonAttributes}; use bitflags::bitflags; use core::fmt::{self, Debug, Display, Formatter}; use core::marker::PhantomData; @@ -164,10 +164,10 @@ pub(crate) fn granularity_at_level(level: usize) -> usize { /// An implementation of this trait needs to be provided to the mapping 
routines, so that the /// physical addresses used in the page tables can be converted into virtual addresses that can be /// used to access their contents from the code. -pub trait Translation { +pub trait Translation { /// Allocates a zeroed page, which is already mapped, to be used for a new subtable of some /// pagetable. Returns both a pointer to the page and its physical address. - fn allocate_table(&mut self) -> (NonNull, PhysicalAddress); + fn allocate_table(&mut self) -> (NonNull>, PhysicalAddress); /// Deallocates the page which was previous allocated by [`allocate_table`](Self::allocate_table). /// @@ -175,10 +175,10 @@ pub trait Translation { /// /// The memory must have been allocated by `allocate_table` on the same `Translation`, and not /// yet deallocated. - unsafe fn deallocate_table(&mut self, page_table: NonNull); + unsafe fn deallocate_table(&mut self, page_table: NonNull>); /// Given the physical address of a subtable, returns the virtual address at which it is mapped. - fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull; + fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull>; } impl MemoryRegion { @@ -257,15 +257,15 @@ bitflags! { } /// A complete hierarchy of page tables including all levels. -pub struct RootTable { - table: PageTableWithLevel, +pub struct RootTable, A: Attributes> { + table: PageTableWithLevel, translation: T, pa: PhysicalAddress, translation_regime: TranslationRegime, va_range: VaRange, } -impl RootTable { +impl, A: Attributes> RootTable { /// Creates a new page table starting at the given root level. /// /// The level must be between 0 and 3; level -1 (for 52-bit addresses with LPA2) is not @@ -314,11 +314,13 @@ impl RootTable { &mut self, range: &MemoryRegion, pa: PhysicalAddress, - flags: Attributes, + flags: A, constraints: Constraints, - ) -> Result<(), MapError> { - if flags.contains(Attributes::TABLE_OR_PAGE) { - return Err(MapError::InvalidFlags(Attributes::TABLE_OR_PAGE)); + ) -> Result<(), MapError> { + if flags.contains(CommonAttributes::TABLE_OR_PAGE.into()) { + return Err(MapError::InvalidFlags( + CommonAttributes::TABLE_OR_PAGE.into(), + )); } self.verify_region(range)?; self.table @@ -385,9 +387,9 @@ impl RootTable { /// /// Returns [`MapError::BreakBeforeMakeViolation'] if the range intersects with live mappings, /// and modifying those would violate architectural break-before-make (BBM) requirements. - pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> + pub fn modify_range(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError> where - F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, + F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, { self.verify_region(range)?; self.table.modify_range(&mut self.translation, range, f) @@ -416,9 +418,9 @@ impl RootTable { /// /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the /// largest virtual address covered by the page table given its root level. 
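///
/// A sketch of measuring how many bytes are mapped by leaf pages in a range
/// (illustrative; `root` is assumed to be a `RootTable<IdTranslation,
/// AttributesEl1>` built elsewhere):
///
/// ```ignore
/// let mut mapped = 0;
/// root.walk_range(&MemoryRegion::new(0, 0x4000), &mut |range, desc, level| {
///     if level == 3 && desc.is_valid() {
///         mapped += range.end() - range.start();
///     }
///     Ok(())
/// })?;
/// ```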
- pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> + pub fn walk_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> where - F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, + F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>, { self.visit_range(range, &mut |mr, desc, level| { f(mr, desc, level).map_err(|_| MapError::PteUpdateFault(*desc)) @@ -426,9 +428,9 @@ impl RootTable { } // Private version of `walk_range` using a closure that returns MapError on error - pub(crate) fn visit_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> + pub(crate) fn visit_range(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError> where - F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), MapError>, + F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), MapError>, { self.verify_region(range)?; self.table.visit_range(&self.translation, range, f) @@ -444,7 +446,7 @@ impl RootTable { } /// Checks whether the region is within range of the page table. - fn verify_region(&self, region: &MemoryRegion) -> Result<(), MapError> { + fn verify_region(&self, region: &MemoryRegion) -> Result<(), MapError> { if region.end() < region.start() { return Err(MapError::RegionBackwards(region.clone())); } @@ -468,7 +470,7 @@ impl RootTable { } } -impl Debug for RootTable { +impl, A: Attributes> Debug for RootTable { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { writeln!( f, @@ -480,7 +482,7 @@ impl Debug for RootTable { } } -impl Drop for RootTable { +impl, A: Attributes> Drop for RootTable { fn drop(&mut self) { // SAFETY: We created the table in `RootTable::new` by calling `PageTableWithLevel::new` // with `self.translation`. Subtables were similarly created by @@ -519,20 +521,20 @@ impl Iterator for ChunkedIterator<'_> { /// implement `Debug` and `Drop`, as walking the page table hierachy requires knowing the starting /// level. #[derive(Debug)] -struct PageTableWithLevel { - table: NonNull, +struct PageTableWithLevel, A: Attributes> { + table: NonNull>, level: usize, _translation: PhantomData, } // SAFETY: The underlying PageTable is process-wide and can be safely accessed from any thread // with appropriate synchronization. This type manages ownership for the raw pointer. -unsafe impl Send for PageTableWithLevel {} +unsafe impl + Send, A: Attributes + Send> Send for PageTableWithLevel {} // SAFETY: &Self only allows reading from the page table, which is safe to do from any thread. -unsafe impl Sync for PageTableWithLevel {} +unsafe impl + Sync, A: Attributes + Sync> Sync for PageTableWithLevel {} -impl PageTableWithLevel { +impl, A: Attributes> PageTableWithLevel { /// Allocates a new, zeroed, appropriately-aligned page table with the given translation, /// returning both a pointer to it and its physical address. fn new(translation: &mut T, level: usize) -> (Self, PhysicalAddress) { @@ -546,7 +548,7 @@ impl PageTableWithLevel { ) } - fn from_pointer(table: NonNull, level: usize) -> Self { + fn from_pointer(table: NonNull>, level: usize) -> Self { Self { table, level, @@ -555,7 +557,7 @@ impl PageTableWithLevel { } /// Returns a reference to the descriptor corresponding to a given virtual address. 
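///
/// With the crate's 4 KiB granule, each level indexes a 9-bit field of the
/// VA: `shift = PAGE_SHIFT + (LEAF_LEVEL - level) * BITS_PER_LEVEL`, so the
/// leaf level (3) uses VA bits 12..21, level 2 uses bits 21..30, and so on.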
- fn get_entry(&self, va: VirtualAddress) -> &Descriptor { + fn get_entry(&self, va: VirtualAddress) -> &Descriptor { let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL; let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL); // SAFETY: We know that the pointer is properly aligned, dereferenced and initialised, and @@ -566,7 +568,7 @@ impl PageTableWithLevel { } /// Returns a mutable reference to the descriptor corresponding to a given virtual address. - fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor { + fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor { let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL; let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL); // SAFETY: We know that the pointer is properly aligned, dereferenced and initialised, and @@ -581,7 +583,7 @@ impl PageTableWithLevel { fn split_entry( translation: &mut T, chunk: &MemoryRegion, - entry: &mut Descriptor, + entry: &mut Descriptor, level: usize, ) -> Self { let granularity = granularity_at_level(level); @@ -589,7 +591,7 @@ impl PageTableWithLevel { let (mut subtable, subtable_pa) = Self::new(translation, level + 1); if let Some(old_flags) = old.flags() { let old_pa = old.output_address(); - if !old_flags.contains(Attributes::TABLE_OR_PAGE) + if !old_flags.contains(CommonAttributes::TABLE_OR_PAGE.into()) && (!old_flags.is_empty() || old_pa.0 != 0) { // `old` was a block entry, so we need to split it. @@ -605,7 +607,10 @@ impl PageTableWithLevel { ); } } - entry.set(subtable_pa, Attributes::TABLE_OR_PAGE | Attributes::VALID); + entry.set( + subtable_pa, + CommonAttributes::TABLE_OR_PAGE | CommonAttributes::VALID, + ); subtable } @@ -623,7 +628,7 @@ impl PageTableWithLevel { translation: &mut T, range: &MemoryRegion, mut pa: PhysicalAddress, - flags: Attributes, + flags: A, constraints: Constraints, ) { let level = self.level; @@ -634,7 +639,7 @@ impl PageTableWithLevel { if level == LEAF_LEVEL { // Put down a page mapping. - entry.set(pa, flags | Attributes::TABLE_OR_PAGE); + entry.set(pa, flags | A::from(CommonAttributes::TABLE_OR_PAGE)); } else if chunk.is_block(level) && !entry.is_table_or_page() && is_aligned(pa.0, granularity) @@ -667,9 +672,9 @@ impl PageTableWithLevel { let mut i = 0; while i < table.entries.len() { - if table.entries[i].0 == 0 { + if table.entries[i].value == 0 { let first_zero = i; - while i < table.entries.len() && table.entries[i].0 == 0 { + while i < table.entries.len() && table.entries[i].value == 0 { i += 1; } if i - 1 == first_zero { @@ -727,9 +732,9 @@ impl PageTableWithLevel { translation: &mut T, range: &MemoryRegion, f: &F, - ) -> Result<(), MapError> + ) -> Result<(), MapError> where - F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, + F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized, { let level = self.level; for chunk in range.split(level) { @@ -755,7 +760,7 @@ impl PageTableWithLevel { /// If the function returns an error, the walk is terminated and the error value is passed on fn visit_range(&self, translation: &T, range: &MemoryRegion, f: &mut F) -> Result<(), E> where - F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), E>, + F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), E>, { let level = self.level; for chunk in range.split(level) { @@ -790,12 +795,22 @@ impl PageTableWithLevel { /// A single level of a page table. 
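///
/// With the crate's constants (`BITS_PER_LEVEL` = 9, 8-byte descriptors) a
/// table holds 512 entries and is exactly one 4 KiB page; the
/// `align(4096)` attribute keeps it page-aligned. A quick check (sketch):
///
/// ```ignore
/// assert_eq!(core::mem::size_of::<PageTable<AttributesEl1>>(), PAGE_SIZE);
/// assert_eq!(core::mem::align_of::<PageTable<AttributesEl1>>(), PAGE_SIZE);
/// ```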
#[repr(C, align(4096))] -#[cfg_attr(feature = "zerocopy", derive(AsBytes, FromZeroes))] -pub struct PageTable { - entries: [Descriptor; 1 << BITS_PER_LEVEL], +#[cfg_attr(feature = "zerocopy", derive(FromZeroes))] +pub struct PageTable { + entries: [Descriptor; 1 << BITS_PER_LEVEL], +} + +// SAFETY: PageTable is repr(C), has no padding, and we enforce that its field is AsBytes. +// Unfortunately the derive macro doesn't work here because of the generic parameter, but Descriptor +// only uses it in a PhantomData so it doesn't actually affect the representation. +unsafe impl AsBytes for PageTable +where + Descriptor: AsBytes, +{ + fn only_derive_is_allowed_to_implement_this_trait() {} } -impl PageTable { +impl PageTable { /// Allocates a new zeroed, appropriately-aligned pagetable on the heap using the global /// allocator and returns a pointer to it. #[cfg(feature = "alloc")] @@ -814,57 +829,68 @@ impl PageTable { /// - A pointer to a lower level pagetable, if it is not in the lowest level page table. #[cfg_attr(feature = "zerocopy", derive(AsBytes, FromZeroes))] #[derive(Clone, Copy, PartialEq, Eq)] -#[repr(C)] -pub struct Descriptor(usize); +#[repr(transparent)] +pub struct Descriptor { + value: usize, + _attributes: PhantomData, +} -impl Descriptor { +impl Descriptor { const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48); + #[cfg(test)] + const fn new(value: usize) -> Self { + Self { + value, + _attributes: PhantomData, + } + } + pub(crate) fn output_address(self) -> PhysicalAddress { - PhysicalAddress(self.0 & Self::PHYSICAL_ADDRESS_BITMASK) + PhysicalAddress(self.value & Self::PHYSICAL_ADDRESS_BITMASK) } /// Returns the flags of this page table entry, or `None` if its state does not /// contain a valid set of flags. - pub fn flags(self) -> Option { - Attributes::from_bits(self.0 & !Self::PHYSICAL_ADDRESS_BITMASK) + pub fn flags(self) -> Option { + A::from_bits(self.value & !Self::PHYSICAL_ADDRESS_BITMASK) } /// Modifies the page table entry by setting or clearing its flags. /// Panics when attempting to convert a table descriptor into a block/page descriptor or vice /// versa - this is not supported via this API. - pub fn modify_flags(&mut self, set: Attributes, clear: Attributes) { - let flags = (self.0 | set.bits()) & !clear.bits(); + pub fn modify_flags(&mut self, set: A, clear: A) { + let flags = (self.value | set.bits()) & !clear.bits(); - if (self.0 ^ flags) & Attributes::TABLE_OR_PAGE.bits() != 0 { + if (self.value ^ flags) & CommonAttributes::TABLE_OR_PAGE.bits() != 0 { panic!("Cannot convert between table and block/page descriptors\n"); } - self.0 = flags; + self.value = flags; } /// Returns `true` if [`Attributes::VALID`] is set on this entry, e.g. if the entry is mapped. pub fn is_valid(self) -> bool { - (self.0 & Attributes::VALID.bits()) != 0 + (self.value & CommonAttributes::VALID.bits()) != 0 } /// Returns `true` if this is a valid entry pointing to a next level translation table or a page. 
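///
/// For example (a sketch mirroring this file's tests; `Descriptor::new` and
/// `set` are crate-internal):
///
/// ```ignore
/// let mut desc = Descriptor::<AttributesEl1>::new(0);
/// assert!(!desc.is_table_or_page());
/// desc.set(
///     PhysicalAddress(0x12340000),
///     AttributesEl1::TABLE_OR_PAGE | AttributesEl1::VALID,
/// );
/// assert!(desc.is_table_or_page());
/// ```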
pub fn is_table_or_page(self) -> bool { if let Some(flags) = self.flags() { - flags.contains(Attributes::TABLE_OR_PAGE | Attributes::VALID) + flags.contains((CommonAttributes::TABLE_OR_PAGE | CommonAttributes::VALID).into()) } else { false } } - pub(crate) fn set(&mut self, pa: PhysicalAddress, flags: Attributes) { - self.0 = (pa.0 & Self::PHYSICAL_ADDRESS_BITMASK) | flags.bits(); + pub(crate) fn set(&mut self, pa: PhysicalAddress, flags: impl Into) { + self.value = (pa.0 & Self::PHYSICAL_ADDRESS_BITMASK) | flags.into().bits(); } - fn subtable( + fn subtable>( self, translation: &T, level: usize, - ) -> Option> { + ) -> Option> { if level < LEAF_LEVEL && self.is_table_or_page() { let output_address = self.output_address(); let table = translation.physical_to_virtual(output_address); @@ -874,9 +900,9 @@ impl Descriptor { } } -impl Debug for Descriptor { +impl Debug for Descriptor { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { - write!(f, "{:#016x}", self.0)?; + write!(f, "{:#016x}", self.value)?; if self.is_valid() { if let Some(flags) = self.flags() { write!(f, " ({}, {:?})", self.output_address(), flags)?; @@ -933,6 +959,7 @@ pub(crate) const fn is_aligned(value: usize, alignment: usize) -> bool { #[cfg(test)] mod tests { + use super::attributes::{AttributesEl1, AttributesEl2, AttributesEl3}; use super::*; #[cfg(feature = "alloc")] use crate::idmap::IdTranslation; @@ -1001,57 +1028,66 @@ mod tests { #[test] fn invalid_descriptor() { - let desc = Descriptor(0usize); + let desc = Descriptor::::new(0usize); assert!(!desc.is_valid()); - assert!(!desc.flags().unwrap().contains(Attributes::VALID)); + assert!(!desc.flags().unwrap().contains(AttributesEl1::VALID)); } #[test] fn set_descriptor() { const PHYSICAL_ADDRESS: usize = 0x12340000; - let mut desc = Descriptor(0usize); + let mut desc = Descriptor::::new(0usize); assert!(!desc.is_valid()); desc.set( PhysicalAddress(PHYSICAL_ADDRESS), - Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID, + AttributesEl1::TABLE_OR_PAGE + | AttributesEl1::USER + | AttributesEl1::SWFLAG_1 + | AttributesEl1::VALID, ); assert!(desc.is_valid()); assert_eq!( desc.flags().unwrap(), - Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1 | Attributes::VALID + AttributesEl1::TABLE_OR_PAGE + | AttributesEl1::USER + | AttributesEl1::SWFLAG_1 + | AttributesEl1::VALID ); assert_eq!(desc.output_address(), PhysicalAddress(PHYSICAL_ADDRESS)); } #[test] fn modify_descriptor_flags() { - let mut desc = Descriptor(0usize); + let mut desc = Descriptor::::new(0usize); assert!(!desc.is_valid()); desc.set( PhysicalAddress(0x12340000), - Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_1, + AttributesEl1::TABLE_OR_PAGE | AttributesEl1::USER | AttributesEl1::SWFLAG_1, ); desc.modify_flags( - Attributes::DBM | Attributes::SWFLAG_3, - Attributes::VALID | Attributes::SWFLAG_1, + AttributesEl1::DBM | AttributesEl1::SWFLAG_3, + AttributesEl1::VALID | AttributesEl1::SWFLAG_1, ); assert!(!desc.is_valid()); assert_eq!( desc.flags().unwrap(), - Attributes::TABLE_OR_PAGE | Attributes::USER | Attributes::SWFLAG_3 | Attributes::DBM + AttributesEl1::TABLE_OR_PAGE + | AttributesEl1::USER + | AttributesEl1::SWFLAG_3 + | AttributesEl1::DBM ); } #[test] #[should_panic] fn modify_descriptor_table_or_page_flag() { - let mut desc = Descriptor(0usize); + let mut desc = Descriptor::::new(0usize); assert!(!desc.is_valid()); desc.set( PhysicalAddress(0x12340000), - Attributes::TABLE_OR_PAGE | Attributes::USER | 
Attributes::SWFLAG_1, + AttributesEl1::TABLE_OR_PAGE | AttributesEl1::USER | AttributesEl1::SWFLAG_1, ); - desc.modify_flags(Attributes::VALID, Attributes::TABLE_OR_PAGE); + desc.modify_flags(AttributesEl1::VALID, AttributesEl1::TABLE_OR_PAGE); } #[cfg(feature = "alloc")] @@ -1072,13 +1108,23 @@ mod tests { #[test] #[should_panic] fn no_el2_ttbr1() { - RootTable::::new(IdTranslation, 1, TranslationRegime::El2, VaRange::Upper); + RootTable::::new( + IdTranslation, + 1, + TranslationRegime::El2, + VaRange::Upper, + ); } #[cfg(feature = "alloc")] #[test] #[should_panic] fn no_el3_ttbr1() { - RootTable::::new(IdTranslation, 1, TranslationRegime::El3, VaRange::Upper); + RootTable::::new( + IdTranslation, + 1, + TranslationRegime::El3, + VaRange::Upper, + ); } } diff --git a/src/paging/attributes.rs b/src/paging/attributes.rs index 3a4b1fe..c9f0b8c 100644 --- a/src/paging/attributes.rs +++ b/src/paging/attributes.rs @@ -2,12 +2,111 @@ // This project is dual-licensed under Apache 2.0 and MIT terms. // See LICENSE-APACHE and LICENSE-MIT for details. -use bitflags::bitflags; +use super::TranslationRegime; +use bitflags::{bitflags, Flags}; +use core::{ + fmt::Debug, + ops::{BitOr, BitXor, Sub}, +}; + +/// Attribute bits for a mapping in a page table. +pub trait Attributes: + Copy + + Clone + + Debug + + Flags + + Sub + + BitOr + + BitXor + + From +{ + const TRANSLATION_REGIME: TranslationRegime; +} + +bitflags! { + #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + pub struct CommonAttributes: usize{ + const VALID = 1 << 0; + const TABLE_OR_PAGE = 1 << 1; + + const ATTRIBUTE_INDEX_0 = 0 << 2; + const ATTRIBUTE_INDEX_1 = 1 << 2; + const ATTRIBUTE_INDEX_2 = 2 << 2; + const ATTRIBUTE_INDEX_3 = 3 << 2; + const ATTRIBUTE_INDEX_4 = 4 << 2; + const ATTRIBUTE_INDEX_5 = 5 << 2; + const ATTRIBUTE_INDEX_6 = 6 << 2; + const ATTRIBUTE_INDEX_7 = 7 << 2; + + const OUTER_SHAREABLE = 2 << 8; + const INNER_SHAREABLE = 3 << 8; + + const NON_GLOBAL = 1 << 11; + } +} + +impl CommonAttributes { + /// Mask for the bits determining the shareability of the mapping. + pub const SHAREABILITY_MASK: Self = Self::INNER_SHAREABLE; + + /// Mask for the bits determining the attribute index of the mapping. + pub const ATTRIBUTE_INDEX_MASK: Self = Self::ATTRIBUTE_INDEX_7; +} + +bitflags! { + /// Attribute bits for a mapping in a page table for the EL3 translation regime. + #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + pub struct AttributesEl3: usize { + const VALID = 1 << 0; + const TABLE_OR_PAGE = 1 << 1; + + const ATTRIBUTE_INDEX_0 = 0 << 2; + const ATTRIBUTE_INDEX_1 = 1 << 2; + const ATTRIBUTE_INDEX_2 = 2 << 2; + const ATTRIBUTE_INDEX_3 = 3 << 2; + const ATTRIBUTE_INDEX_4 = 4 << 2; + const ATTRIBUTE_INDEX_5 = 5 << 2; + const ATTRIBUTE_INDEX_6 = 6 << 2; + const ATTRIBUTE_INDEX_7 = 7 << 2; + + const OUTER_SHAREABLE = 2 << 8; + const INNER_SHAREABLE = 3 << 8; + + const NS = 1 << 5; + const USER_RES1 = 1 << 6; + const READ_ONLY = 1 << 7; + const ACCESSED = 1 << 10; + const NSE = 1 << 11; + const DBM = 1 << 51; + /// Execute-never. + const XN = 1 << 54; + + // Software flags in block and page descriptor entries. 
+ const SWFLAG_0 = 1 << 55; + const SWFLAG_1 = 1 << 56; + const SWFLAG_2 = 1 << 57; + const SWFLAG_3 = 1 << 58; + } +} + +impl AttributesEl3 { + pub const RES1: Self = Self::USER_RES1; +} + +impl Attributes for AttributesEl3 { + const TRANSLATION_REGIME: TranslationRegime = TranslationRegime::El3; +} + +impl From for AttributesEl3 { + fn from(common: CommonAttributes) -> Self { + Self::from_bits_retain(common.bits()) + } +} bitflags! { - /// Attribute bits for a mapping in a page table. + /// Attribute bits for a mapping in a page table for the non-secure EL2 translation regime. #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - pub struct Attributes: usize { + pub struct AttributesEl2: usize { const VALID = 1 << 0; const TABLE_OR_PAGE = 1 << 1; @@ -43,10 +142,61 @@ bitflags! { } } -impl Attributes { - /// Mask for the bits determining the shareability of the mapping. - pub const SHAREABILITY_MASK: Self = Self::INNER_SHAREABLE; +impl Attributes for AttributesEl2 { + const TRANSLATION_REGIME: TranslationRegime = TranslationRegime::El2; +} - /// Mask for the bits determining the attribute index of the mapping. - pub const ATTRIBUTE_INDEX_MASK: Self = Self::ATTRIBUTE_INDEX_7; +impl From for AttributesEl2 { + fn from(common: CommonAttributes) -> Self { + Self::from_bits_retain(common.bits()) + } +} + +bitflags! { + /// Attribute bits for a mapping in a page table for the EL1&0 translation regime. + #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + pub struct AttributesEl1: usize { + const VALID = 1 << 0; + const TABLE_OR_PAGE = 1 << 1; + + const ATTRIBUTE_INDEX_0 = 0 << 2; + const ATTRIBUTE_INDEX_1 = 1 << 2; + const ATTRIBUTE_INDEX_2 = 2 << 2; + const ATTRIBUTE_INDEX_3 = 3 << 2; + const ATTRIBUTE_INDEX_4 = 4 << 2; + const ATTRIBUTE_INDEX_5 = 5 << 2; + const ATTRIBUTE_INDEX_6 = 6 << 2; + const ATTRIBUTE_INDEX_7 = 7 << 2; + + const OUTER_SHAREABLE = 2 << 8; + const INNER_SHAREABLE = 3 << 8; + + const NS = 1 << 5; + const USER = 1 << 6; + const READ_ONLY = 1 << 7; + const ACCESSED = 1 << 10; + const NON_GLOBAL = 1 << 11; + const DBM = 1 << 51; + /// Privileged Execute-never, if two privilege levels are supported. + const PXN = 1 << 53; + /// Unprivileged Execute-never, or just Execute-never if only one privilege level is + /// supported. + const UXN = 1 << 54; + + // Software flags in block and page descriptor entries. + const SWFLAG_0 = 1 << 55; + const SWFLAG_1 = 1 << 56; + const SWFLAG_2 = 1 << 57; + const SWFLAG_3 = 1 << 58; + } +} + +impl Attributes for AttributesEl1 { + const TRANSLATION_REGIME: TranslationRegime = TranslationRegime::El1And0; +} + +impl From for AttributesEl1 { + fn from(common: CommonAttributes) -> Self { + Self::from_bits_retain(common.bits()) + } } diff --git a/src/target.rs b/src/target.rs index 5dcdfbc..160837e 100644 --- a/src/target.rs +++ b/src/target.rs @@ -6,7 +6,7 @@ //! //! See [`TargetAllocator`] for details on how to use it. 
-use crate::paging::{deallocate, PageTable, PhysicalAddress, Translation};
+use crate::paging::{attributes::Attributes, deallocate, PageTable, PhysicalAddress, Translation};
 use alloc::{vec, vec::Vec};
 use core::{mem::size_of, ptr::NonNull};
 #[cfg(feature = "zerocopy")]
@@ -20,7 +20,7 @@ use zerocopy::AsBytes;
 /// ```
 /// use aarch64_paging::{
 ///     paging::{
-///         attributes::Attributes, Constraints, MemoryRegion, PhysicalAddress, RootTable,
+///         attributes::AttributesEl1, Constraints, MemoryRegion, PhysicalAddress, RootTable,
 ///         TranslationRegime, VaRange,
 ///     },
 ///     target::TargetAllocator,
@@ -37,10 +37,10 @@ use zerocopy::AsBytes;
 /// map.map_range(
 ///     &MemoryRegion::new(0x0, 0x1000),
 ///     PhysicalAddress(0x4_2000),
-///     Attributes::VALID
-///         | Attributes::ATTRIBUTE_INDEX_0
-///         | Attributes::INNER_SHAREABLE
-///         | Attributes::UXN,
+///     AttributesEl1::VALID
+///         | AttributesEl1::ATTRIBUTE_INDEX_0
+///         | AttributesEl1::INNER_SHAREABLE
+///         | AttributesEl1::UXN,
 ///     Constraints::empty(),
 /// )
 /// .unwrap();
@@ -51,12 +51,12 @@ use zerocopy::AsBytes;
 /// # }
 /// ```
 #[derive(Debug)]
-pub struct TargetAllocator {
+pub struct TargetAllocator<A: Attributes> {
     base_address: u64,
-    allocations: Vec<Option<NonNull<PageTable>>>,
+    allocations: Vec<Option<NonNull<PageTable<A>>>>,
 }
 
-impl TargetAllocator {
+impl<A: Attributes> TargetAllocator<A> {
     /// Creates a new `TargetAllocator` for a page table which will be loaded on the target in a
     /// contiguous block of memory starting at the given address.
     pub fn new(base_address: u64) -> Self {
@@ -66,7 +66,7 @@ impl TargetAllocator {
         }
     }
 
-    fn add_allocation(&mut self, page_table: NonNull<PageTable>) -> usize {
+    fn add_allocation(&mut self, page_table: NonNull<PageTable<A>>) -> usize {
         for (i, allocation) in self.allocations.iter_mut().enumerate() {
             if allocation.is_none() {
                 *allocation = Some(page_table);
@@ -77,7 +77,7 @@ impl TargetAllocator {
         self.allocations.len() - 1
     }
 
-    fn remove_allocation(&mut self, page_table: NonNull<PageTable>) -> bool {
+    fn remove_allocation(&mut self, page_table: NonNull<PageTable<A>>) -> bool {
         for allocation in &mut self.allocations {
             if *allocation == Some(page_table) {
                 *allocation = None;
@@ -92,9 +92,9 @@ impl TargetAllocator {
     /// This could be embedded in a binary image for the target.
     #[cfg(feature = "zerocopy")]
     pub fn as_bytes(&self) -> Vec<u8> {
-        let mut bytes = vec![0; self.allocations.len() * size_of::<PageTable>()];
+        let mut bytes = vec![0; self.allocations.len() * size_of::<PageTable<A>>()];
         for (chunk, allocation) in bytes
-            .chunks_exact_mut(size_of::<PageTable>())
+            .chunks_exact_mut(size_of::<PageTable<A>>())
             .zip(self.allocations.iter())
         {
             if let Some(page_table) = allocation {
@@ -108,17 +108,17 @@ impl Translation for TargetAllocator {
     }
 }
 
-impl Translation for TargetAllocator {
-    fn allocate_table(&mut self) -> (NonNull<PageTable>, PhysicalAddress) {
+impl<A: Attributes> Translation<A> for TargetAllocator<A> {
+    fn allocate_table(&mut self) -> (NonNull<PageTable<A>>, PhysicalAddress) {
         let page_table = PageTable::new();
         let index = self.add_allocation(page_table);
         let address = PhysicalAddress(
-            usize::try_from(self.base_address).unwrap() + index * size_of::<PageTable>(),
+            usize::try_from(self.base_address).unwrap() + index * size_of::<PageTable<A>>(),
         );
         (page_table, address)
     }
 
-    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable>) {
+    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable<A>>) {
         if !self.remove_allocation(page_table) {
             panic!(
                 "dealloc_table called for page table {:?} which isn't in allocations.",
@@ -127,15 +127,15 @@ impl Translation for TargetAllocator {
             );
         }
         // SAFETY: Our caller promises that the memory was allocated by `allocate_table` on this
         // `TargetAllocator` and not yet deallocated. `allocate_table` used the global allocator
-        // and appropriate layout by calling `PageTable::new()`.
+        // and appropriate layout by calling `PageTable::new()`.
         unsafe {
             deallocate(page_table);
         }
     }
 
-    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable> {
+    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable<A>> {
         self.allocations
-            [(pa.0 - usize::try_from(self.base_address).unwrap()) / size_of::<PageTable>()]
+            [(pa.0 - usize::try_from(self.base_address).unwrap()) / size_of::<PageTable<A>>()]
             .unwrap()
     }
 }
@@ -144,7 +144,7 @@
 mod tests {
     use super::*;
     use crate::paging::{
-        attributes::Attributes, Constraints, MemoryRegion, RootTable, TranslationRegime, VaRange,
+        attributes::AttributesEl1, Constraints, MemoryRegion, RootTable, TranslationRegime, VaRange,
     };
 
     const ROOT_LEVEL: usize = 1;
@@ -160,38 +160,43 @@ mod tests {
         map.map_range(
             &MemoryRegion::new(0x0, 0x1000),
             PhysicalAddress(0x4_2000),
-            Attributes::VALID
-                | Attributes::ATTRIBUTE_INDEX_0
-                | Attributes::INNER_SHAREABLE
-                | Attributes::UXN,
+            AttributesEl1::VALID
+                | AttributesEl1::ATTRIBUTE_INDEX_0
+                | AttributesEl1::INNER_SHAREABLE
+                | AttributesEl1::UXN,
             Constraints::empty(),
         )
         .unwrap();
 
         let bytes = map.translation().as_bytes();
-        assert_eq!(bytes.len(), 3 * size_of::<PageTable>());
+        assert_eq!(bytes.len(), 3 * size_of::<PageTable<AttributesEl1>>());
         // Table mapping for table at 0x01_1000
         assert_eq!(
             bytes[0..8],
             [0x03, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00]
         );
-        for byte in &bytes[8..size_of::<PageTable>()] {
+        for byte in &bytes[8..size_of::<PageTable<AttributesEl1>>()] {
             assert_eq!(*byte, 0);
         }
         // Table mapping for table at 0x01_2000
         assert_eq!(
-            bytes[size_of::<PageTable>()..size_of::<PageTable>() + 8],
+            bytes[size_of::<PageTable<AttributesEl1>>()..size_of::<PageTable<AttributesEl1>>() + 8],
            [0x03, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00]
         );
-        for byte in &bytes[size_of::<PageTable>() + 8..2 * size_of::<PageTable>()] {
+        for byte in &bytes
+            [size_of::<PageTable<AttributesEl1>>() + 8..2 * size_of::<PageTable<AttributesEl1>>()]
+        {
             assert_eq!(*byte, 0);
         }
         // Page mapping for 0x04_2000 with the attributes given above.
         assert_eq!(
-            bytes[2 * size_of::<PageTable>()..2 * size_of::<PageTable>() + 8],
+            bytes[2 * size_of::<PageTable<AttributesEl1>>()
+                ..2 * size_of::<PageTable<AttributesEl1>>() + 8],
             [0x03, 0x23, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00]
         );
-        for byte in &bytes[2 * size_of::<PageTable>() + 8..3 * size_of::<PageTable>()] {
+        for byte in &bytes[2 * size_of::<PageTable<AttributesEl1>>() + 8
+            ..3 * size_of::<PageTable<AttributesEl1>>()]
+        {
             assert_eq!(*byte, 0);
        }
    }
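
With the attribute types split per regime, the offline-table flow in `TargetAllocator`'s doc example extends to other regimes by swapping the attribute type. A sketch of the same construction for an EL3 table; it assumes the `alloc` and `zerocopy` features, reuses the base address and root level from the example above, and the exact flag choice is illustrative rather than prescribed by this patch:

```rust
use aarch64_paging::{
    paging::{
        attributes::AttributesEl3, Constraints, MemoryRegion, PhysicalAddress, RootTable,
        TranslationRegime, VaRange,
    },
    target::TargetAllocator,
};

fn main() {
    // Build a lower-range EL3 table at a known target load address; the
    // AttributesEl3 flags passed to map_range fix the allocator's type.
    let mut map = RootTable::new(
        TargetAllocator::new(0x1_0000),
        1,
        TranslationRegime::El3,
        VaRange::Lower,
    );
    map.map_range(
        &MemoryRegion::new(0x0, 0x1000),
        PhysicalAddress(0x4_2000),
        AttributesEl3::VALID
            | AttributesEl3::ATTRIBUTE_INDEX_0
            | AttributesEl3::INNER_SHAREABLE
            | AttributesEl3::RES1
            | AttributesEl3::XN,
        Constraints::empty(),
    )
    .unwrap();
    // Serialise the table for embedding in an EL3 firmware image.
    let bytes = map.translation().as_bytes();
    assert!(!bytes.is_empty());
}
```

The payoff of the per-regime types shows up here: EL3-only bits such as `RES1` and `XN` are available on `AttributesEl3`, while EL1&0-only bits such as `NON_GLOBAL` simply do not exist on it, so a mismatched flag is a compile error rather than a silently wrong descriptor.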