From de91603059ebfeb69a01b016a05032a8624babd7 Mon Sep 17 00:00:00 2001 From: Berkus Decker Date: Sun, 21 Feb 2021 00:40:42 +0200 Subject: [PATCH] [wip] necessary modifications --- .../arch/aarch64/memory/mmu-experimental.rs | 31 +++++++++++-------- nucleus/src/arch/aarch64/memory/mmu.rs | 21 ++++++++++--- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/nucleus/src/arch/aarch64/memory/mmu-experimental.rs b/nucleus/src/arch/aarch64/memory/mmu-experimental.rs index 943863a..e970ab6 100644 --- a/nucleus/src/arch/aarch64/memory/mmu-experimental.rs +++ b/nucleus/src/arch/aarch64/memory/mmu-experimental.rs @@ -33,6 +33,7 @@ impl PageSize for Size1GiB { } /// Errors from mapping layer (@todo use anyhow/snafu? thiserror?) +#[derive(Debug, Snafu)] pub enum TranslationError { NoPage, } @@ -58,13 +59,14 @@ impl ActivePageTable { unsafe { self.l0.as_mut() } } - pub fn translate(&self, virtual_address: VirtAddr) -> Result { - let offset = virtual_address % Size4KiB::SIZE as usize; // @todo use the size of the last page of course - self.translate_page(Page::containing_address(virtual_address)) - .map(|frame| frame.start_address() + offset) - } + // pub fn translate(&self, virtual_address: VirtAddr) -> Result { + // let offset = virtual_address % Size4KiB::SIZE as usize; // @todo use the size of the last page of course + // self.translate_page(Page::containing_address(virtual_address))? + // .map(|frame| frame.start_address() + offset) + // } fn translate_page(&self, page: Page) -> Result { + // @todo translate only one level of hierarchy per impl function... 
let l1 = self.l0().next_table(u64::from(page.l0_index()) as usize); /* let huge_page = || { @@ -138,6 +140,8 @@ impl ActivePageTable { where A: FrameAllocator, { + // @todo fail mapping if table is not allocated, causing client to allocate and restart + // @todo problems described in preso - chicken&egg problem of allocating first allocations let frame = allocator.allocate_frame().expect("out of memory"); self.map_to(page, frame, flags, allocator) } @@ -172,18 +176,19 @@ impl ActivePageTable { // tlb::flush(VirtAddr(page.start_address())); // TODO free p(1,2,3) table if empty //allocator.deallocate_frame(frame); + // @todo do NOT deallocate frames either, but need to signal client that it's unused } } // Abstractions for page table entries. /// The error returned by the `PageTableEntry::frame` method. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Snafu, Debug, Clone, Copy, PartialEq)] pub enum FrameError { /// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame. FrameNotPresent, /// The entry has the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame - /// as return type, so a huge frame can't be returned. + /// as return type, so a huge frame can't be returned. 
@todo HugeFrame, } @@ -265,17 +270,17 @@ impl fmt::Debug for PageTableEntry { } }*/ -impl Table +impl Table where - L: HierarchicalLevel, + Level: HierarchicalLevel, { - pub fn next_table_create( + pub fn next_table_create( &mut self, index: usize, - allocator: &mut A, - ) -> &mut Table + allocator: &mut Alloc, + ) -> &mut Table where - A: FrameAllocator, + Alloc: FrameAllocator, { if self.next_table(index).is_none() { assert!( diff --git a/nucleus/src/arch/aarch64/memory/mmu.rs b/nucleus/src/arch/aarch64/memory/mmu.rs index d5bdd4a..2feef14 100644 --- a/nucleus/src/arch/aarch64/memory/mmu.rs +++ b/nucleus/src/arch/aarch64/memory/mmu.rs @@ -177,6 +177,13 @@ impl PageTableEntry { } } +#[derive(Snafu, Debug)] +enum PageTableError { + #[snafu(display("BlockDescriptor: Address is not 2 MiB aligned."))] + //"PageDescriptor: Address is not 4 KiB aligned." + NotAligned(&'static str), +} + /// A Level2 block descriptor with 2 MiB aperture. /// /// The output points to physical memory. @@ -186,9 +193,9 @@ impl PageTableEntry { fn new_lvl2_block_descriptor( output_addr: usize, attribute_fields: AttributeFields, - ) -> Result { + ) -> Result { if output_addr % Size2MiB::SIZE as usize != 0 { - return Err("BlockDescriptor: Address is not 2 MiB aligned."); + return Err(PageTableError::NotAligned(Size2MiB::SIZE_AS_DEBUG_STR)); } let shifted = output_addr >> Size2MiB::SHIFT; @@ -211,9 +218,9 @@ impl PageTableEntry { fn new_page_descriptor( output_addr: usize, attribute_fields: AttributeFields, - ) -> Result { + ) -> Result { if output_addr % Size4KiB::SIZE as usize != 0 { - return Err("PageDescriptor: Address is not 4 KiB aligned."); + return Err(PageTableError::NotAligned(Size4KiB::SIZE_AS_DEBUG_STR)); } let shifted = output_addr >> Size4KiB::SHIFT; @@ -249,11 +256,17 @@ impl From for u64 { } } +// to get L0 we must allocate a few frames from boot region allocator. 
+// So, first we init the dtb, parse mem-regions from there, then init boot_info page and start mmu, +// this part will be initialized in mmu::init(): + +// @todo do NOT keep these statically, always allocate from available bump memory static mut LVL2_TABLE: Table = Table:: { entries: [0; NUM_ENTRIES_4KIB as usize], level: PhantomData, }; +// @todo do NOT keep these statically, always allocate from available bump memory static mut LVL3_TABLE: Table = Table:: { entries: [0; NUM_ENTRIES_4KIB as usize], level: PhantomData,