diff --git a/nucleus/src/arch/aarch64/memory/area_frame_allocator.rs b/nucleus/src/arch/aarch64/memory/area_frame_allocator.rs
new file mode 100644
index 0000000..07a3bde
--- /dev/null
+++ b/nucleus/src/arch/aarch64/memory/area_frame_allocator.rs
@@ -0,0 +1,106 @@
+/*
+ * SPDX-License-Identifier: BlueOak-1.0.0
+ */
+use super::{Frame, FrameAllocator};
+use multiboot2::{MemoryArea, MemoryAreaIter}; // replace with DTB?
+
+pub struct AreaFrameAllocator {
+ next_free_frame: Frame,
+ current_area: Option<&'static MemoryArea>,
+ areas: MemoryAreaIter,
+ kernel_start: Frame,
+ kernel_end: Frame,
+ multiboot_start: Frame,
+ multiboot_end: Frame,
+}
+
+impl FrameAllocator for AreaFrameAllocator {
+ fn allocate_frame(&mut self) -> Option<Frame> {
+ if let Some(_area) = self.current_area {
+ // "Clone" the frame to return it if it's free. Frame doesn't
+ // implement Clone, but we can construct an identical frame.
+ let frame = Frame {
+ number: self.next_free_frame.number,
+ };
+
+ // the last frame of the current area
+ // (hardcoded to the top of RPi3 RAM at 0x3f00_0000 until choose_next_area() is wired up)
+ let current_area_last_frame = Frame::containing_address(0x3f00_0000);
+ // {
+ // let address = area.base_addr + area.length - 1;
+ // Frame::containing_address(address as usize)
+ // };
+
+ if frame > current_area_last_frame {
+ // all frames of current area are used, switch to next area
+ // self.choose_next_area();
+ unimplemented!();
+ } else if frame >= self.kernel_start && frame <= self.kernel_end {
+ // `frame` is used by the kernel
+ self.next_free_frame = Frame {
+ number: self.kernel_end.number + 1,
+ };
+ } else if frame >= self.multiboot_start && frame <= self.multiboot_end {
+ // `frame` is used by the multiboot information structure
+ self.next_free_frame = Frame {
+ number: self.multiboot_end.number + 1,
+ };
+ } else {
+ // frame is unused, increment `next_free_frame` and return it
+ self.next_free_frame.number += 1;
+ return Some(frame);
+ }
+ // `frame` was not valid, try it again with the updated `next_free_frame`
+ self.allocate_frame()
+ } else {
+ None // no free frames left
+ }
+ }
+
+ fn deallocate_frame(&mut self, _frame: Frame) {
+ unimplemented!()
+ }
+}
+
+// FIXME: no multiboot on this platform; use the DTB's available memory regions instead.
+// Need a DTB parser here!
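+//
+// A sketch of the shape such a replacement could take, assuming we parse the
+// `/memory` node ourselves. `DtbMemoryArea` and `DtbMemoryAreaIter` are made-up
+// names for illustration, not an existing API:
+//
+// #[derive(Clone, Copy)]
+// pub struct DtbMemoryArea {
+//     pub base_addr: u64, // start of a usable RAM range
+//     pub length: u64,    // size of the range in bytes
+// }
+//
+// // `choose_next_area()` below would then iterate over a `DtbMemoryAreaIter`
+// // yielding these, exactly as it iterates over `MemoryAreaIter` today.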
+
+impl AreaFrameAllocator {
+ pub fn new(
+ kernel_start: usize,
+ kernel_end: usize,
+ multiboot_start: usize,
+ multiboot_end: usize,
+ memory_areas: MemoryAreaIter,
+ ) -> AreaFrameAllocator {
+ let mut allocator = AreaFrameAllocator {
+ next_free_frame: Frame::containing_address(0),
+ current_area: None,
+ areas: memory_areas,
+ kernel_start: Frame::containing_address(kernel_start),
+ kernel_end: Frame::containing_address(kernel_end),
+ multiboot_start: Frame::containing_address(multiboot_start),
+ multiboot_end: Frame::containing_address(multiboot_end),
+ };
+ // allocator.choose_next_area();
+ allocator.next_free_frame = Frame::containing_address(0x100000); // start from 1 MiB
+ allocator
+ }
+
+ fn choose_next_area(&mut self) {
+ self.current_area = self
+ .areas
+ .clone()
+ .filter(|area| {
+ let address = area.base_addr + area.length - 1;
+ Frame::containing_address(address as usize) >= self.next_free_frame
+ })
+ .min_by_key(|area| area.base_addr);
+
+ if let Some(area) = self.current_area {
+ let start_frame = Frame::containing_address(area.base_addr as usize);
+ if self.next_free_frame < start_frame {
+ self.next_free_frame = start_frame;
+ }
+ }
+ }
+}
diff --git a/nucleus/src/arch/aarch64/memory/boot_allocator.rs b/nucleus/src/arch/aarch64/memory/boot_allocator.rs
new file mode 100644
index 0000000..14b5e67
--- /dev/null
+++ b/nucleus/src/arch/aarch64/memory/boot_allocator.rs
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: BlueOak-1.0.0
+ */
+// Allocate regions from the boot memory list obtained from the device tree.
+pub struct BootRegionAllocator {}
+
+impl BootRegionAllocator {
+ pub fn new(_boot_info: &BootInfo) -> Self {
+ Self {}
+ }
+
+ pub fn alloc_region(&mut self) {}
+
+ pub fn alloc_zeroed(&mut self) {}
+}
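+
+// A minimal sketch of one possible shape for this allocator, assuming the
+// device tree hands us a fixed list of (start, end) regions via `BootInfo`;
+// every field and method name below is an assumption, not an existing API:
+//
+// pub struct BootRegionAllocator {
+//     regions: [(u64, u64); 8], // (start, end) pairs from the devtree
+//     current: usize,           // region currently allocated from
+//     next: u64,                // bump pointer inside the current region
+// }
+//
+// fn bump_alloc(a: &mut BootRegionAllocator, size: u64, align: u64) -> Option<u64> {
+//     let aligned = (a.next + align - 1) & !(align - 1); // round up to `align`
+//     let (_, end) = a.regions[a.current];
+//     if aligned + size <= end {
+//         a.next = aligned + size;
+//         Some(aligned)
+//     } else {
+//         None // @todo advance to the next region
+//     }
+// }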
diff --git a/nucleus/src/arch/aarch64/memory/mmu-experimental.rs b/nucleus/src/arch/aarch64/memory/mmu-experimental.rs
new file mode 100644
index 0000000..98fe9e3
--- /dev/null
+++ b/nucleus/src/arch/aarch64/memory/mmu-experimental.rs
@@ -0,0 +1,896 @@
+// 1: use Table for sure
+// 2: in tables use typed descriptors over generic u64 entries?? how to pick right type...
+// -- TableDescriptor
+// -- Lvl2BlockDescriptor
+// -- PageDescriptor
+// Use them instead of PageTableEntry
+// 3: Use PhysFrame and Page as flexible versions of various-sized pages
+
+// Level 0 descriptors can only output the address of a Level 1 table.
+// Level 3 descriptors cannot point to another table and can only output block addresses.
+// The format of the table is therefore slightly different for Level 3.
+//
+// this means:
+// - level 0 page table can be only TableDescriptors
+// - level 1,2 page table can be TableDescriptors, Lvl2BlockDescriptors (PageDescriptors)
+// - level 3 page table can be only PageDescriptors
+
+// Level / Types | Table Descriptor | Lvl2BlockDescriptor (PageDescriptor)
+// --------------+------------------+--------------------------------------
+// 0 | X | (not allowed with 4KiB granule)
+// 1 | X | X (1GiB range)
+// 2 | X | X (2MiB range)
+// 3 | | X (4KiB range) -- called PageDescriptor
+// encoding actually the same as in Table Descriptor
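+//
+// One way to encode these rules in the type system (a sketch; the Linux-style
+// level names are assumptions, matching the `HierarchicalLevel` trait used by
+// the table code at the bottom of this file):
+//
+// pub trait TableLevel {}
+// pub trait HierarchicalLevel: TableLevel {
+//     type NextLevel: TableLevel;
+// }
+//
+// pub enum PageGlobalDirectory {} // level 0: table descriptors only
+// pub enum PageUpperDirectory {}  // level 1: tables or 1GiB blocks
+// pub enum PageDirectory {}       // level 2: tables or 2MiB blocks
+// pub enum PageTable {}           // level 3: 4KiB page descriptors only
+// // (TableLevel impls for all four levels omitted for brevity)
+//
+// impl HierarchicalLevel for PageGlobalDirectory { type NextLevel = PageUpperDirectory; }
+// impl HierarchicalLevel for PageUpperDirectory { type NextLevel = PageDirectory; }
+// impl HierarchicalLevel for PageDirectory { type NextLevel = PageTable; }
+// // no HierarchicalLevel for PageTable: level 3 cannot point to another table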
+
+// Translation granule affects the size of the block addressed.
+// Let's use the 4KiB granule on RPi3 for simplicity.
+
+// This gives the following address format:
+//
+// Maximum OA is 48 bits.
+//
+// Level 0 descriptor cannot be block descriptor.
+// Level 0 table descriptor has Output Address in [47:12]
+//
+// Level 1 block descriptor has Output Address in [47:30]
+// Level 2 block descriptor has Output Address in [47:21]
+//
+// Level 1 table descriptor has Output Address in [47:12]
+// Level 2 table descriptor has Output Address in [47:12]
+//
+// Level 3 Page Descriptor:
+// Upper Attributes [63:51]
+// Res0 [50:48]
+// Output Address [47:12]
+// Lower Attributes [11:2]
+// 11b [1:0]
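+//
+// Pulling the output address out of a raw level 3 descriptor per the layout
+// above (a sketch; only OA[47:12] is interpreted, matching `ADDR_MASK` below):
+//
+// fn l3_output_address(descriptor: u64) -> u64 {
+//     debug_assert_eq!(descriptor & 0b11, 0b11); // valid page descriptor
+//     descriptor & 0x0000_ffff_ffff_f000 // bits [47:12]
+// }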
+
+// enum PageTableEntry { Page(&mut PageDescriptor), Block(&mut BlockDescriptor), Etc(&mut u64), Invalid(&mut u64) }
+// impl PageTableEntry { fn new_from_entry_addr(&u64) }
+
+// If I have, for example, Table<L0> I can get from it N `Table<L1>` (via impl HierarchicalTable)
+// From Table<L1> I can get either `Table<L2>` (via impl HierarchicalTable) or `BlockDescriptor`
+// From Table<L2> I can get either `Table<L3>` (via impl HierarchicalTable) or `BlockDescriptor`
+// From Table<L3> I can only get `PageDescriptor` (because no impl HierarchicalTable exists)
+
+// enum PageTableEntry { Page(&mut PageDescriptor), Block(&mut BlockDescriptor), Etc(&mut u64), Invalid(&mut u64) }
+// return enum PageTableEntry constructed from table bits in u64
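+//
+// A sketch of that constructor's dispatch logic. Bits [1:0] select the kind:
+// 0b11 is a table descriptor at levels 0-2 but a page descriptor at level 3,
+// 0b01 is a block descriptor (valid at levels 1-2 only), and a clear bit 0
+// means the entry is invalid:
+//
+// fn classify_entry(raw: u64, level: usize) -> &'static str {
+//     match (raw & 0b11, level) {
+//         (0b11, 3) => "PageDescriptor",
+//         (0b11, _) => "TableDescriptor",
+//         (0b01, 1) | (0b01, 2) => "BlockDescriptor",
+//         _ => "Invalid",
+//     }
+// }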
+
+/*!
+ * Paging system uses a separate address space in top kernel region (TTBR1) to access
+ * entire physical memory contents.
+ * This mapping is not available to user space (user space uses TTBR0).
+ * Use the largest possible granule size to map physical memory since we want to use
+ * the least amount of memory for these mappings.
+ */
+
+// Check the largest VA size supported, calculate physical_memory_offset from it.
+//
+const PHYSICAL_MEMORY_OFFSET: u64 = 0xffff_8000_0000_0000; // higher-half base of the TTBR1 (kernel) VA space
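+
+// With this offset, converting between a physical address and its alias in the
+// kernel's linear map is plain offset arithmetic (a sketch, assuming all of
+// RAM is mapped contiguously at the offset):
+//
+// fn phys_to_virt(phys: u64) -> u64 {
+//     PHYSICAL_MEMORY_OFFSET + phys
+// }
+//
+// fn virt_to_phys(virt: u64) -> u64 {
+//     debug_assert!(virt >= PHYSICAL_MEMORY_OFFSET);
+//     virt - PHYSICAL_MEMORY_OFFSET
+// }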
+
+// AArch64:
+// ARMv8 ARM, Table D4-8 (p. 2021): check supported granule sizes, select alloc policy based on results.
+// TTBRn_ELx is the page-table base register (the x86 PDBR analog) for the corresponding translation regime.
+
+// ARMv8 ARM, p. 2068: the actual page descriptor formats
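+
+// Checking granule support at runtime (a sketch using raw inline asm;
+// ID_AA64MMFR0_EL1.TGran4 lives in bits [31:28] and reads 0b0000 when the
+// 4KiB granule is implemented):
+//
+// fn granule_4kib_supported() -> bool {
+//     let mmfr0: u64;
+//     unsafe { core::arch::asm!("mrs {}, ID_AA64MMFR0_EL1", out(reg) mmfr0) };
+//     (mmfr0 >> 28) & 0xf == 0
+// }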
+
+/// A 16KiB page (an optional AArch64 translation granule).
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Size16KiB {}
+
+impl PageSize for Size16KiB {
+ const SIZE: u64 = 16384;
+ const SIZE_AS_DEBUG_STR: &'static str = "16KiB";
+ const SHIFT: usize = 14;
+ const MASK: u64 = 0x3fff;
+}
+
+impl NotGiantPageSize for Size16KiB {}
+
+/// A “giant” 1GiB page.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Size1GiB {}
+
+impl PageSize for Size1GiB {
+ const SIZE: u64 = Size2MiB::SIZE * NUM_ENTRIES_4KIB;
+ const SIZE_AS_DEBUG_STR: &'static str = "1GiB";
+ const SHIFT: usize = 30; // 1 GiB = 1 << 30
+ const MASK: u64 = 0x3fff_ffff;
+}
+
+/// Errors from mapping layer (@todo use anyhow/snafu? thiserror?)
+pub enum TranslationError {
+ NoPage,
+}
+
+// Pointer to currently active page table
+// Could be either user space (TTBR0) or kernel space (TTBR1) -- ??
+pub struct ActivePageTable {
+ l0: Unique<Table<PageGlobalDirectory>>,
+}
+
+impl ActivePageTable {
+ pub unsafe fn new() -> ActivePageTable {
+ ActivePageTable {
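+ // @fixme `0` is only a placeholder: `Unique` requires a non-null pointer,
+ // so this must become a real table address (e.g. the recursive-mapping
+ // virtual address) before it is ever dereferenced.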
+ l0: Unique::new_unchecked(0 as *mut _),
+ }
+ }
+
+ fn l0(&self) -> &Table<PageGlobalDirectory> {
+ unsafe { self.l0.as_ref() }
+ }
+
+ fn l0_mut(&mut self) -> &mut Table<PageGlobalDirectory> {
+ unsafe { self.l0.as_mut() }
+ }
+
+ pub fn translate(&self, virtual_address: VirtAddr) -> Result<PhysAddr, TranslationError> {
+ let offset = virtual_address % Size4KiB::SIZE as usize; // @todo use the size of the last page of course
+ self.translate_page(Page::containing_address(virtual_address))
+ .map(|frame| frame.start_address() + offset)
+ }
+
+ fn translate_page(&self, page: Page) -> Result<PhysFrame, TranslationError> {
+ let l1 = self.l0().next_table(u64::from(page.l0_index()) as usize);
+ /*
+ let huge_page = || {
+ l1.and_then(|l1| {
+ let l1_entry = &l1[page.l1_index() as usize];
+ // 1GiB page?
+ if let Some(start_frame) = l1_entry.pointed_frame() {
+ if l1_entry.flags().read(STAGE1_DESCRIPTOR::TYPE)
+ != STAGE1_DESCRIPTOR::TYPE::Table.value
+ {
+ // address must be 1GiB aligned
+ //start_frame.is_aligned()
+ assert!(start_frame.number % (NUM_ENTRIES_4KIB * NUM_ENTRIES_4KIB) == 0);
+ return Ok(PhysFrame::from_start_address(
+ start_frame.number
+ + page.l2_index() * NUM_ENTRIES_4KIB
+ + page.l3_index(),
+ ));
+ }
+ }
+ if let Some(l2) = l1.next_table(page.l1_index()) {
+ let l2_entry = &l2[page.l2_index()];
+ // 2MiB page?
+ if let Some(start_frame) = l2_entry.pointed_frame() {
+ if l2_entry.flags().read(STAGE1_DESCRIPTOR::TYPE)
+ != STAGE1_DESCRIPTOR::TYPE::Table
+ {
+ // address must be 2MiB aligned
+ assert!(start_frame.number % NUM_ENTRIES_4KIB == 0);
+ return Ok(PhysFrame::from_start_address(
+ start_frame.number + page.l3_index(),
+ ));
+ }
+ }
+ }
+ Err(TranslationError::NoPage)
+ })
+ };
+ */
+ let v = l1
+ .and_then(|l1| l1.next_table(u64::from(page.l1_index()) as usize))
+ .and_then(|l2| l2.next_table(u64::from(page.l2_index()) as usize))
+ .and_then(|l3| Some(l3[u64::from(page.l3_index()) as usize])); //.pointed_frame())
+ // .ok_or(TranslationError::NoPage)
+ // .or_else(huge_page)
+ Ok(v.unwrap().into())
+ }
+
+ pub fn map_to<A>(&mut self, page: Page, frame: PhysFrame, flags: EntryFlags, allocator: &mut A)
+ where
+ A: FrameAllocator,
+ {
+ let l0 = self.l0_mut();
+ let l1 = l0.next_table_create(u64::from(page.l0_index()) as usize, allocator);
+ let l2 = l1.next_table_create(u64::from(page.l1_index()) as usize, allocator);
+ let l3 = l2.next_table_create(u64::from(page.l2_index()) as usize, allocator);
+
+ assert_eq!(
+ l3[u64::from(page.l3_index()) as usize],
+ 0 /*.is_unused()*/
+ );
+ l3[u64::from(page.l3_index()) as usize] = PageTableEntry::PageDescriptor(
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(u64::from(frame))
+ + flags // @todo properly extract flags
+ + STAGE1_DESCRIPTOR::VALID::True,
+ )
+ .into();
+ }
+
+ pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+ where
+ A: FrameAllocator,
+ {
+ let frame = allocator.allocate_frame().expect("out of memory");
+ self.map_to(page, frame, flags, allocator)
+ }
+
+ pub fn identity_map<A>(&mut self, frame: PhysFrame, flags: EntryFlags, allocator: &mut A)
+ where
+ A: FrameAllocator,
+ {
+ let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
+ self.map_to(page, frame, flags, allocator)
+ }
+
+ fn unmap<A>(&mut self, page: Page, _allocator: &mut A)
+ where
+ A: FrameAllocator,
+ {
+ // use aarch64::instructions::tlb;
+ // use x86_64::VirtAddr;
+
+ assert!(self.translate(page.start_address()).is_ok());
+
+ let l3 = self
+ .l0_mut()
+ .next_table_mut(u64::from(page.l0_index()) as usize)
+ .and_then(|l1| l1.next_table_mut(u64::from(page.l1_index()) as usize))
+ .and_then(|l2| l2.next_table_mut(u64::from(page.l2_index()) as usize))
+ .expect("mapping code does not support huge pages");
+ let _frame = l3[u64::from(page.l3_index()) as usize];
+ // .pointed_frame()
+ // .unwrap();
+ l3[u64::from(page.l3_index()) as usize] = 0; /*.set_unused(); */
+ // tlb::flush(VirtAddr(page.start_address()));
+ // TODO free p(1,2,3) table if empty
+ //allocator.deallocate_frame(frame);
+ }
+}
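+
+// There is no aarch64 `tlb::flush` helper to drop in for the commented-out
+// x86 one in `unmap()` above; a sketch of the equivalent sequence with raw
+// inline asm (TLBI VAAE1IS takes VA[55:12] in the low bits of its operand and
+// broadcasts the invalidation to the inner shareable domain):
+//
+// fn flush_tlb_page(va: u64) {
+//     unsafe {
+//         core::arch::asm!(
+//             "dsb ishst",         // make the PTE update visible first
+//             "tlbi vaae1is, {0}", // invalidate this VA for all ASIDs at EL1&0
+//             "dsb ish",           // wait for the invalidation to complete
+//             "isb",
+//             in(reg) va >> 12,
+//         );
+//     }
+// }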
+
+// Abstractions for page table entries.
+
+/// The error returned by the `PageTableEntry::frame` method.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum FrameError {
+ /// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
+ FrameNotPresent,
+ /// The entry has the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
+ /// as return type, so a huge frame can't be returned.
+ HugeFrame,
+}
+
+/// A 64-bit page table entry.
+// pub struct PageTableEntry {
+// entry: u64,
+// }
+
+const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
+/*
+impl PageTableEntry {
+ /// Creates an unused page table entry.
+ pub fn new() -> Self {
+ PageTableEntry::Invalid
+ }
+
+ /// Returns whether this entry is zero.
+ pub fn is_unused(&self) -> bool {
+ self.entry == 0
+ }
+
+ /// Sets this entry to zero.
+ pub fn set_unused(&mut self) {
+ self.entry = 0;
+ }
+
+ /// Returns the flags of this entry.
+ pub fn flags(&self) -> EntryFlags {
+ EntryFlags::new(self.entry)
+ }
+
+ /// Returns the physical address mapped by this entry, might be zero.
+ pub fn addr(&self) -> PhysAddr {
+ PhysAddr::new(self.entry & ADDR_MASK)
+ }
+
+ /// Returns the physical frame mapped by this entry.
+ ///
+ /// Returns the following errors:
+ ///
+ /// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
+ /// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
+ /// `addr` function must be used)
+ pub fn frame(&self) -> Result<PhysFrame, FrameError> {
+ if !self.flags().read(STAGE1_DESCRIPTOR::VALID) {
+ Err(FrameError::FrameNotPresent)
+ // } else if self.flags().contains(EntryFlags::HUGE_PAGE) {
+ // Err(FrameError::HugeFrame)
+ } else {
+ Ok(PhysFrame::containing_address(self.addr()))
+ }
+ }
+
+ /// Map the entry to the specified physical address with the specified flags.
+ pub fn set_addr(&mut self, addr: PhysAddr, flags: EntryFlags) {
+ assert!(addr.is_aligned(Size4KiB::SIZE));
+ self.entry = addr.as_u64() | flags.bits();
+ }
+
+ /// Map the entry to the specified physical frame with the specified flags.
+ pub fn set_frame(&mut self, frame: PhysFrame, flags: EntryFlags) {
+ // assert!(!flags.contains(EntryFlags::HUGE_PAGE));
+ self.set_addr(frame.start_address(), flags)
+ }
+
+ /// Sets the flags of this entry.
+ pub fn set_flags(&mut self, flags: EntryFlags) {
+ // Todo: extract ADDR from self and replace all flags completely (?)
+ self.entry = self.addr().as_u64() | flags.bits();
+ }
+}
+
+impl fmt::Debug for PageTableEntry {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut f = f.debug_struct("PageTableEntry");
+ f.field("addr", &self.addr());
+ f.field("flags", &self.flags());
+ f.finish()
+ }
+}*/
+
+// Verbatim from https://github.com/rust-osdev/x86_64/blob/aa9ae54657beb87c2a491f2ab2140b2332afa6ba/src/structures/paging/frame.rs
+// Abstractions for default-sized and huge physical memory frames.
+
+/// A physical memory frame.
+/// Frame is an addressable unit of the physical address space.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[repr(C)]
+pub struct PhysFrame<S: PageSize = Size4KiB> {
+ start_address: PhysAddr,
+ size: PhantomData<S>,
+}
+
+impl From<u64> for PhysFrame {
+ fn from(address: u64) -> PhysFrame {
+ PhysFrame::containing_address(PhysAddr::new(address))
+ }
+}
+
+impl<S: PageSize> From<PhysFrame<S>> for u64 {
+ fn from(frame: PhysFrame<S>) -> u64 {
+ frame.start_address.as_u64()
+ }
+}
+
+impl<S: PageSize> PhysFrame<S> {
+ /// Returns the frame that starts at the given physical address.
+ ///
+ /// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
+ pub fn from_start_address(address: PhysAddr) -> Result<Self, ()> {
+ if !address.is_aligned(S::SIZE) {
+ return Err(());
+ }
+ Ok(PhysFrame::containing_address(address))
+ }
+
+ /// Returns the frame that contains the given physical address.
+ pub fn containing_address(address: PhysAddr) -> Self {
+ PhysFrame {
+ start_address: address.align_down(S::SIZE),
+ size: PhantomData,
+ }
+ }
+
+ /// Returns the start address of the frame.
+ pub fn start_address(&self) -> PhysAddr {
+ self.start_address
+ }
+
+ /// Returns the size of the frame (4KiB, 2MiB or 1GiB).
+ pub fn size(&self) -> u64 {
+ S::SIZE
+ }
+
+ /// Returns a range of frames, exclusive `end`.
+ pub fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
+ PhysFrameRange { start, end }
+ }
+
+ /// Returns a range of frames, inclusive `end`.
+ pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
+ PhysFrameRangeInclusive { start, end }
+ }
+}
+
+impl<S: PageSize> fmt::Debug for PhysFrame<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_fmt(format_args!(
+ "PhysFrame[{}]({:#x})",
+ S::SIZE_AS_DEBUG_STR,
+ self.start_address().as_u64()
+ ))
+ }
+}
+
+impl<S: PageSize> Add<u64> for PhysFrame<S> {
+ type Output = Self;
+ fn add(self, rhs: u64) -> Self::Output {
+ PhysFrame::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
+ }
+}
+
+impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {
+ fn add_assign(&mut self, rhs: u64) {
+ *self = self.clone() + rhs;
+ }
+}
+
+impl<S: PageSize> Sub<u64> for PhysFrame<S> {
+ type Output = Self;
+ fn sub(self, rhs: u64) -> Self::Output {
+ PhysFrame::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
+ }
+}
+
+impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
+ fn sub_assign(&mut self, rhs: u64) {
+ *self = self.clone() - rhs;
+ }
+}
+
+impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
+ type Output = u64;
+ fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
+ (self.start_address - rhs.start_address) / S::SIZE
+ }
+}
+
+/// A range of physical memory frames, exclusive of the upper bound.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(C)]
+pub struct PhysFrameRange<S: PageSize = Size4KiB> {
+ /// The start of the range, inclusive.
+ pub start: PhysFrame<S>,
+ /// The end of the range, exclusive.
+ pub end: PhysFrame<S>,
+}
+
+impl<S: PageSize> PhysFrameRange<S> {
+ /// Returns whether the range contains no frames.
+ pub fn is_empty(&self) -> bool {
+ !(self.start < self.end)
+ }
+}
+
+impl<S: PageSize> Iterator for PhysFrameRange<S> {
+ type Item = PhysFrame<S>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start < self.end {
+ let frame = self.start.clone();
+ self.start += 1;
+ Some(frame)
+ } else {
+ None
+ }
+ }
+}
+
+impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("PhysFrameRange")
+ .field("start", &self.start)
+ .field("end", &self.end)
+ .finish()
+ }
+}
+
+/// A range of physical memory frames, inclusive of the upper bound.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(C)]
+pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
+ /// The start of the range, inclusive.
+ pub start: PhysFrame<S>,
+ /// The end of the range, inclusive.
+ pub end: PhysFrame<S>,
+}
+
+impl<S: PageSize> PhysFrameRangeInclusive<S> {
+ /// Returns whether the range contains no frames.
+ pub fn is_empty(&self) -> bool {
+ !(self.start <= self.end)
+ }
+}
+
+impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
+ type Item = PhysFrame<S>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start <= self.end {
+ let frame = self.start.clone();
+ self.start += 1;
+ Some(frame)
+ } else {
+ None
+ }
+ }
+}
+
+impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("PhysFrameRangeInclusive")
+ .field("start", &self.start)
+ .field("end", &self.end)
+ .finish()
+ }
+}
+
+// Verbatim from https://github.com/rust-osdev/x86_64/blob/aa9ae54657beb87c2a491f2ab2140b2332afa6ba/src/structures/paging/page.rs
+// Abstractions for default-sized and huge virtual memory pages.
+
+// x86_64 page level numbering: P4 -> P3 -> P2 -> P1
+// armv8a page level numbering: L0 -> L1 -> L2 -> L3
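+//
+// With a 4KiB granule and 48-bit VAs, the four 9-bit indices sit at fixed bit
+// positions; the `Page::l0_index()..l3_index()` helpers below boil down to
+// this (a sketch):
+//
+// fn table_indices(va: u64) -> (u64, u64, u64, u64) {
+//     (
+//         (va >> 39) & 0x1ff, // L0 index, bits [47:39]
+//         (va >> 30) & 0x1ff, // L1 index, bits [38:30]
+//         (va >> 21) & 0x1ff, // L2 index, bits [29:21]
+//         (va >> 12) & 0x1ff, // L3 index, bits [20:12]
+//     )
+// }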
+
+/// A virtual memory page.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[repr(C)]
+pub struct Page<S: PageSize = Size4KiB> {
+ start_address: VirtAddr,
+ size: PhantomData<S>,
+}
+
+impl<S: PageSize> Page<S> {
+ /// The page size in bytes.
+ pub const SIZE: u64 = S::SIZE;
+
+ /// Returns the page that starts at the given virtual address.
+ ///
+ /// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
+ pub fn from_start_address(address: VirtAddr) -> Result<Self, ()> {
+ if !address.is_aligned(S::SIZE) {
+ return Err(());
+ }
+ Ok(Page::containing_address(address))
+ }
+
+ /// Returns the page that contains the given virtual address.
+ pub fn containing_address(address: VirtAddr) -> Self {
+ Page {
+ start_address: address.align_down(S::SIZE),
+ size: PhantomData,
+ }
+ }
+
+ /// Returns the start address of the page.
+ pub fn start_address(&self) -> VirtAddr {
+ self.start_address
+ }
+
+ /// Returns the size of the page (4KiB, 2MiB or 1GiB).
+ pub const fn size(&self) -> u64 {
+ S::SIZE
+ }
+
+ /// Returns the level 0 page table index of this page.
+ pub fn l0_index(&self) -> u9 {
+ self.start_address().l0_index()
+ }
+
+ /// Returns the level 1 page table index of this page.
+ pub fn l1_index(&self) -> u9 {
+ self.start_address().l1_index()
+ }
+
+ /// Returns a range of pages, exclusive `end`.
+ pub fn range(start: Self, end: Self) -> PageRange<S> {
+ PageRange { start, end }
+ }
+
+ /// Returns a range of pages, inclusive `end`.
+ pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
+ PageRangeInclusive { start, end }
+ }
+}
+
+impl<S: NotGiantPageSize> Page<S> {
+ /// Returns the level 2 page table index of this page.
+ pub fn l2_index(&self) -> u9 {
+ self.start_address().l2_index()
+ }
+}
+
+impl Page<Size1GiB> {
+ /// Returns the 1GiB memory page with the specified page table indices.
+ pub fn from_page_table_indices_1gib(l0_index: u9, l1_index: u9) -> Self {
+ use bit_field::BitField;
+
+ let mut addr = 0;
+ addr.set_bits(39..48, u64::from(l0_index));
+ addr.set_bits(30..39, u64::from(l1_index));
+ Page::containing_address(VirtAddr::new(addr))
+ }
+}
+
+impl Page<Size2MiB> {
+ /// Returns the 2MiB memory page with the specified page table indices.
+ pub fn from_page_table_indices_2mib(l0_index: u9, l1_index: u9, l2_index: u9) -> Self {
+ use bit_field::BitField;
+
+ let mut addr = 0;
+ addr.set_bits(39..48, u64::from(l0_index));
+ addr.set_bits(30..39, u64::from(l1_index));
+ addr.set_bits(21..30, u64::from(l2_index));
+ Page::containing_address(VirtAddr::new(addr))
+ }
+}
+
+impl Page<Size4KiB> {
+ /// Returns the 4KiB memory page with the specified page table indices.
+ pub fn from_page_table_indices(l0_index: u9, l1_index: u9, l2_index: u9, l3_index: u9) -> Self {
+ use bit_field::BitField;
+
+ let mut addr = 0;
+ addr.set_bits(39..48, u64::from(l0_index));
+ addr.set_bits(30..39, u64::from(l1_index));
+ addr.set_bits(21..30, u64::from(l2_index));
+ addr.set_bits(12..21, u64::from(l3_index));
+ Page::containing_address(VirtAddr::new(addr))
+ }
+
+ /// Returns the level 3 page table index of this page.
+ pub fn l3_index(&self) -> u9 {
+ self.start_address().l3_index()
+ }
+}
+
+impl<S: PageSize> fmt::Debug for Page<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_fmt(format_args!(
+ "Page<{}>({:#x})",
+ S::SIZE_AS_DEBUG_STR,
+ self.start_address().as_u64()
+ ))
+ }
+}
+
+impl<S: PageSize> Add<u64> for Page<S> {
+ type Output = Self;
+ fn add(self, rhs: u64) -> Self::Output {
+ Page::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
+ }
+}
+
+impl<S: PageSize> AddAssign<u64> for Page<S> {
+ fn add_assign(&mut self, rhs: u64) {
+ *self = self.clone() + rhs;
+ }
+}
+
+impl<S: PageSize> Sub<u64> for Page<S> {
+ type Output = Self;
+ fn sub(self, rhs: u64) -> Self::Output {
+ Page::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
+ }
+}
+
+impl<S: PageSize> SubAssign<u64> for Page<S> {
+ fn sub_assign(&mut self, rhs: u64) {
+ *self = self.clone() - rhs;
+ }
+}
+
+impl<S: PageSize> Sub<Self> for Page<S> {
+ type Output = u64;
+ fn sub(self, rhs: Self) -> Self::Output {
+ (self.start_address - rhs.start_address) / S::SIZE
+ }
+}
+
+/// A range of pages with exclusive upper bound.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(C)]
+pub struct PageRange<S: PageSize = Size4KiB> {
+ /// The start of the range, inclusive.
+ pub start: Page<S>,
+ /// The end of the range, exclusive.
+ pub end: Page<S>,
+}
+
+impl<S: PageSize> PageRange<S> {
+ /// Returns whether this range contains no pages.
+ pub fn is_empty(&self) -> bool {
+ self.start >= self.end
+ }
+}
+
+impl<S: PageSize> Iterator for PageRange<S> {
+ type Item = Page<S>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start < self.end {
+ let page = self.start.clone();
+ self.start += 1;
+ Some(page)
+ } else {
+ None
+ }
+ }
+}
+
+impl PageRange<Size2MiB> {
+ /// Converts the range of 2MiB pages to a range of 4KiB pages.
+ pub fn as_4kib_page_range(self) -> PageRange<Size4KiB> {
+ PageRange {
+ start: Page::containing_address(self.start.start_address()),
+ end: Page::containing_address(self.end.start_address()),
+ }
+ }
+}
+
+impl<S: PageSize> fmt::Debug for PageRange<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("PageRange")
+ .field("start", &self.start)
+ .field("end", &self.end)
+ .finish()
+ }
+}
+
+/// A range of pages with inclusive upper bound.
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(C)]
+pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
+ /// The start of the range, inclusive.
+ pub start: Page<S>,
+ /// The end of the range, inclusive.
+ pub end: Page<S>,
+}
+
+impl<S: PageSize> PageRangeInclusive<S> {
+ /// Returns whether this range contains no pages.
+ pub fn is_empty(&self) -> bool {
+ self.start > self.end
+ }
+}
+
+impl<S: PageSize> Iterator for PageRangeInclusive<S> {
+ type Item = Page<S>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start <= self.end {
+ let page = self.start.clone();
+ self.start += 1;
+ Some(page)
+ } else {
+ None
+ }
+ }
+}
+
+impl<S: PageSize> fmt::Debug for PageRangeInclusive<S> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("PageRangeInclusive")
+ .field("start", &self.start)
+ .field("end", &self.end)
+ .finish()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test_case]
+ pub fn test_page_ranges() {
+ let page_size = Size4KiB::SIZE;
+ let number = 1000;
+
+ let start_addr = VirtAddr::new(0xdead_beef);
+ let start: Page = Page::containing_address(start_addr);
+ let end = start.clone() + number;
+
+ let mut range = Page::range(start.clone(), end.clone());
+ for i in 0..number {
+ assert_eq!(
+ range.next(),
+ Some(Page::containing_address(start_addr + page_size * i))
+ );
+ }
+ assert_eq!(range.next(), None);
+
+ let mut range_inclusive = Page::range_inclusive(start, end);
+ for i in 0..=number {
+ assert_eq!(
+ range_inclusive.next(),
+ Some(Page::containing_address(start_addr + page_size * i))
+ );
+ }
+ assert_eq!(range_inclusive.next(), None);
+ }
+}
+
+/*
+ * SPDX-License-Identifier: BSL-1.0 - todo this is from Sergio Benitez cs140e
+ */
+// Abstractions for page tables.
+
+// to get L0 we must allocate a few frames from boot region allocator.
+// So, first we init the dtb, parse mem-regions from there, then init boot_info page and start mmu,
+// this part will be inited in mmu::init():
+//pub const L0: *mut Table = &mut LVL0_TABLE as *mut _; // was Table
+// @fixme this is for recursive page tables!!
+
+impl<L> Table<L>
+where
+ L: HierarchicalLevel,
+{
+ fn next_table_address(&self, index: usize) -> Option<usize> {
+ let entry_flags = EntryRegister::new(self[index]);
+ if entry_flags.matches_all(STAGE1_DESCRIPTOR::VALID::True + STAGE1_DESCRIPTOR::TYPE::Table)
+ {
+ let table_address = self as *const _ as usize;
+ Some((table_address << 9) | (index << 12))
+ } else {
+ None
+ }
+ }
+
+ pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
+ self.next_table_address(index)
+ .map(|address| unsafe { &*(address as *const _) })
+ }
+
+ pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
+ self.next_table_address(index)
+ .map(|address| unsafe { &mut *(address as *mut _) })
+ }
+
+ pub fn next_table_create<A>(
+ &mut self,
+ index: usize,
+ allocator: &mut A,
+ ) -> &mut Table<L::NextLevel>
+ where
+ A: FrameAllocator,
+ {
+ if self.next_table(index).is_none() {
+ assert!(
+ EntryRegister::new(self.entries[index]).read(STAGE1_DESCRIPTOR::TYPE)
+ == STAGE1_DESCRIPTOR::TYPE::Table.value,
+ "mapping code does not support huge pages"
+ );
+ let frame = allocator.allocate_frame().expect("no frames available");
+ self.entries[index] = PageTableEntry::TableDescriptor(
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(u64::from(frame))
+ + STAGE1_DESCRIPTOR::VALID::True,
+ )
+ .into();
+ // self.entries[index]
+ // .set_frame(frame, STAGE1_DESCRIPTOR::VALID::True /*| WRITABLE*/);
+ self.next_table_mut(index).unwrap().zero();
+ }
+ self.next_table_mut(index).unwrap()
+ }
+}
+
+// ORIGINAL MMU.RS CODE
+
+//static mut LVL0_TABLE: Table = Table {
+// entries: [0; NUM_ENTRIES_4KIB],
+// level: PhantomData,
+//};
diff --git a/nucleus/src/arch/aarch64/memory/mod.rs b/nucleus/src/arch/aarch64/memory/mod.rs
index 2df8fe7..007f2d9 100644
--- a/nucleus/src/arch/aarch64/memory/mod.rs
+++ b/nucleus/src/arch/aarch64/memory/mod.rs
@@ -13,9 +13,31 @@ use {
mod addr;
pub mod mmu;
+#[path = "mmu-experimental.rs"] // module names cannot contain hyphens
+pub mod mmu_experimental;
+pub use mmu_experimental::*;
+
+// mod area_frame_allocator;
+// pub use self::area_frame_allocator::AreaFrameAllocator;
+// mod boot_allocator; // Hands out physical memory obtained from devtree
+// use self::paging::PAGE_SIZE;
+
pub use addr::PhysAddr;
pub use addr::VirtAddr;
+use mmu_experimental::PhysFrame;
+
+// @todo ??
+pub trait FrameAllocator {
+ fn allocate_frame(&mut self) -> Option<PhysFrame>; // @todo Result<>
+ fn deallocate_frame(&mut self, frame: PhysFrame);
+}
+
+// Identity-map things for now.
+//
+// > The simplest form is a table with 1024 32-bit entries starting at a
+// > 0x4000-aligned address, where each entry describes a 1 MiB memory section.
+// > On the RPi3 only the bottom 1024 entries are relevant, as it has 1 GiB of memory.
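+//
+// A sketch of that identity mapping using the experimental API above, one
+// 4KiB frame at a time rather than 1 MiB sections (assumes a working frame
+// allocator and that `EntryFlags` implements `Default`):
+//
+// fn identity_map_ram<A: FrameAllocator>(table: &mut ActivePageTable, allocator: &mut A) {
+//     let start = PhysFrame::containing_address(PhysAddr::new(0));
+//     let end = PhysFrame::containing_address(PhysAddr::new(0x4000_0000)); // 1 GiB
+//     for frame in PhysFrame::range(start, end) {
+//         table.identity_map(frame, EntryFlags::default(), allocator);
+//     }
+// }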
+
// aarch64 granules and page sizes howto:
// https://stackoverflow.com/questions/34269185/simultaneous-existence-of-different-sized-pages-on-aarch64