/* * SPDX-License-Identifier: BlueOak-1.0.0 * Copyright (c) Berkus Decker */ //! MMU initialisation. //! //! Paging is mostly based on [previous version](https://os.phil-opp.com/page-tables/) of //! Phil Opp's [paging guide](https://os.phil-opp.com/paging-implementation/) and //! [ARMv8 ARM memory addressing](https://static.docs.arm.com/100940/0100/armv8_a_address%20translation_100940_0100_en.pdf). //! It includes ideas from Sergio Benitez' cs140e OSdev course material on type-safe access. #![allow(dead_code)] use crate::memory::PageSize; use { crate::memory::{ page_size::{Size1GiB, Size2MiB, Size4KiB}, virt_page::Page, PhysAddr, PhysFrame, VirtAddr, }, core::{ marker::PhantomData, ops::{Index, IndexMut}, ptr::Unique, }, register::register_bitfields, snafu::Snafu, }; /* * With 4k page granule, a virtual address is split into 4 lookup parts * spanning 9 bits each: * * _______________________________________________ * | | | | | | | * | signx | Lv0 | Lv1 | Lv2 | Lv3 | off | * |_______|_______|_______|_______|_______|_______| * 63-48 47-39 38-30 29-21 20-12 11-00 * * mask page size * * Lv0: FF8000000000 -- * Lv1: 7FC0000000 * off: 3FFFFFFF 1G * Lv2: 3FE00000 * off: 1FFFFF 2M * Lv3: 1FF000 * off: FFF 4K * * RPi3 supports 64K and 4K granules, also 40-bit physical addresses. * It also can address only 1G physical memory, so these 40-bit phys addresses are a fake. * * 48-bit virtual address space; different mappings in VBAR0 (EL0) and VBAR1 (EL1+). */ register_bitfields! { u64, VA_INDEX [ LEVEL0 OFFSET(39) NUMBITS(9) [], LEVEL1 OFFSET(30) NUMBITS(9) [], LEVEL2 OFFSET(21) NUMBITS(9) [], LEVEL3 OFFSET(12) NUMBITS(9) [], OFFSET OFFSET(0) NUMBITS(12) [] ] } register_bitfields! 
{ u64, // AArch64 Reference Manual page 2150, D5-2445 TABLE_DESCRIPTOR [ // In table descriptors NSTable_EL3 OFFSET(63) NUMBITS(1) [], /// Access Permissions for subsequent tables APTable OFFSET(61) NUMBITS(2) [ RW_EL1 = 0b00, RW_EL1_EL0 = 0b01, RO_EL1 = 0b10, RO_EL1_EL0 = 0b11 ], // User execute-never for subsequent tables UXNTable OFFSET(60) NUMBITS(1) [ Execute = 0, NeverExecute = 1 ], /// Privileged execute-never for subsequent tables PXNTable OFFSET(59) NUMBITS(1) [ Execute = 0, NeverExecute = 1 ], // In block descriptors // OS-specific data OSData OFFSET(55) NUMBITS(4) [], // User execute-never UXN OFFSET(54) NUMBITS(1) [ Execute = 0, NeverExecute = 1 ], /// Privileged execute-never PXN OFFSET(53) NUMBITS(1) [ Execute = 0, NeverExecute = 1 ], // @fixme ?? where is this described CONTIGUOUS OFFSET(52) NUMBITS(1) [ False = 0, True = 1 ], // @fixme ?? where is this described DIRTY OFFSET(51) NUMBITS(1) [ False = 0, True = 1 ], /// Various address fields, depending on use case LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [], // [47:21] NEXT_LVL_TABLE_ADDR_4KiB OFFSET(12) NUMBITS(36) [], // [47:12] // @fixme ?? 
where is this described NON_GLOBAL OFFSET(11) NUMBITS(1) [ False = 0, True = 1 ], /// Access flag AF OFFSET(10) NUMBITS(1) [ False = 0, True = 1 ], /// Share-ability field SH OFFSET(8) NUMBITS(2) [ OuterShareable = 0b10, InnerShareable = 0b11 ], /// Access Permissions AP OFFSET(6) NUMBITS(2) [ RW_EL1 = 0b00, RW_EL1_EL0 = 0b01, RO_EL1 = 0b10, RO_EL1_EL0 = 0b11 ], NS_EL3 OFFSET(5) NUMBITS(1) [], /// Memory attributes index into the MAIR_EL1 register AttrIndx OFFSET(2) NUMBITS(3) [], TYPE OFFSET(1) NUMBITS(1) [ Block = 0, Table = 1 ], VALID OFFSET(0) NUMBITS(1) [ False = 0, True = 1 ] ] } type VaIndex = register::FieldValue; type EntryFlags = register::FieldValue; // type EntryRegister = register::LocalRegisterCopy; // Possible mappings: // * TTBR0 pointing to user page global directory // * TTBR0 pointing to user page upper directory (only if mmu is set up differently) // * TTBR1 pointing to kernel page global directory with full physmem access // * Paging system uses a separate address space in top kernel region (TTBR1) to access // * entire physical memory contents. // * This mapping is not available to user space (user space uses TTBR0). // * // * Use the largest possible granule size to map physical memory since we want to use // * the least amount of memory for these mappings. // TTBR0 Page Global Directory // Level 0 descriptors can only output the address of a Level 1 table. // Level 3 descriptors cannot point to another table and can only output block addresses. // The format of the table is therefore slightly different for Level 3. 
//
// This means:
// - a level 0 page table may contain only TableDescriptors
// - a level 1 or 2 page table may contain TableDescriptors or Lvl2BlockDescriptors (PageDescriptors)
// - a level 3 page table may contain only PageDescriptors
//
// Level / Types | Table Descriptor | Lvl2BlockDescriptor (PageDescriptor)
// --------------+------------------+--------------------------------------
//       0       |        X         |        (with 4KiB granule)
//       1       |        X         |   X (1GiB range)
//       2       |        X         |   X (2MiB range)
//       3       |                  |   X (4KiB range) -- called PageDescriptor;
//                                      encoding actually the same as in Table Descriptor
//
// Translation granule affects the size of the block addressed.
// Let's use a 4KiB granule on RPi3 for simplicity.
// 1. Set 4KiB granule size to use for the PGD -- we could use a 16KiB granule instead?
//    - need to measure the waste level
//    - but let's stick with 4KiB for now
//
// NOTE(review): the level parameters below were garbled in the source text
// ("Table I can get ... N `Table`"); `Table<Ln>` is the evident intent — confirm.
// If I have, for example, a Table<L0> I can get from it N Table<L1> (via impl HierarchicalTable).
// From a Table<L1> I can get either a Table<L2> (via impl HierarchicalTable) or a BlockDescriptor.
// From a Table<L2> I can get either a Table<L3> (via impl HierarchicalTable) or a BlockDescriptor.
// From a Table<L3> I can only get a PageDescriptor (because no impl HierarchicalTable exists).

/// GlobalDirectory [ UpperDirectory entries ]
/// UpperDirectory [ PageDirectory | GiantPage ]
/// PageDirectory [ PageTable | LargePage ]
/// PageTable [ PageFrames ]
// Do those as separate types, then in accessors allow only certain combinations,
// e.g.
// struct UpperDirectoryEntry; // DirectoryEntry // struct PageDirectoryEntry; // DirectoryEntry // struct GiantPageFrame; // PageFrame // struct PageTableEntry; // DirectoryEntry // struct LargePageFrame; // PageFrame // struct PageFrame; // PageFrame // enum PageTableEntry { Page(&mut PageDescriptor), Block(&mut BlockDescriptor), Etc(&mut u64), Invalid(&mut u64) } // impl PageTabelEntry { fn new_from_entry_addr(&u64) } // return enum PageTableEntry constructed from table bits in u64 enum L0Entries { UpperDirectoryEntry(VirtAddr), } enum L1Entries { PageDirectoryEntry(VirtAddr), GiantPageFrame(PhysFrame), } enum L2Entries { PageTableEntry(VirtAddr), LargePageFrame(PhysFrame), } enum L3Entries { PageFrame(PhysFrame), } enum Frames { GiantPageFrame, LargePageFrame, PageFrame, } // ---- // ---- // ---- Table levels // ---- // ---- /// L0 table -- only pointers to L1 tables pub enum L0PageGlobalDirectory {} /// L1 tables -- pointers to L2 tables or giant 1GiB pages pub enum L1PageUpperDirectory {} /// L2 tables -- pointers to L3 tables or huge 2MiB pages pub enum L2PageDirectory {} /// L3 tables -- only pointers to 4/16KiB pages pub enum L3PageTable {} /// Shared trait for specific table levels. pub trait TableLevel {} /// Shared trait for hierarchical table levels. /// /// Specifies what is the next level of page table hierarchy. pub trait HierarchicalLevel: TableLevel { /// Level of the next translation table below this one. type NextLevel: TableLevel; // fn translate() -> Directory; } /// Specify allowed page size for each level. pub trait HierarchicalPageLevel: TableLevel { /// Size of the page that can be contained in this table level. 
type PageLevel: PageSize; } impl TableLevel for L0PageGlobalDirectory {} impl TableLevel for L1PageUpperDirectory {} impl TableLevel for L2PageDirectory {} impl TableLevel for L3PageTable {} impl HierarchicalLevel for L0PageGlobalDirectory { type NextLevel = L1PageUpperDirectory; } impl HierarchicalLevel for L1PageUpperDirectory { type NextLevel = L2PageDirectory; } impl HierarchicalLevel for L2PageDirectory { type NextLevel = L3PageTable; } // L3PageTables do not have next level, therefore they are not HierarchicalLevel // L0PageGlobalDirectory does not contain pages, so they are not HierarchicalPageLevel impl HierarchicalPageLevel for L1PageUpperDirectory { type PageLevel = Size1GiB; } impl HierarchicalPageLevel for L2PageDirectory { type PageLevel = Size2MiB; } impl HierarchicalPageLevel for L3PageTable { type PageLevel = Size4KiB; } // ---- // ---- // ---- Directory // ---- // ---- // Maximum OA is 48 bits. // // Level 0 table descriptor has Output Address in [47:12] --> level 1 table // Level 0 descriptor cannot be block descriptor. // // Level 1 table descriptor has Output Address in [47:12] --> level 2 table // Level 1 block descriptor has Output Address in [47:30] // // Level 2 table descriptor has Output Address in [47:12] --> level 3 table // Level 2 block descriptor has Output Address in [47:21] // // Level 3 block descriptor has Output Address in [47:12] // Upper Attributes [63:51] // Res0 [50:48] // Lower Attributes [11:2] // 11b [1:0] // Each table consists of 2**9 entries const TABLE_BITS: usize = 9; const INDEX_MASK: usize = (1 << TABLE_BITS) - 1; static_assertions::const_assert!(INDEX_MASK == 0x1ff); // @todo Table in mmu.rs /// MMU address translation table. 
/// Contains just u64 internally, provides enum interface on top #[repr(C)] #[repr(align(4096))] struct Directory { entries: [u64; 1 << TABLE_BITS], level: PhantomData, } impl Directory { fn next(&self, address: VirtAddr) -> Option { let va = VaIndex::new(address.to_u64()); let index = va.read(VA_INDEX::LEVEL0); match self.next_table_address(index.into()) { Some(phys_addr) => Some(L0Entries::UpperDirectoryEntry(phys_addr.user_to_kernel())), None => None, } } } impl Directory { fn next(&self, address: VirtAddr) -> Option { let va = VaIndex::new(address.to_u64()); let index = va.read(VA_INDEX::LEVEL1); match self.next_table_address(index.into()) { Some(phys_addr) => Some(L1Entries::PageDirectoryEntry(phys_addr.user_to_kernel())), None => None, // @todo could be 1GiB frame } } } impl Directory { fn next(&self, address: VirtAddr) -> Option { let va = VaIndex::new(address.to_u64()); let index = va.read(VA_INDEX::LEVEL2); match self.next_table_address(index.into()) { Some(phys_addr) => Some(L2Entries::PageTableEntry(phys_addr.user_to_kernel())), None => None, // @todo could be 2MiB frame } } } impl Directory { fn next(&self, address: VirtAddr) -> Option { let va = VaIndex::new(address.as_u64()); let index = va.read(VA_INDEX::LEVEL3); match self.next_table_address(index.into()) { // @fixme wrong function Some(phys_addr) => Some(L3Entries::PageFrame(phys_addr.user_to_kernel())), None => None, // Nothing there } } } // Implementation code shared for all levels of page tables impl Directory where Level: TableLevel, { /// Construct a zeroed table at given physical location. // unsafe fn at(location: PhysAddr) -> &Self {} /// Construct and return zeroed table. fn zeroed() -> Self { Self { entries: [0; 1 << TABLE_BITS], level: PhantomData, } } /// Zero out entire table. 
pub fn zero(&mut self) { for entry in self.entries.iter_mut() { *entry = 0; } } } impl Index for Directory where Level: TableLevel, { type Output = u64; fn index(&self, index: usize) -> &Self::Output { &self.entries[index] } } impl IndexMut for Directory where Level: TableLevel, { fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut self.entries[index] } } impl Directory where Level: HierarchicalLevel, { fn next_table_address(&self, index: usize) -> Option { let entry_flags = EntryRegister::new(self[index]); // If table entry has 0b11 mask set, it is a valid table entry. // Address of the following table may be extracted from bits 47:12 if entry_flags.matches_all(TABLE_DESCRIPTOR::VALID::True + TABLE_DESCRIPTOR::TYPE::Table) { Some(entry_flags.read(NEXT_LVL_TABLE_ADDR_4KiB) << Page4KiB::SHIFT) } else { None } } pub fn next_table(&self, index: usize) -> Option<&Directory> { self.next_table_address(index) .map(|address| unsafe { &*(address.user_to_kernel() as *const _) }) } pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Directory> { self.next_table_address(index) .map(|address| unsafe { &mut *(address.user_to_kernel() as *mut _) }) } pub fn translate_levels(&self, address: VirtAddr) -> Option {} } // ---- // ---- // ---- VirtSpace // ---- // ---- /// Errors from mapping layer #[derive(Debug, Snafu)] pub enum TranslationError { /// No page found. @todo NoPage, } /// Virtual address space. @todo pub struct VirtSpace { l0: Unique>, } // translation steps: // l0: upper page directory or Err() // l1: lower page directory or 1Gb aperture or Err() // l2: page table or 2MiB aperture or Err() // l3: 4KiB aperture or Err() impl VirtSpace { // Translate translates address all the way down to physical address or error. // On each level there's next_table() fn that resolves to the next level table if possible. 
// pub fn translate(&self, virtual_address: VirtAddr) -> Result { // // let offset = virtual_address % Self::PageLevel::SIZE as usize; // use the size of the last page? // self.translate_page(Page::::containing_address(virtual_address))? // .map(|frame, offset| frame.start_address() + offset) // } } // pageglobaldirectory.translate() { // get page index <- generic over page level (xx << (10 + (3 - level) * 9)) // return page[index]?.translate(rest); // } #[cfg(test)] mod tests { use super::*; #[test_case] fn table_construction() { let mut level0_table = Directory::::zeroed(); let level1_table = Directory::::zeroed(); let level2_table = Directory::::zeroed(); let level3_table = Directory::::zeroed(); assert!(level0_table.next_table_address(0).is_none()); // Make entry map to a level1 table level0_table[0] = EntryFlags::from( TABLE_DESCRIPTOR::VALID::True + TABLE_DESCRIPTOR::TYPE::Table + TABLE_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(0x424242), ) .into(); assert!(level0_table.next_table_address(0).is_some()); let addr = level0_table.next_table_address(0).unwrap(); assert_eq!(addr, (0x424242 << 12)); } }