[wip] reshuffle stuff around - to be finalized
parent 6c77d0930c
commit 81974b40c7

@@ -1,15 +1,354 @@
use {
    crate::{
        arch::aarch64::memory::{
            get_virt_addr_properties, AttributeFields, /*FrameAllocator, PhysAddr, VirtAddr,*/
        },
        println,
    },
    // bitflags::bitflags,
    core::{
        // convert::TryInto,
        // fmt,
        marker::PhantomData,
        ops::{Index, IndexMut},
        // ptr::Unique,
    },
    cortex_a::{
        barrier,
        regs::{ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1},
    },
    register::{
        cpu::{RegisterReadOnly, RegisterReadWrite},
        register_bitfields,
    },
    // ux::*,
};

mod mair {
    use cortex_a::regs::MAIR_EL1;

    /// Setup function for the MAIR_EL1 register.
    pub fn set_up() {
        use cortex_a::regs::RegisterReadWrite;

        // Define the three memory types that we will map: normal cacheable DRAM,
        // non-cacheable DRAM and device memory.
        MAIR_EL1.write(
            // Attribute 2 -- Device Memory
            MAIR_EL1::Attr2_Device::nonGathering_nonReordering_EarlyWriteAck
            // Attribute 1 -- Non Cacheable DRAM
            + MAIR_EL1::Attr1_Normal_Outer::NonCacheable
            + MAIR_EL1::Attr1_Normal_Inner::NonCacheable
            // Attribute 0 -- Regular Cacheable
            + MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc
            + MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc,
        );
    }

    // Three descriptive consts for indexing into the correct MAIR_EL1 attributes.
    pub mod attr {
        pub const NORMAL: u64 = 0;
        pub const NORMAL_NON_CACHEABLE: u64 = 1;
        pub const DEVICE_NGNRE: u64 = 2;
        // DEVICE_GRE
        // DEVICE_NGNRNE
    }
}

/// Maps the generic memory range attributes to HW-specific attributes of the MMU.
fn into_mmu_attributes(
    attribute_fields: AttributeFields,
) -> register::FieldValue<u64, STAGE1_DESCRIPTOR::Register> {
    use super::{AccessPermissions, MemAttributes};

    // Memory attributes
    let mut desc = match attribute_fields.mem_attributes {
        MemAttributes::CacheableDRAM => {
            STAGE1_DESCRIPTOR::SH::InnerShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL)
        }
        MemAttributes::NonCacheableDRAM => {
            STAGE1_DESCRIPTOR::SH::InnerShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL_NON_CACHEABLE)
        }
        MemAttributes::Device => {
            STAGE1_DESCRIPTOR::SH::OuterShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::DEVICE_NGNRE)
        }
    };

    // Access Permissions
    desc += match attribute_fields.acc_perms {
        AccessPermissions::ReadOnly => STAGE1_DESCRIPTOR::AP::RO_EL1,
        AccessPermissions::ReadWrite => STAGE1_DESCRIPTOR::AP::RW_EL1,
    };

    // Execute Never
    desc += if attribute_fields.execute_never {
        STAGE1_DESCRIPTOR::PXN::NeverExecute
    } else {
        STAGE1_DESCRIPTOR::PXN::Execute
    };

    desc
}

/// Type-safe enum wrapper covering Table<L>'s 64-bit entries.
#[derive(Clone)]
// #[repr(transparent)]
enum PageTableEntry {
    /// Empty page table entry.
    Invalid,
    /// A descriptor pointing to the next page table, used in L0, L1 and L2 tables.
    /// L0 tables can only point to L1 tables.
    TableDescriptor(EntryFlags),
    /// A Level2 block descriptor with 2 MiB aperture.
    ///
    /// The output points to physical memory.
    Lvl2BlockDescriptor(EntryFlags),
    /// A page descriptor with 4 KiB aperture.
    ///
    /// The output points to physical memory.
    PageDescriptor(EntryFlags),
}

/// A descriptor pointing to the next page table. (within PageTableEntry enum)
// struct TableDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);

impl PageTableEntry {
    fn new_table_descriptor(next_lvl_table_addr: usize) -> Result<PageTableEntry, &'static str> {
        if next_lvl_table_addr % Size4KiB::SIZE as usize != 0 {
            // @todo SIZE must be usize
            return Err("TableDescriptor: Address is not 4 KiB aligned.");
        }

        let shifted = next_lvl_table_addr >> Size4KiB::SHIFT;

        Ok(PageTableEntry::TableDescriptor(
            STAGE1_DESCRIPTOR::VALID::True
                + STAGE1_DESCRIPTOR::TYPE::Table
                + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
        ))
    }
}

#[derive(Snafu, Debug)]
enum PageTableError {
    #[snafu(display("BlockDescriptor: Address is not 2 MiB aligned."))]
    //"PageDescriptor: Address is not 4 KiB aligned."
    NotAligned(&'static str),
}

/// A Level2 block descriptor with 2 MiB aperture.
///
/// The output points to physical memory.
// struct Lvl2BlockDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);

impl PageTableEntry {
    fn new_lvl2_block_descriptor(
        output_addr: usize,
        attribute_fields: AttributeFields,
    ) -> Result<PageTableEntry, PageTableError> {
        if output_addr % Size2MiB::SIZE as usize != 0 {
            return Err(PageTableError::NotAligned(Size2MiB::SIZE_AS_DEBUG_STR));
        }

        let shifted = output_addr >> Size2MiB::SHIFT;

        Ok(PageTableEntry::Lvl2BlockDescriptor(
            STAGE1_DESCRIPTOR::VALID::True
                + STAGE1_DESCRIPTOR::AF::True
                + into_mmu_attributes(attribute_fields)
                + STAGE1_DESCRIPTOR::TYPE::Block
                + STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(shifted as u64),
        ))
    }
}

/// A page descriptor with 4 KiB aperture.
///
/// The output points to physical memory.

impl PageTableEntry {
    fn new_page_descriptor(
        output_addr: usize,
        attribute_fields: AttributeFields,
    ) -> Result<PageTableEntry, PageTableError> {
        if output_addr % Size4KiB::SIZE as usize != 0 {
            return Err(PageTableError::NotAligned(Size4KiB::SIZE_AS_DEBUG_STR));
        }

        let shifted = output_addr >> Size4KiB::SHIFT;

        Ok(PageTableEntry::PageDescriptor(
            STAGE1_DESCRIPTOR::VALID::True
                + STAGE1_DESCRIPTOR::AF::True
                + into_mmu_attributes(attribute_fields)
                + STAGE1_DESCRIPTOR::TYPE::Table
                + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
        ))
    }
}

impl From<u64> for PageTableEntry {
    fn from(_val: u64) -> PageTableEntry {
        // xxx0 -> Invalid
        // xx11 -> TableDescriptor on L0, L1 and L2
        // xx10 -> Block Entry L1 and L2
        // xx11 -> PageDescriptor L3
        PageTableEntry::Invalid
    }
}
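
// A minimal decode sketch (illustrative, not part of this commit): the low two
// bits classify a raw entry per the encoding notes above. Whether 0b11 means a
// table descriptor (L0..L2) or a page descriptor (L3) depends on the table
// level, which `From<u64>` cannot see -- that is why the stub above stays
// Invalid. The function and the `level3` parameter are assumed names.
#[allow(dead_code)]
fn classify_raw_entry(val: u64, level3: bool) -> &'static str {
    match val & 0b11 {
        0b11 if level3 => "PageDescriptor",  // xx11 at L3
        0b11 => "TableDescriptor",           // xx11 at L0..L2
        0b10 => "Lvl2BlockDescriptor",       // xx10 at L1..L2
        _ => "Invalid",                      // xxx0 is never valid
    }
}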

impl From<PageTableEntry> for u64 {
    fn from(val: PageTableEntry) -> u64 {
        match val {
            PageTableEntry::Invalid => 0,
            PageTableEntry::TableDescriptor(x)
            | PageTableEntry::Lvl2BlockDescriptor(x)
            | PageTableEntry::PageDescriptor(x) => x.value,
        }
    }
}

// To get L0 we must allocate a few frames from the boot region allocator.
// So, first we init the dtb, parse mem-regions from there, then init the boot_info page
// and start the mmu; this part will be inited in mmu::init():

// @todo do NOT keep these statically, always allocate from available bump memory
static mut LVL2_TABLE: Table<PageDirectory> = Table::<PageDirectory> {
    entries: [0; NUM_ENTRIES_4KIB as usize],
    level: PhantomData,
};

// @todo do NOT keep these statically, always allocate from available bump memory
static mut LVL3_TABLE: Table<PageTable> = Table::<PageTable> {
    entries: [0; NUM_ENTRIES_4KIB as usize],
    level: PhantomData,
};

trait BaseAddr {
    fn base_addr_u64(&self) -> u64;
    fn base_addr_usize(&self) -> usize;
}

impl BaseAddr for [u64; 512] {
    fn base_addr_u64(&self) -> u64 {
        self as *const u64 as u64
    }

    fn base_addr_usize(&self) -> usize {
        self as *const u64 as usize
    }
}

/// Set up identity-mapped page tables for the first 1 GiB of address space.
/// Default split: 880 MiB ARM RAM, 128 MiB VideoCore.
///
/// # Safety
///
/// Completely unsafe, we're in the hardware land! Incorrectly initialised tables will just
/// restart the CPU.
pub unsafe fn init() -> Result<(), &'static str> {
    // Prepare the memory attribute indirection register.
    mair::set_up();

    // Point the first 2 MiB of virtual addresses to the follow-up LVL3
    // page table.
    LVL2_TABLE.entries[0] =
        PageTableEntry::new_table_descriptor(LVL3_TABLE.entries.base_addr_usize())?.into();

    // Fill the rest of the LVL2 (2 MiB) entries as block descriptors.
    //
    // Notice the skip(1) which makes the iteration start at the second 2 MiB
    // block (0x20_0000).
    for (block_descriptor_nr, entry) in LVL2_TABLE.entries.iter_mut().enumerate().skip(1) {
        let virt_addr = block_descriptor_nr << Size2MiB::SHIFT;

        let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
            Err(s) => return Err(s),
            Ok((a, b)) => (a, b),
        };

        let block_desc =
            match PageTableEntry::new_lvl2_block_descriptor(output_addr, attribute_fields) {
                Err(s) => return Err(s),
                Ok(desc) => desc,
            };

        *entry = block_desc.into();
    }

    // Finally, fill the single LVL3 table (4 KiB granule).
    for (page_descriptor_nr, entry) in LVL3_TABLE.entries.iter_mut().enumerate() {
        let virt_addr = page_descriptor_nr << Size4KiB::SHIFT;

        let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
            Err(s) => return Err(s),
            Ok((a, b)) => (a, b),
        };

        let page_desc = match PageTableEntry::new_page_descriptor(output_addr, attribute_fields) {
            Err(s) => return Err(s),
            Ok(desc) => desc,
        };

        *entry = page_desc.into();
    }

    // Point to the LVL2 table base address in TTBR0.
    TTBR0_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // User (lo-)space addresses

    // TTBR1_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // Kernel (hi-)space addresses

    // Configure various settings of stage 1 of the EL1 translation regime.
    let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
    TCR_EL1.write(
        TCR_EL1::TBI0::Ignored // @todo TBI1 also set to Ignored??
            + TCR_EL1::IPS.val(ips) // Intermediate Physical Address Size
            // ttbr0 user memory addresses
            + TCR_EL1::TG0::KiB_4 // 4 KiB granule
            + TCR_EL1::SH0::Inner
            + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD0::EnableTTBR0Walks
            + TCR_EL1::T0SZ.val(34) // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
            // ttbr1 kernel memory addresses
            + TCR_EL1::TG1::KiB_4 // 4 KiB granule
            + TCR_EL1::SH1::Inner
            + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD1::EnableTTBR1Walks
            + TCR_EL1::T1SZ.val(34), // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
    );
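
    // Note on the TxSZ value above (plain arithmetic, matching the cited
    // Table D5-11): TxSZ = 34 gives a 2^(64 - 34) = 2^30 byte (1 GiB) virtual
    // region per TTBR, which is exactly why the walk can start at level 2 with
    // a single LVL2 table, matching the identity map built above.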

    // Switch the MMU on.
    //
    // First, force all previous changes to be seen before the MMU is enabled.
    barrier::isb(barrier::SY);

    // use cortex_a::regs::RegisterReadWrite;
    // Enable the MMU and turn on data and instruction caching.
    SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);

    // Force MMU init to complete before the next instruction.
    /*
     * Invalidate the local I-cache so that any instructions fetched
     * speculatively from the PoC are discarded, since they may have
     * been dynamically patched at the PoU.
     */
    barrier::isb(barrier::SY);

    Ok(())
}

// AArch64:
// Table D4-8-2021: check supported granule sizes, select alloc policy based on results.
// TTBR_ELx is the pdbr for specific page tables

// Page 2068: actual page descriptor formats

/// Errors from the mapping layer.
#[derive(Debug, Snafu)]
pub enum TranslationError {
    NoPage,
}

// Pointer to the currently active page table.
// Could be either user space (TTBR0) or kernel space (TTBR1) -- ??
pub struct ActivePageTable {


@@ -1,344 +1,536 @@
/*
 * SPDX-License-Identifier: BlueOak-1.0.0
 * Copyright (c) Berkus Decker <berkus+vesper@metta.systems>
 */

//! MMU initialisation.
//!
//! Paging is mostly based on a [previous version](https://os.phil-opp.com/page-tables/) of
//! Phil Opp's [paging guide](https://os.phil-opp.com/paging-implementation/) and
//! [ARMv8 ARM memory addressing](https://static.docs.arm.com/100940/0100/armv8_a_address%20translation_100940_0100_en.pdf).
//! It includes ideas from Sergio Benitez' cs140e OSdev course material on type-safe access.

use crate::memory::PageSize;
use {
    crate::{
        arch::aarch64::memory::{
            get_virt_addr_properties, AttributeFields, /*FrameAllocator, PhysAddr, VirtAddr,*/
        },
        println,
        crate::memory::{
            page_size::{Size1GiB, Size2MiB, Size4KiB},
            virt_page::Page,
            PhysAddr, PhysFrame, VirtAddr,
        },
    // bitflags::bitflags,
    core::{
        // convert::TryInto,
        // fmt,
        marker::PhantomData,
        ops::{Index, IndexMut},
        // ptr::Unique,
        ptr::Unique,
    },
    cortex_a::{
        asm::barrier,
        registers::{ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1},
    },
    tock_registers::{
        fields::FieldValue,
        interfaces::{ReadWriteable, Readable, Writeable},
        register_bitfields,
    },
    // ux::*,
    register::register_bitfields,
    snafu::Snafu,
};

mod mair {
    use cortex_a::registers::MAIR_EL1;
    use tock_registers::interfaces::Writeable;
    /*
     * With a 4 KiB page granule, a virtual address is split into 4 lookup parts
     * spanning 9 bits each:
     *
     *   _______________________________________________
     *  |       |       |       |       |       |       |
     *  | signx |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
     *  |_______|_______|_______|_______|_______|_______|
     *   63-48   47-39   38-30   29-21   20-12   11-00
     *
     *             mask       page size
     *
     *  Lv0: FF8000000000       --
     *  Lv1:   7FC0000000
     *  off:     3FFFFFFF       1G
     *  Lv2:     3FE00000
     *  off:       1FFFFF       2M
     *  Lv3:       1FF000
     *  off:          FFF       4K
     *
     * The RPi3 supports 64 KiB and 4 KiB granules, and 40-bit physical addresses.
     * It can also address only 1 GiB of physical memory, so these 40-bit
     * physical addresses are a fiction.
     *
     * 48-bit virtual address space; different mappings via TTBR0 (EL0) and TTBR1 (EL1+).
     */
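
    // A minimal sketch (not part of this commit) of how the 4 KiB granule
    // splits a virtual address into the lookup parts drawn above; the shifts
    // and masks follow the diagram, the function name is illustrative only.
    #[allow(dead_code)]
    fn va_lookup_parts(va: u64) -> (u64, u64, u64, u64, u64) {
        let lv0 = (va >> 39) & 0x1ff; // bits 47:39
        let lv1 = (va >> 30) & 0x1ff; // bits 38:30
        let lv2 = (va >> 21) & 0x1ff; // bits 29:21
        let lv3 = (va >> 12) & 0x1ff; // bits 20:12
        let off = va & 0xfff; // bits 11:0
        (lv0, lv1, lv2, lv3, off)
    }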

    /// Setup function for the MAIR_EL1 register.
    pub fn set_up() {
        // Define the three memory types that we will map: normal cacheable DRAM,
        // non-cacheable DRAM and device memory.
        MAIR_EL1.write(
            // Attribute 2 -- Device Memory
            MAIR_EL1::Attr2_Device::nonGathering_nonReordering_EarlyWriteAck
            // Attribute 1 -- Non Cacheable DRAM
            + MAIR_EL1::Attr1_Normal_Outer::NonCacheable
            + MAIR_EL1::Attr1_Normal_Inner::NonCacheable
            // Attribute 0 -- Regular Cacheable
            + MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc
            + MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc,
        );
    }

    // Three descriptive consts for indexing into the correct MAIR_EL1 attributes.
    pub mod attr {
        pub const NORMAL: u64 = 0;
        pub const NORMAL_NON_CACHEABLE: u64 = 1;
        pub const DEVICE_NGNRE: u64 = 2;
        // DEVICE_GRE
        // DEVICE_NGNRNE
    }
register_bitfields! {
    u64,
    VA_INDEX [
        LEVEL0 OFFSET(39) NUMBITS(9) [],
        LEVEL1 OFFSET(30) NUMBITS(9) [],
        LEVEL2 OFFSET(21) NUMBITS(9) [],
        LEVEL3 OFFSET(12) NUMBITS(9) [],
        OFFSET OFFSET(0) NUMBITS(12) []
    ]
}

/// Maps the generic memory range attributes to HW-specific attributes of the MMU.
fn into_mmu_attributes(
    attribute_fields: AttributeFields,
) -> FieldValue<u64, STAGE1_DESCRIPTOR::Register> {
    use super::{AccessPermissions, MemAttributes};
register_bitfields! {
    u64,
    // AArch64 Reference Manual page 2150, D5-2445
    TABLE_DESCRIPTOR [
        // In table descriptors

    // Memory attributes
    let mut desc = match attribute_fields.mem_attributes {
        MemAttributes::CacheableDRAM => {
            STAGE1_DESCRIPTOR::SH::InnerShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL)
        }
        MemAttributes::NonCacheableDRAM => {
            STAGE1_DESCRIPTOR::SH::InnerShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL_NON_CACHEABLE)
        }
        MemAttributes::Device => {
            STAGE1_DESCRIPTOR::SH::OuterShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::DEVICE_NGNRE)
        }
    };
        NSTable_EL3 OFFSET(63) NUMBITS(1) [],

    // Access Permissions
    desc += match attribute_fields.acc_perms {
        AccessPermissions::ReadOnly => STAGE1_DESCRIPTOR::AP::RO_EL1,
        AccessPermissions::ReadWrite => STAGE1_DESCRIPTOR::AP::RW_EL1,
    };
        /// Access Permissions for subsequent tables
        APTable OFFSET(61) NUMBITS(2) [
            RW_EL1 = 0b00,
            RW_EL1_EL0 = 0b01,
            RO_EL1 = 0b10,
            RO_EL1_EL0 = 0b11
        ],

    // Execute Never
    desc += if attribute_fields.execute_never {
        STAGE1_DESCRIPTOR::PXN::NeverExecute
    } else {
        STAGE1_DESCRIPTOR::PXN::Execute
    };
        // User execute-never for subsequent tables
        UXNTable OFFSET(60) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

    desc
        /// Privileged execute-never for subsequent tables
        PXNTable OFFSET(59) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        // In block descriptors

        // OS-specific data
        OSData OFFSET(55) NUMBITS(4) [],

        // User execute-never
        UXN OFFSET(54) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        /// Privileged execute-never
        PXN OFFSET(53) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        // @fixme ?? where is this described
        CONTIGUOUS OFFSET(52) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        // @fixme ?? where is this described
        DIRTY OFFSET(51) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Various address fields, depending on use case
        LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [], // [47:21]
        NEXT_LVL_TABLE_ADDR_4KiB OFFSET(12) NUMBITS(36) [], // [47:12]

        // @fixme ?? where is this described
        NON_GLOBAL OFFSET(11) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Access flag
        AF OFFSET(10) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Shareability field
        SH OFFSET(8) NUMBITS(2) [
            OuterShareable = 0b10,
            InnerShareable = 0b11
        ],

        /// Access Permissions
        AP OFFSET(6) NUMBITS(2) [
            RW_EL1 = 0b00,
            RW_EL1_EL0 = 0b01,
            RO_EL1 = 0b10,
            RO_EL1_EL0 = 0b11
        ],

        NS_EL3 OFFSET(5) NUMBITS(1) [],

        /// Memory attributes index into the MAIR_EL1 register
        AttrIndx OFFSET(2) NUMBITS(3) [],

        TYPE OFFSET(1) NUMBITS(1) [
            Block = 0,
            Table = 1
        ],

        VALID OFFSET(0) NUMBITS(1) [
            False = 0,
            True = 1
        ]
    ]
}

/// Type-safe enum wrapper covering Table<L>'s 64-bit entries.
#[derive(Clone)]
// #[repr(transparent)]
enum PageTableEntry {
    /// Empty page table entry.
    Invalid,
    /// A descriptor pointing to the next page table, used in L0, L1 and L2 tables.
    /// L0 tables can only point to L1 tables.
    TableDescriptor(EntryFlags),
    /// A Level2 block descriptor with 2 MiB aperture.
    ///
    /// The output points to physical memory.
    Lvl2BlockDescriptor(EntryFlags),
    /// A page descriptor with 4 KiB aperture.
    ///
    /// The output points to physical memory.
    PageDescriptor(EntryFlags),
type VaIndex = register::FieldValue<u64, VA_INDEX::Register>;
type EntryFlags = register::FieldValue<u64, TABLE_DESCRIPTOR::Register>;
// type EntryRegister = register::LocalRegisterCopy<u64, TABLE_DESCRIPTOR::Register>;

// Possible mappings:
// * TTBR0 pointing to user page global directory
// * TTBR0 pointing to user page upper directory (only if the mmu is set up differently)
// * TTBR1 pointing to kernel page global directory with full physmem access

// * The paging system uses a separate address space in the top kernel region (TTBR1)
// * to access the entire physical memory contents.
// * This mapping is not available to user space (user space uses TTBR0).
// *
// * Use the largest possible granule size to map physical memory, since we want to use
// * the least amount of memory for these mappings.

// TTBR0 Page Global Directory

// Level 0 descriptors can only output the address of a Level 1 table.
// Level 3 descriptors cannot point to another table and can only output block addresses.
// The format of the table is therefore slightly different for Level 3.
//
// This means:
// - a level 0 page table can contain only TableDescriptors,
// - a level 1 or 2 page table can contain TableDescriptors and Lvl2BlockDescriptors (PageDescriptors),
// - a level 3 page table can contain only PageDescriptors.

// Level / Types | Table Descriptor | Lvl2BlockDescriptor (PageDescriptor)
// --------------+------------------+--------------------------------------
//       0       |        X         |   (with 4KiB granule)
//       1       |        X         | X (1GiB range)
//       2       |        X         | X (2MiB range)
//       3       |                  | X (4KiB range) -- called PageDescriptor;
//                                      encoding actually the same as in Table Descriptor

// Translation granule affects the size of the block addressed.
// Let's use the 4 KiB granule on RPi3 for simplicity.

// 1. Set 4 KiB granule size to use the PGD -- we could use a 16 KiB granule instead?
//    - need to measure the waste level
//    - but let's stick with 4 KiB for now
//

// If I have, for example, Table<Level0> I can get from it N `Table<Level1>` (via impl HierarchicalTable)
// From Table<Level1> I can get either `Table<Level2>` (via impl HierarchicalTable) or `BlockDescriptor<Size1GiB>`
// From Table<Level2> I can get either `Table<Level3>` (via impl HierarchicalTable) or `BlockDescriptor<Size2MiB>`
// From Table<Level3> I can only get `PageDescriptor<Size4KiB>` (because no impl HierarchicalTable exists)

/// GlobalDirectory [ UpperDirectory entries ]
/// UpperDirectory [ PageDirectory | GiantPage ]
/// PageDirectory [ PageTable | LargePage ]
/// PageTable [ PageFrames ]

// Do those as separate types, then in accessors allow only certain combinations,
// e.g.
// struct UpperDirectoryEntry; // DirectoryEntry<L0>
// struct PageDirectoryEntry; // DirectoryEntry<L1>
// struct GiantPageFrame; // PageFrame<Size1GiB>
// struct PageTableEntry; // DirectoryEntry<L2>
// struct LargePageFrame; // PageFrame<Size2MiB>
// struct PageFrame; // PageFrame<Size4KiB>

// enum PageTableEntry { Page(&mut PageDescriptor), Block(&mut BlockDescriptor), Etc(&mut u64), Invalid(&mut u64) }
// impl PageTableEntry { fn new_from_entry_addr(&u64) }
// return enum PageTableEntry constructed from table bits in u64

enum L0Entries {
    UpperDirectoryEntry(VirtAddr),
}
enum L1Entries {
    PageDirectoryEntry(VirtAddr),
    GiantPageFrame(PhysFrame<Size1GiB>),
}
enum L2Entries {
    PageTableEntry(VirtAddr),
    LargePageFrame(PhysFrame<Size2MiB>),
}
enum L3Entries {
    PageFrame(PhysFrame<Size4KiB>),
}

/// A descriptor pointing to the next page table. (within PageTableEntry enum)
// struct TableDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);

impl PageTableEntry {
    fn new_table_descriptor(next_lvl_table_addr: usize) -> Result<PageTableEntry, &'static str> {
        if next_lvl_table_addr % Size4KiB::SIZE as usize != 0 {
            // @todo SIZE must be usize
            return Err("TableDescriptor: Address is not 4 KiB aligned.");
        }

        let shifted = next_lvl_table_addr >> Size4KiB::SHIFT;

        Ok(PageTableEntry::TableDescriptor(
            STAGE1_DESCRIPTOR::VALID::True
                + STAGE1_DESCRIPTOR::TYPE::Table
                + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
        ))
    }
enum Frames {
    GiantPageFrame,
    LargePageFrame,
    PageFrame,
}

#[derive(Snafu, Debug)]
enum PageTableError {
    #[snafu(display("BlockDescriptor: Address is not 2 MiB aligned."))]
    //"PageDescriptor: Address is not 4 KiB aligned."
    NotAligned(&'static str),
}
// ----
// ----
// ---- Table levels
// ----
// ----

/// A Level2 block descriptor with 2 MiB aperture.
/// L0 table -- only pointers to L1 tables
pub enum L0PageGlobalDirectory {}
/// L1 tables -- pointers to L2 tables or giant 1GiB pages
pub enum L1PageUpperDirectory {}
/// L2 tables -- pointers to L3 tables or huge 2MiB pages
pub enum L2PageDirectory {}
/// L3 tables -- only pointers to 4/16KiB pages
pub enum L3PageTable {}

/// Shared trait for specific table levels.
pub trait TableLevel {}

/// Shared trait for hierarchical table levels.
///
/// The output points to physical memory.
// struct Lvl2BlockDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
/// Specifies what is the next level of the page table hierarchy.
pub trait HierarchicalLevel: TableLevel {
    /// Level of the next translation table below this one.
    type NextLevel: TableLevel;

impl PageTableEntry {
    fn new_lvl2_block_descriptor(
        output_addr: usize,
        attribute_fields: AttributeFields,
    ) -> Result<PageTableEntry, PageTableError> {
        if output_addr % Size2MiB::SIZE as usize != 0 {
            return Err(PageTableError::NotAligned(Size2MiB::SIZE_AS_DEBUG_STR));
        }

        let shifted = output_addr >> Size2MiB::SHIFT;

        Ok(PageTableEntry::Lvl2BlockDescriptor(
            STAGE1_DESCRIPTOR::VALID::True
                + STAGE1_DESCRIPTOR::AF::True
                + into_mmu_attributes(attribute_fields)
                + STAGE1_DESCRIPTOR::TYPE::Block
                + STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(shifted as u64),
        ))
    }
    // fn translate() -> Directory<NextLevel>;
}

/// A page descriptor with 4 KiB aperture.
///
/// The output points to physical memory.

impl PageTableEntry {
    fn new_page_descriptor(
        output_addr: usize,
        attribute_fields: AttributeFields,
    ) -> Result<PageTableEntry, PageTableError> {
        if output_addr % Size4KiB::SIZE as usize != 0 {
            return Err(PageTableError::NotAligned(Size4KiB::SIZE_AS_DEBUG_STR));
        }

        let shifted = output_addr >> Size4KiB::SHIFT;

        Ok(PageTableEntry::PageDescriptor(
            STAGE1_DESCRIPTOR::VALID::True
                + STAGE1_DESCRIPTOR::AF::True
                + into_mmu_attributes(attribute_fields)
                + STAGE1_DESCRIPTOR::TYPE::Table
                + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
        ))
    }
pub trait HierarchicalPageLevel: TableLevel {
    /// Size of the page that can be contained in this table level.
    type PageLevel: PageSize;
}

impl From<u64> for PageTableEntry {
    fn from(_val: u64) -> PageTableEntry {
        // xxx0 -> Invalid
        // xx11 -> TableDescriptor on L0, L1 and L2
        // xx10 -> Block Entry L1 and L2
        // xx11 -> PageDescriptor L3
        PageTableEntry::Invalid
    }
impl TableLevel for L0PageGlobalDirectory {}
impl TableLevel for L1PageUpperDirectory {}
impl TableLevel for L2PageDirectory {}
impl TableLevel for L3PageTable {}

impl HierarchicalLevel for L0PageGlobalDirectory {
    type NextLevel = L1PageUpperDirectory;
}
impl HierarchicalLevel for L1PageUpperDirectory {
    type NextLevel = L2PageDirectory;
}
impl HierarchicalLevel for L2PageDirectory {
    type NextLevel = L3PageTable;
}
// L3PageTables do not have a next level, therefore they are not HierarchicalLevel.

// L0PageGlobalDirectory does not contain pages, so it is not HierarchicalPageLevel.
impl HierarchicalPageLevel for L1PageUpperDirectory {
    type PageLevel = Size1GiB;
}
impl HierarchicalPageLevel for L2PageDirectory {
    type PageLevel = Size2MiB;
}
impl HierarchicalPageLevel for L3PageTable {
    type PageLevel = Size4KiB;
}

impl From<PageTableEntry> for u64 {
    fn from(val: PageTableEntry) -> u64 {
        match val {
            PageTableEntry::Invalid => 0,
            PageTableEntry::TableDescriptor(x)
            | PageTableEntry::Lvl2BlockDescriptor(x)
            | PageTableEntry::PageDescriptor(x) => x.value,
// ----
// ----
// ---- Directory
// ----
// ----

// Maximum OA is 48 bits.
//
// Level 0 table descriptor has Output Address in [47:12] --> level 1 table
// Level 0 descriptor cannot be a block descriptor.
//
// Level 1 table descriptor has Output Address in [47:12] --> level 2 table
// Level 1 block descriptor has Output Address in [47:30]
//
// Level 2 table descriptor has Output Address in [47:12] --> level 3 table
// Level 2 block descriptor has Output Address in [47:21]
//
// Level 3 block descriptor has Output Address in [47:12]
// Upper Attributes [63:51]
// Res0 [50:48]
// Lower Attributes [11:2]
// 11b [1:0]
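
// Illustrative only (not in this commit): extracting the output address from a
// level 2 block descriptor per the field layout above -- the OA lives in bits
// [47:21], so everything else is masked off.
#[allow(dead_code)]
fn l2_block_output_addr(descriptor: u64) -> u64 {
    descriptor & 0x0000_FFFF_FFE0_0000 // keep bits 47:21
}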

// Each table consists of 2**9 entries.
const TABLE_BITS: usize = 9;
const INDEX_MASK: usize = (1 << TABLE_BITS) - 1;

static_assert!(INDEX_MASK == 0x1ff);

// @todo Table in mmu.rs
/// MMU address translation table.
/// Contains just u64 internally, provides an enum interface on top.
#[repr(C)]
#[repr(align(4096))]
struct Directory<Level: TableLevel> {
    entries: [u64; 1 << TABLE_BITS],
    level: PhantomData<Level>,
}

impl Directory<L0PageGlobalDirectory> {
    fn next(&self, address: VirtAddr) -> Option<L0Entries> {
        let va = VaIndex::new(address.to_u64());
        let index = va.read(VA_INDEX::LEVEL0);
        match self.next_table_address(index.into()) {
            Some(phys_addr) => Some(L0Entries::UpperDirectoryEntry(phys_addr.user_to_kernel())),
            None => None,
        }
    }
}

// To get L0 we must allocate a few frames from the boot region allocator.
// So, first we init the dtb, parse mem-regions from there, then init the boot_info page
// and start the mmu; this part will be inited in mmu::init():

// @todo do NOT keep these statically, always allocate from available bump memory
static mut LVL2_TABLE: Table<PageDirectory> = Table::<PageDirectory> {
    entries: [0; NUM_ENTRIES_4KIB as usize],
    level: PhantomData,
};

// @todo do NOT keep these statically, always allocate from available bump memory
static mut LVL3_TABLE: Table<PageTable> = Table::<PageTable> {
    entries: [0; NUM_ENTRIES_4KIB as usize],
    level: PhantomData,
};

trait BaseAddr {
    fn base_addr_u64(&self) -> u64;
    fn base_addr_usize(&self) -> usize;
}

impl BaseAddr for [u64; 512] {
    fn base_addr_u64(&self) -> u64 {
        self as *const u64 as u64
    }

    fn base_addr_usize(&self) -> usize {
        self as *const u64 as usize
impl Directory<L1PageUpperDirectory> {
    fn next(&self, address: VirtAddr) -> Option<L1Entries> {
        let va = VaIndex::new(address.to_u64());
        let index = va.read(VA_INDEX::LEVEL1);
        match self.next_table_address(index.into()) {
            Some(phys_addr) => Some(L1Entries::PageDirectoryEntry(phys_addr.user_to_kernel())),
            None => None, // @todo could be a 1GiB frame
        }
    }
}

/// Set up identity-mapped page tables for the first 1 GiB of address space.
/// Default split: 880 MiB ARM RAM, 128 MiB VideoCore.
///
/// # Safety
///
/// Completely unsafe, we're in the hardware land! Incorrectly initialised tables will just
/// restart the CPU.
pub unsafe fn init() -> Result<(), &'static str> {
    // Prepare the memory attribute indirection register.
    mair::set_up();

    // Point the first 2 MiB of virtual addresses to the follow-up LVL3
    // page table.
    LVL2_TABLE.entries[0] =
        PageTableEntry::new_table_descriptor(LVL3_TABLE.entries.base_addr_usize())?.into();

    // Fill the rest of the LVL2 (2 MiB) entries as block descriptors.
    //
    // Notice the skip(1) which makes the iteration start at the second 2 MiB
    // block (0x20_0000).
    for (block_descriptor_nr, entry) in LVL2_TABLE.entries.iter_mut().enumerate().skip(1) {
        let virt_addr = block_descriptor_nr << Size2MiB::SHIFT;

        let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
            Err(s) => return Err(s),
            Ok((a, b)) => (a, b),
        };

        let block_desc =
            match PageTableEntry::new_lvl2_block_descriptor(output_addr, attribute_fields) {
                Err(s) => return Err(s),
                Ok(desc) => desc,
            };

        *entry = block_desc.into();
impl Directory<L2PageDirectory> {
    fn next(&self, address: VirtAddr) -> Option<L2Entries> {
        let va = VaIndex::new(address.to_u64());
        let index = va.read(VA_INDEX::LEVEL2);
        match self.next_table_address(index.into()) {
            Some(phys_addr) => Some(L2Entries::PageTableEntry(phys_addr.user_to_kernel())),
            None => None, // @todo could be a 2MiB frame
        }
    }
}

impl Directory<L3PageTable> {
    fn next(&self, address: VirtAddr) -> Option<L3Entries> {
        let va = VaIndex::new(address.as_u64());
        let index = va.read(VA_INDEX::LEVEL3);
        match self.next_table_address(index.into()) {
            // @fixme wrong function
            Some(phys_addr) => Some(L3Entries::PageFrame(phys_addr.user_to_kernel())),
            None => None, // Nothing there
        }
    }
}

// Implementation code shared for all levels of page tables.
impl<Level> Directory<Level>
where
    Level: TableLevel,
{
    /// Construct a zeroed table at a given physical location.
    // unsafe fn at(location: PhysAddr) -> &Self {}

    /// Construct and return a zeroed table.
    fn zeroed() -> Self {
        Self {
            entries: [0; 1 << TABLE_BITS],
            level: PhantomData,
        }
    }

    /// Zero out the entire table.
    pub fn zero(&mut self) {
        for entry in self.entries.iter_mut() {
            *entry = 0;
        }
    }
}

impl<Level> Index<usize> for Directory<Level>
where
    Level: TableLevel,
{
    type Output = u64;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl<Level> IndexMut<usize> for Directory<Level>
where
    Level: TableLevel,
{
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl<Level> Directory<Level>
where
    Level: HierarchicalLevel,
{
    fn next_table_address(&self, index: usize) -> Option<PhysAddr> {
        let entry_flags = EntryRegister::new(self[index]);
        // If the entry has the 0b11 mask set, it is a valid table entry.
        // The address of the following table can be extracted from bits 47:12.
        if entry_flags.matches_all(TABLE_DESCRIPTOR::VALID::True + TABLE_DESCRIPTOR::TYPE::Table) {
            Some(entry_flags.read(TABLE_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB) << Size4KiB::SHIFT)
        } else {
            None
        }
    }

    pub fn next_table(&self, index: usize) -> Option<&Directory<Level::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &*(address.user_to_kernel() as *const _) })
    }

    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Directory<Level::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &mut *(address.user_to_kernel() as *mut _) })
    }

    pub fn translate_levels(&self, address: VirtAddr) -> Option<Frames> {
        todo!() // @todo walk the levels via next_table() and return the matching frame
    }
}

// ----
// ----
// ---- VirtSpace
// ----
// ----

/// Errors from the mapping layer.
#[derive(Debug, Snafu)]
pub enum TranslationError {
    NoPage,
}

pub struct VirtSpace {
    l0: Unique<Directory<L0PageGlobalDirectory>>,
}

// Translation steps:
// l0: upper page directory or Err()
// l1: lower page directory or 1GiB aperture or Err()
// l2: page table or 2MiB aperture or Err()
// l3: 4KiB aperture or Err()

impl VirtSpace {
    // translate() resolves an address all the way down to a physical address or an error.
    // On each level there's a next_table() fn that resolves to the next level table if possible.
    // pub fn translate(&self, virtual_address: VirtAddr) -> Result<PhysAddr, TranslationError> {
    //     // let offset = virtual_address % Self::PageLevel::SIZE as usize; // use the size of the last page?
    //     self.translate_page(Page::<Self::PageLevel>::containing_address(virtual_address))?
    //         .map(|frame, offset| frame.start_address() + offset)
    // }
}

// pageglobaldirectory.translate() {
//     get page index <- generic over table level: (va >> (12 + (3 - level) * 9)) & 0x1ff
//     return page[index]?.translate(rest);
// }
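
// Worked example (illustrative): for va = 0x4020_3000 with the 4 KiB granule,
//   level 2 index = (0x4020_3000 >> 21) & 0x1ff = 1,
//   level 3 index = (0x4020_3000 >> 12) & 0x1ff = 3,
//   page offset   =  0x4020_3000 & 0xfff       = 0.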

#[cfg(test)]
mod tests {
    use super::*;

    #[test_case]
    fn table_construction() {
        let mut level0_table = Directory::<L0PageGlobalDirectory>::zeroed();
        let level1_table = Directory::<L1PageUpperDirectory>::zeroed();
        let level2_table = Directory::<L2PageDirectory>::zeroed();
        let level3_table = Directory::<L3PageTable>::zeroed();

        assert!(level0_table.next_table_address(0).is_none());

        // Make entry map to a level1 table
        level0_table[0] = EntryFlags::from(
            TABLE_DESCRIPTOR::VALID::True
                + TABLE_DESCRIPTOR::TYPE::Table
                + TABLE_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(0x424242),
        )
        .into();

        assert!(level0_table.next_table_address(0).is_some());

        let addr = level0_table.next_table_address(0).unwrap();
        assert_eq!(addr, (0x424242 << 12));
    }

    // Finally, fill the single LVL3 table (4 KiB granule).
    for (page_descriptor_nr, entry) in LVL3_TABLE.entries.iter_mut().enumerate() {
        let virt_addr = page_descriptor_nr << Size4KiB::SHIFT;

        let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
            Err(s) => return Err(s),
            Ok((a, b)) => (a, b),
        };

        let page_desc = match PageTableEntry::new_page_descriptor(output_addr, attribute_fields) {
            Err(s) => return Err(s),
            Ok(desc) => desc,
        };

        *entry = page_desc.into();
    }

    // Point to the LVL2 table base address in TTBR0.
    TTBR0_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // User (lo-)space addresses

    // TTBR1_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // Kernel (hi-)space addresses

    // Configure various settings of stage 1 of the EL1 translation regime.
    let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
    TCR_EL1.write(
        TCR_EL1::TBI0::Ignored // @todo TBI1 also set to Ignored??
            + TCR_EL1::IPS.val(ips) // Intermediate Physical Address Size
            // ttbr0 user memory addresses
            + TCR_EL1::TG0::KiB_4 // 4 KiB granule
            + TCR_EL1::SH0::Inner
            + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD0::EnableTTBR0Walks
            + TCR_EL1::T0SZ.val(34) // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
            // ttbr1 kernel memory addresses
            + TCR_EL1::TG1::KiB_4 // 4 KiB granule
            + TCR_EL1::SH1::Inner
            + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD1::EnableTTBR1Walks
            + TCR_EL1::T1SZ.val(34), // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
    );

    // Switch the MMU on.
    //
    // First, force all previous changes to be seen before the MMU is enabled.
    barrier::isb(barrier::SY);

    // use cortex_a::regs::RegisterReadWrite;
    // Enable the MMU and turn on data and instruction caching.
    SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);

    // Force MMU init to complete before next instruction
    /*
     * Invalidate the local I-cache so that any instructions fetched
     * speculatively from the PoC are discarded, since they may have
     * been dynamically patched at the PoU.
     */
    barrier::isb(barrier::SY);

    Ok(())
}

@@ -11,15 +11,13 @@ use {
};

mod addr;
// pub mod mmu;
pub mod features; // @todo make only pub re-export?
pub mod mmu;
mod page_size;
mod phys_frame;
mod virt_page;

pub mod mmu2;

pub use mmu2::*;
pub use mmu::*;

// mod area_frame_allocator;
// pub use self::area_frame_allocator::AreaFrameAllocator;

@@ -31,8 +29,6 @@ pub use addr::VirtAddr;
pub use page_size::PageSize;
pub use phys_frame::PhysFrame;

use mmu_experimental::PhysFrame;

// @todo ??
pub trait FrameAllocator {
    fn allocate_frame(&mut self) -> Option<PhysFrame>; // @todo Result<>
