feat: Implement MMU based on Andre Richter's tutorial

As per https://github.com/rust-embedded/rust-raspberrypi-OS-tutorials/tree/master/10_virtual_mem_part1_identity_mapping

Bring better separation of abstract, platform and BSP code.

Init MMU and traps after serial output.
This commit is contained in:
Berkus Decker 2022-05-08 12:15:21 +03:00
parent 4a02f5fd2c
commit 07df330b62
14 changed files with 1081 additions and 1003 deletions

View File

@ -7,6 +7,8 @@
ENTRY(_boot_cores);
PAGE_SIZE = 65536;
/* Symbols between __BOOT_START and __BOOT_END should be dropped after init is complete.
Symbols between __RO_START and __RO_END are the kernel code.
Symbols between __BSS_START and __BSS_END must be initialized to zero by r0 code in kernel.
@ -20,9 +22,9 @@ SECTIONS
{
KEEP(*(.text.boot.entry)) // Entry point must go first
*(.text.boot)
. = ALIGN(4096);
//. = ALIGN(PAGE_SIZE);
*(.data.boot)
. = ALIGN(4096); /* Here boot code ends */
. = ALIGN(PAGE_SIZE); /* Here boot code ends */
__BOOT_END = .; // __BOOT_END must be 4KiB aligned
__RO_START = .;
*(.text .text.*)
@ -38,7 +40,7 @@ SECTIONS
*(.rodata .rodata.*)
FILL(0x00)
}
. = ALIGN(4096); /* Fill up to 4KiB */
. = ALIGN(PAGE_SIZE); /* Fill up to page size */
__RO_END = .; /* __RO_END must be 4KiB aligned */
__DATA_START = .; /* __DATA_START must be 4KiB aligned */
@ -55,7 +57,7 @@ SECTIONS
__BSS_START = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(4096); /* Align up to 4KiB */
. = ALIGN(PAGE_SIZE); /* Align up to page size */
__BSS_END = .;
}

View File

@ -106,6 +106,7 @@ fn shared_setup_and_enter_pre() {
// Set EL1 execution state to AArch64
// @todo Explain the SWIO bit (SWIO hardwired on Pi3)
HCR_EL2.write(HCR_EL2::RW::EL1IsAarch64 + HCR_EL2::SWIO::SET);
// @todo disable VM bit to prevent stage 2 MMU translations
}
#[link_section = ".text.boot"]

View File

@ -13,30 +13,79 @@
use {
crate::{
arch::aarch64::memory::{get_virt_addr_properties, AttributeFields},
println,
},
core::{
marker::PhantomData,
ops::{Index, IndexMut},
memory::mmu::{
interface, interface::MMU, translation_table::KernelTranslationTable, AddressSpace,
MMUEnableError, TranslationGranule,
},
platform, println,
},
core::intrinsics::unlikely,
cortex_a::{
asm::barrier,
registers::{ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1},
},
tock_registers::{
fields::FieldValue,
interfaces::{ReadWriteable, Readable, Writeable},
register_bitfields,
},
tock_registers::interfaces::{ReadWriteable, Readable, Writeable},
};
mod mair {
use {cortex_a::registers::MAIR_EL1, tock_registers::interfaces::Writeable};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------

/// Memory Management Unit type.
struct MemoryManagementUnit;

//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------

/// 512 MiB translation granule -- the aperture of one level-2 table entry.
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
/// 64 KiB translation granule -- the page size used by the kernel tables.
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;

/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
    // Three descriptive consts for indexing into the correct MAIR_EL1 attributes.
    pub mod attr {
        /// Normal cacheable DRAM.
        pub const NORMAL: u64 = 0;
        /// Normal non-cacheable DRAM.
        pub const NORMAL_NON_CACHEABLE: u64 = 1;
        /// Device memory: non-gathering, non-reordering, early write ack.
        pub const DEVICE_NGNRE: u64 = 2;
    }
}

//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------

/// The kernel translation tables.
///
/// # Safety
///
/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
static mut KERNEL_TABLES: KernelTranslationTable = KernelTranslationTable::new();

/// The single MMU driver instance, handed out to callers via `mmu()`.
static MMU: MemoryManagementUnit = MemoryManagementUnit;
//--------------------------------------------------------------------------------------------------
// Private Implementations
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
    /// Checks for architectural restrictions.
    ///
    /// Being a `const fn`, the asserts fire at build time when this is invoked
    /// from a const context, rejecting a misconfigured address space size.
    pub const fn arch_address_space_size_sanity_checks() {
        // Size must be at least one full 512 MiB table.
        assert!((AS_SIZE % Granule512MiB::SIZE) == 0);

        // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
        // version.
        assert!(AS_SIZE <= (1 << 48));
    }
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
pub fn set_up() {
// Define the three memory types that we will map. Normal DRAM, Uncached and device.
fn set_up_mair(&self) {
use cortex_a::registers::MAIR_EL1;
// Define the three memory types that we will map: Normal DRAM, Uncached and device.
MAIR_EL1.write(
// Attribute 2 -- Device Memory
MAIR_EL1::Attr2_Device::nonGathering_nonReordering_EarlyWriteAck
@ -49,665 +98,201 @@ mod mair {
);
}
// Three descriptive consts for indexing into the correct MAIR_EL1 attributes.
pub mod attr {
pub const NORMAL: u64 = 0;
pub const NORMAL_NON_CACHEABLE: u64 = 1;
pub const DEVICE_NGNRE: u64 = 2;
// DEVICE_GRE
// DEVICE_NGNRNE
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let t0sz = (64 - platform::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::A1::TTBR0 // TTBR0 defines the ASID
+ TCR_EL1::T0SZ.val(t0sz)
+ TCR_EL1::EPD1::DisableTTBR1Walks,
);
}
}
/// Parse the ID_AA64MMFR0_EL1 register for runtime information about supported MMU features.
/// Print the current state of TCR register.
pub fn print_features() {
// use crate::cortex_a::regs::RegisterReadWrite;
let sctlr = SCTLR_EL1.extract();
//--------------------------------------------------------------------------------------------------
// Public Implementations
//--------------------------------------------------------------------------------------------------
if let Some(SCTLR_EL1::M::Value::Enable) = sctlr.read_as_enum(SCTLR_EL1::M) {
println!("[i] MMU currently enabled");
}
if let Some(SCTLR_EL1::I::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::I) {
println!("[i] MMU I-cache enabled");
}
if let Some(SCTLR_EL1::C::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::C) {
println!("[i] MMU D-cache enabled");
}
let mmfr = ID_AA64MMFR0_EL1.extract();
if let Some(ID_AA64MMFR0_EL1::TGran4::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran4)
{
println!("[i] MMU: 4 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran16::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran16)
{
println!("[i] MMU: 16 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran64::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran64)
{
println!("[i] MMU: 64 KiB granule supported!");
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::ASIDBits) {
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_16) => {
println!("[i] MMU: 16 bit ASIDs supported!")
}
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_8) => {
println!("[i] MMU: 8 bit ASIDs supported!")
}
_ => println!("[i] MMU: Invalid ASID bits specified!"),
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::PARange) {
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_32) => {
println!("[i] MMU: Up to 32 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_36) => {
println!("[i] MMU: Up to 36 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_40) => {
println!("[i] MMU: Up to 40 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_42) => {
println!("[i] MMU: Up to 42 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_44) => {
println!("[i] MMU: Up to 44 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_48) => {
println!("[i] MMU: Up to 48 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_52) => {
println!("[i] MMU: Up to 52 Bit physical address range supported!")
}
_ => println!("[i] MMU: Invalid PARange specified!"),
}
let tcr = TCR_EL1.extract();
match tcr.read_as_enum(TCR_EL1::IPS) {
Some(TCR_EL1::IPS::Value::Bits_32) => {
println!("[i] MMU: 32 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_36) => {
println!("[i] MMU: 36 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_40) => {
println!("[i] MMU: 40 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_42) => {
println!("[i] MMU: 42 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_44) => {
println!("[i] MMU: 44 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_48) => {
println!("[i] MMU: 48 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_52) => {
println!("[i] MMU: 52 Bit intermediate physical address size supported!")
}
_ => println!("[i] MMU: Invalid IPS specified!"),
}
match tcr.read_as_enum(TCR_EL1::TG0) {
Some(TCR_EL1::TG0::Value::KiB_4) => println!("[i] MMU: TTBR0 4 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_16) => println!("[i] MMU: TTBR0 16 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_64) => println!("[i] MMU: TTBR0 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR0 granule size specified!"),
}
let t0sz = tcr.read(TCR_EL1::T0SZ);
println!("[i] MMU: T0sz = 64-{} = {} bits", t0sz, 64 - t0sz);
match tcr.read_as_enum(TCR_EL1::TG1) {
Some(TCR_EL1::TG1::Value::KiB_4) => println!("[i] MMU: TTBR1 4 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_16) => println!("[i] MMU: TTBR1 16 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_64) => println!("[i] MMU: TTBR1 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR1 granule size specified!"),
}
let t1sz = tcr.read(TCR_EL1::T1SZ);
println!("[i] MMU: T1sz = 64-{} = {} bits", t1sz, 64 - t1sz);
/// Return a reference to the MMU instance.
pub fn mmu() -> &'static impl MMU {
&MMU
}
register_bitfields! {
    u64,
    // AArch64 Reference Manual page 2150, D5-2445
    STAGE1_DESCRIPTOR [
        // Fields valid in table descriptors:

        /// Non-secure bit for subsequent tables (meaningful in Secure state only).
        NSTable_EL3 OFFSET(63) NUMBITS(1) [],

        /// Access Permissions for subsequent tables
        APTable OFFSET(61) NUMBITS(2) [
            RW_EL1 = 0b00,
            RW_EL1_EL0 = 0b01,
            RO_EL1 = 0b10,
            RO_EL1_EL0 = 0b11
        ],

        /// User execute-never for subsequent tables
        UXNTable OFFSET(60) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        /// Privileged execute-never for subsequent tables
        PXNTable OFFSET(59) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        // Fields valid in block descriptors:

        /// OS-specific data bits, ignored by the hardware.
        OSData OFFSET(55) NUMBITS(4) [],

        /// User execute-never
        UXN OFFSET(54) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        /// Privileged execute-never
        PXN OFFSET(53) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        // @fixme ?? where is this described
        CONTIGUOUS OFFSET(52) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        // @fixme ?? where is this described
        DIRTY OFFSET(51) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Various address fields, depending on use case
        LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [],    // [47:21]
        NEXT_LVL_TABLE_ADDR_4KiB OFFSET(12) NUMBITS(36) [], // [47:12]

        // @fixme ?? where is this described -- presumably the nG (not-global) bit
        NON_GLOBAL OFFSET(11) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Access flag
        AF OFFSET(10) NUMBITS(1) [
            NotAccessed = 0,
            Accessed = 1
        ],

        /// Shareability field
        SH OFFSET(8) NUMBITS(2) [
            OuterShareable = 0b10,
            InnerShareable = 0b11
        ],

        /// Access Permissions
        AP OFFSET(6) NUMBITS(2) [
            RW_EL1 = 0b00,
            RW_EL1_EL0 = 0b01,
            RO_EL1 = 0b10,
            RO_EL1_EL0 = 0b11
        ],

        /// Non-secure bit (meaningful in Secure state only).
        NS_EL3 OFFSET(5) NUMBITS(1) [],

        /// Memory attributes index into the MAIR_EL1 register
        AttrIndx OFFSET(2) NUMBITS(3) [],

        /// Descriptor kind: block mapping or pointer to the next-level table.
        TYPE OFFSET(1) NUMBITS(1) [
            Block = 0,
            Table = 1
        ],

        /// Descriptor validity; accesses through invalid entries fault.
        VALID OFFSET(0) NUMBITS(1) [
            False = 0,
            True = 1
        ]
    ]
}
/// A function that maps the generic memory range attributes to HW-specific
/// attributes of the MMU.
fn into_mmu_attributes(
    attribute_fields: AttributeFields,
) -> FieldValue<u64, STAGE1_DESCRIPTOR::Register> {
    use super::{AccessPermissions, MemAttributes};

    // Shareability and MAIR_EL1 index, derived from the generic memory type.
    let memory_flags = match attribute_fields.mem_attributes {
        MemAttributes::CacheableDRAM => {
            STAGE1_DESCRIPTOR::SH::InnerShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL)
        }
        MemAttributes::NonCacheableDRAM => {
            STAGE1_DESCRIPTOR::SH::InnerShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL_NON_CACHEABLE)
        }
        MemAttributes::Device => {
            STAGE1_DESCRIPTOR::SH::OuterShareable
                + STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::DEVICE_NGNRE)
        }
    };

    // EL1 access permissions.
    let access_flags = match attribute_fields.acc_perms {
        AccessPermissions::ReadOnly => STAGE1_DESCRIPTOR::AP::RO_EL1,
        AccessPermissions::ReadWrite => STAGE1_DESCRIPTOR::AP::RW_EL1,
    };

    // Privileged execute-never.
    let execute_flags = if attribute_fields.execute_never {
        STAGE1_DESCRIPTOR::PXN::NeverExecute
    } else {
        STAGE1_DESCRIPTOR::PXN::Execute
    };

    memory_flags + access_flags + execute_flags
}
/*
* With 4k page granule, a virtual address is split into 4 lookup parts
* spanning 9 bits each:
*
* _______________________________________________
* | | | | | | |
* | signx | Lv0 | Lv1 | Lv2 | Lv3 | off |
* |_______|_______|_______|_______|_______|_______|
* 63-48 47-39 38-30 29-21 20-12 11-00
*
* mask page size
*
* Lv0: FF8000000000 --
* Lv1: 7FC0000000 1G
* Lv2: 3FE00000 2M
* Lv3: 1FF000 4K
* off: FFF
*
* RPi3 supports 64K and 4K granules, also 40-bit physical addresses.
* It also can address only 1G physical memory, so these 40-bit phys addresses are a fake.
*
* 48-bit virtual address space; different mappings in VBAR0 (EL0) and VBAR1 (EL1+).
*/
/// Number of entries in a 4KiB mmu table.
pub const NUM_ENTRIES_4KIB: u64 = 512;

/// Trait for abstracting over the possible page sizes, 4KiB, 16KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord {
    /// The page size in bytes.
    const SIZE: u64;
    /// A string representation of the page size for debug output.
    const SIZE_AS_DEBUG_STR: &'static str;
    /// The page shift in bits.
    const SHIFT: usize;
    /// The page mask in bits.
    const MASK: u64;
}

/// This trait is implemented for 4KiB, 16KiB, and 2MiB pages, but not for 1GiB pages.
pub trait NotGiantPageSize: PageSize {} // @todo doesn't have to be pub??

/// A standard 4KiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}

impl PageSize for Size4KiB {
    // SIZE and MASK are derived from SHIFT so the three constants can never
    // disagree with each other.
    const SHIFT: usize = 12;
    const SIZE: u64 = 1u64 << Self::SHIFT;
    const MASK: u64 = Self::SIZE - 1;
    const SIZE_AS_DEBUG_STR: &'static str = "4KiB";
}

impl NotGiantPageSize for Size4KiB {}

/// A “huge” 2MiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}

impl PageSize for Size2MiB {
    const SHIFT: usize = 21;
    const SIZE: u64 = 1u64 << Self::SHIFT;
    const MASK: u64 = Self::SIZE - 1;
    const SIZE_AS_DEBUG_STR: &'static str = "2MiB";
}

impl NotGiantPageSize for Size2MiB {}
/// Flag-field value type used to compose `STAGE1_DESCRIPTOR` entries.
type EntryFlags = tock_registers::fields::FieldValue<u64, STAGE1_DESCRIPTOR::Register>;
// type EntryRegister = register::LocalRegisterCopy<u64, STAGE1_DESCRIPTOR::Register>;

/// L0 table -- only pointers to L1 tables
pub enum PageGlobalDirectory {}
/// L1 tables -- pointers to L2 tables or giant 1GiB pages
pub enum PageUpperDirectory {}
/// L2 tables -- pointers to L3 tables or huge 2MiB pages
pub enum PageDirectory {}
/// L3 tables -- only pointers to 4/16KiB pages
pub enum PageTable {}

/// Shared trait for specific table levels.
pub trait TableLevel {}

/// Shared trait for hierarchical table levels.
///
/// Specifies what is the next level of page table hierarchy.
pub trait HierarchicalLevel: TableLevel {
    /// Level of the next translation table below this one.
    type NextLevel: TableLevel;
}

impl TableLevel for PageGlobalDirectory {}
impl TableLevel for PageUpperDirectory {}
impl TableLevel for PageDirectory {}
impl TableLevel for PageTable {}

impl HierarchicalLevel for PageGlobalDirectory {
    type NextLevel = PageUpperDirectory;
}
impl HierarchicalLevel for PageUpperDirectory {
    type NextLevel = PageDirectory;
}
impl HierarchicalLevel for PageDirectory {
    type NextLevel = PageTable;
}
// PageTables do not have next level, therefore they are not HierarchicalLevel

/// MMU address translation table.
/// Contains just u64 internally, provides enum interface on top
#[repr(C)]
#[repr(align(4096))]
pub struct Table<L: TableLevel> {
    /// Raw 64-bit descriptors; encoded/decoded via `PageTableEntry`.
    entries: [u64; NUM_ENTRIES_4KIB as usize],
    /// Zero-sized marker pinning this table to a specific level `L`.
    level: PhantomData<L>,
}
// Implementation code shared for all levels of page tables
impl<L> Table<L>
where
    L: TableLevel,
{
    /// Zero out entire table.
    pub fn zero(&mut self) {
        self.entries.fill(0);
    }
}

impl<L: TableLevel> Index<usize> for Table<L> {
    type Output = u64;

    /// Borrow the raw descriptor at `idx`; panics when out of bounds.
    fn index(&self, idx: usize) -> &u64 {
        &self.entries[idx]
    }
}

impl<L: TableLevel> IndexMut<usize> for Table<L> {
    /// Mutably borrow the raw descriptor at `idx`; panics when out of bounds.
    fn index_mut(&mut self, idx: usize) -> &mut u64 {
        &mut self.entries[idx]
    }
}
/// Type-safe enum wrapper covering Table<L>'s 64-bit entries.
#[derive(Clone)]
// #[repr(transparent)]
enum PageTableEntry {
    /// Empty page table entry.
    Invalid,

    /// A descriptor pointing to the next page table (valid at L0, L1 or L2;
    /// L0 tables can only point to L1 tables).
    TableDescriptor(EntryFlags),

    /// A Level2 block descriptor with 2 MiB aperture.
    ///
    /// The output points to physical memory.
    Lvl2BlockDescriptor(EntryFlags),

    /// A page descriptor with 4 KiB aperture.
    ///
    /// The output points to physical memory.
    PageDescriptor(EntryFlags),
}
/// A descriptor pointing to the next page table. (within PageTableEntry enum)
// struct TableDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
impl PageTableEntry {
fn new_table_descriptor(next_lvl_table_addr: usize) -> Result<PageTableEntry, &'static str> {
if next_lvl_table_addr % Size4KiB::SIZE as usize != 0 {
// @todo SIZE must be usize
return Err("TableDescriptor: Address is not 4 KiB aligned.");
impl interface::MMU for MemoryManagementUnit {
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
let shifted = next_lvl_table_addr >> Size4KiB::SHIFT;
Ok(PageTableEntry::TableDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::TYPE::Table
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
))
}
}
/// A Level2 block descriptor with 2 MiB aperture.
///
/// The output points to physical memory.
// struct Lvl2BlockDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
impl PageTableEntry {
fn new_lvl2_block_descriptor(
output_addr: usize,
attribute_fields: AttributeFields,
) -> Result<PageTableEntry, &'static str> {
if output_addr % Size2MiB::SIZE as usize != 0 {
return Err("BlockDescriptor: Address is not 2 MiB aligned.");
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
let shifted = output_addr >> Size2MiB::SHIFT;
// Prepare the memory attribute indirection register.
self.set_up_mair();
Ok(PageTableEntry::Lvl2BlockDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::AF::Accessed
+ into_mmu_attributes(attribute_fields)
+ STAGE1_DESCRIPTOR::TYPE::Block
+ STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(shifted as u64),
))
// Populate translation tables.
KERNEL_TABLES
.populate_translation_table_entries()
.map_err(MMUEnableError::Other)?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
self.configure_translation_control();
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
barrier::isb(barrier::SY);
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction.
barrier::isb(barrier::SY);
Ok(())
}
}
/// A page descriptor with 4 KiB aperture.
///
/// The output points to physical memory.
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
impl PageTableEntry {
fn new_page_descriptor(
output_addr: usize,
attribute_fields: AttributeFields,
) -> Result<PageTableEntry, &'static str> {
if output_addr % Size4KiB::SIZE as usize != 0 {
return Err("PageDescriptor: Address is not 4 KiB aligned.");
/// Parse the ID_AA64MMFR0_EL1 register for runtime information about supported MMU features.
/// Print the current state of TCR register.
fn print_features(&self) {
// use crate::cortex_a::regs::RegisterReadWrite;
let sctlr = SCTLR_EL1.extract();
if let Some(SCTLR_EL1::M::Value::Enable) = sctlr.read_as_enum(SCTLR_EL1::M) {
println!("[i] MMU currently enabled");
}
let shifted = output_addr >> Size4KiB::SHIFT;
Ok(PageTableEntry::PageDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::AF::Accessed
+ into_mmu_attributes(attribute_fields)
+ STAGE1_DESCRIPTOR::TYPE::Table
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
))
}
}
impl From<u64> for PageTableEntry {
    /// Decode a raw table entry into the typed enum.
    ///
    /// NOTE(review): currently a stub -- every raw value decodes to `Invalid`.
    /// The comments below sketch the intended bit-pattern decode; do not rely
    /// on this conversion until it is implemented.
    fn from(_val: u64) -> PageTableEntry {
        // xxx0 -> Invalid
        // xx11 -> TableDescriptor on L0, L1 and L2
        // xx10 -> Block Entry L1 and L2
        // xx11 -> PageDescriptor L3
        PageTableEntry::Invalid
    }
}
impl From<PageTableEntry> for u64 {
fn from(val: PageTableEntry) -> u64 {
match val {
PageTableEntry::Invalid => 0,
PageTableEntry::TableDescriptor(x)
| PageTableEntry::Lvl2BlockDescriptor(x)
| PageTableEntry::PageDescriptor(x) => x.value,
if let Some(SCTLR_EL1::I::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::I) {
println!("[i] MMU I-cache enabled");
}
if let Some(SCTLR_EL1::C::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::C) {
println!("[i] MMU D-cache enabled");
}
let mmfr = ID_AA64MMFR0_EL1.extract();
if let Some(ID_AA64MMFR0_EL1::TGran4::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran4)
{
println!("[i] MMU: 4 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran16::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran16)
{
println!("[i] MMU: 16 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran64::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran64)
{
println!("[i] MMU: 64 KiB granule supported!");
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::ASIDBits) {
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_16) => {
println!("[i] MMU: 16 bit ASIDs supported!")
}
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_8) => {
println!("[i] MMU: 8 bit ASIDs supported!")
}
_ => println!("[i] MMU: Invalid ASID bits specified!"),
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::PARange) {
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_32) => {
println!("[i] MMU: Up to 32 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_36) => {
println!("[i] MMU: Up to 36 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_40) => {
println!("[i] MMU: Up to 40 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_42) => {
println!("[i] MMU: Up to 42 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_44) => {
println!("[i] MMU: Up to 44 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_48) => {
println!("[i] MMU: Up to 48 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_52) => {
println!("[i] MMU: Up to 52 Bit physical address range supported!")
}
_ => println!("[i] MMU: Invalid PARange specified!"),
}
let tcr = TCR_EL1.extract();
match tcr.read_as_enum(TCR_EL1::IPS) {
Some(TCR_EL1::IPS::Value::Bits_32) => {
println!("[i] MMU: 32 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_36) => {
println!("[i] MMU: 36 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_40) => {
println!("[i] MMU: 40 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_42) => {
println!("[i] MMU: 42 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_44) => {
println!("[i] MMU: 44 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_48) => {
println!("[i] MMU: 48 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_52) => {
println!("[i] MMU: 52 Bit intermediate physical address size supported!")
}
_ => println!("[i] MMU: Invalid IPS specified!"),
}
match tcr.read_as_enum(TCR_EL1::TG0) {
Some(TCR_EL1::TG0::Value::KiB_4) => println!("[i] MMU: TTBR0 4 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_16) => println!("[i] MMU: TTBR0 16 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_64) => println!("[i] MMU: TTBR0 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR0 granule size specified!"),
}
let t0sz = tcr.read(TCR_EL1::T0SZ);
println!("[i] MMU: T0sz = 64-{} = {} bits", t0sz, 64 - t0sz);
match tcr.read_as_enum(TCR_EL1::TG1) {
Some(TCR_EL1::TG1::Value::KiB_4) => println!("[i] MMU: TTBR1 4 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_16) => println!("[i] MMU: TTBR1 16 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_64) => println!("[i] MMU: TTBR1 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR1 granule size specified!"),
}
let t1sz = tcr.read(TCR_EL1::T1SZ);
println!("[i] MMU: T1sz = 64-{} = {} bits", t1sz, 64 - t1sz);
}
}
/// Statically allocated LVL2 table: 512 entries of 2 MiB aperture each.
///
/// NOTE(review): `static mut` access is unsynchronized -- sound only while a
/// single core runs the early-boot code; confirm against the boot path.
static mut LVL2_TABLE: Table<PageDirectory> = Table::<PageDirectory> {
    entries: [0; NUM_ENTRIES_4KIB as usize],
    level: PhantomData,
};

/// Statically allocated LVL3 table: 512 entries of 4 KiB aperture each,
/// backing the first 2 MiB of the address space.
static mut LVL3_TABLE: Table<PageTable> = Table::<PageTable> {
    entries: [0; NUM_ENTRIES_4KIB as usize],
    level: PhantomData,
};
/// Obtain the base address of an in-memory table as plain integer types.
trait BaseAddr {
    /// Base address as `u64` (e.g. for feeding TTBR registers).
    fn base_addr_u64(&self) -> u64;
    /// Base address as `usize` (e.g. for descriptor construction).
    fn base_addr_usize(&self) -> usize;
}

impl BaseAddr for [u64; 512] {
    fn base_addr_u64(&self) -> u64 {
        self.as_ptr() as u64
    }

    fn base_addr_usize(&self) -> usize {
        self.as_ptr() as usize
    }
}
/// Set up identity mapped page tables for the first 1 gigabyte of address space.
/// default: 880 MB ARM ram, 128MB VC
///
/// # Safety
///
/// Completely unsafe, we're in the hardware land! Incorrectly initialised tables will just
/// restart the CPU.
pub unsafe fn init() -> Result<(), &'static str> {
    // Prepare the memory attribute indirection register.
    mair::set_up();

    // Point the first 2 MiB of virtual addresses to the follow-up LVL3
    // page-table.
    LVL2_TABLE.entries[0] =
        PageTableEntry::new_table_descriptor(LVL3_TABLE.entries.base_addr_usize())?.into();

    // Fill the rest of the LVL2 (2 MiB) entries as block descriptors.
    //
    // Notice the skip(1) which makes the iteration start at the second 2 MiB
    // block (0x20_0000) -- the first window is covered by the LVL3 table above.
    for (block_descriptor_nr, entry) in LVL2_TABLE.entries.iter_mut().enumerate().skip(1) {
        let virt_addr = block_descriptor_nr << Size2MiB::SHIFT;
        // Identity mapping: output address and attributes come from the
        // platform's virtual address map.
        let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
            Err(s) => return Err(s),
            Ok((a, b)) => (a, b),
        };
        let block_desc =
            match PageTableEntry::new_lvl2_block_descriptor(output_addr, attribute_fields) {
                Err(s) => return Err(s),
                Ok(desc) => desc,
            };
        *entry = block_desc.into();
    }

    // Finally, fill the single LVL3 table (4 KiB granule).
    for (page_descriptor_nr, entry) in LVL3_TABLE.entries.iter_mut().enumerate() {
        let virt_addr = page_descriptor_nr << Size4KiB::SHIFT;
        let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
            Err(s) => return Err(s),
            Ok((a, b)) => (a, b),
        };
        let page_desc = match PageTableEntry::new_page_descriptor(output_addr, attribute_fields) {
            Err(s) => return Err(s),
            Ok(desc) => desc,
        };
        *entry = page_desc.into();
    }

    // Point to the LVL2 table base address in TTBR0.
    TTBR0_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // User (lo-)space addresses

    // TTBR1_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // Kernel (hi-)space addresses

    // Configure various settings of stage 1 of the EL1 translation regime.
    let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
    TCR_EL1.write(
        TCR_EL1::TBI0::Ignored // @todo TBI1 also set to Ignored??
            + TCR_EL1::IPS.val(ips) // Intermediate Physical Address Size
            // ttbr0 user memory addresses
            + TCR_EL1::TG0::KiB_4 // 4 KiB granule
            + TCR_EL1::SH0::Inner
            + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD0::EnableTTBR0Walks
            + TCR_EL1::T0SZ.val(34) // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
            // ttbr1 kernel memory addresses
            + TCR_EL1::TG1::KiB_4 // 4 KiB granule
            + TCR_EL1::SH1::Inner
            + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD1::EnableTTBR1Walks
            + TCR_EL1::T1SZ.val(34), // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
    );

    // Switch the MMU on.
    //
    // First, force all previous changes to be seen before the MMU is enabled.
    barrier::isb(barrier::SY);

    // Enable the MMU and turn on data and instruction caching.
    SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);

    // Force MMU init to complete before next instruction
    /*
     * Invalidate the local I-cache so that any instructions fetched
     * speculatively from the PoC are discarded, since they may have
     * been dynamically patched at the PoU.
     */
    barrier::isb(barrier::SY);

    Ok(())
}

View File

@ -0,0 +1,287 @@
use {
crate::{
memory::mmu::{
arch_mmu::{mair, Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes,
},
platform,
},
core::convert,
tock_registers::{
interfaces::{Readable, Writeable},
register_bitfields,
registers::InMemoryRegister,
},
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_bitfields! {
    u64,
    // AArch64 Reference Manual page 2150, D5-2445
    STAGE1_TABLE_DESCRIPTOR [
        /// Physical address of the next descriptor.
        NEXT_LEVEL_TABLE_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
        NEXT_LEVEL_TABLE_ADDR_4KiB OFFSET(12) NUMBITS(36) [],  // [47:12]

        /// Descriptor kind: block mapping or pointer to the next-level table.
        TYPE OFFSET(1) NUMBITS(1) [
            Block = 0,
            Table = 1
        ],

        /// Descriptor validity; accesses through invalid entries fault.
        VALID OFFSET(0) NUMBITS(1) [
            False = 0,
            True = 1
        ]
    ]
}
register_bitfields! {
    u64,
    // AArch64 Reference Manual page 2150, D5-2445
    STAGE1_PAGE_DESCRIPTOR [
        /// User execute-never
        UXN OFFSET(54) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        /// Privileged execute-never
        PXN OFFSET(53) NUMBITS(1) [
            Execute = 0,
            NeverExecute = 1
        ],

        /// Physical address of the next table descriptor (lvl2) or the page descriptor (lvl3).
        LVL2_OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
        LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [],  // [47:21]

        /// Access flag
        AF OFFSET(10) NUMBITS(1) [
            NotAccessed = 0,
            Accessed = 1
        ],

        /// Shareability field
        SH OFFSET(8) NUMBITS(2) [
            OuterShareable = 0b10,
            InnerShareable = 0b11
        ],

        /// Access Permissions
        AP OFFSET(6) NUMBITS(2) [
            RW_EL1 = 0b00,
            RW_EL1_EL0 = 0b01,
            RO_EL1 = 0b10,
            RO_EL1_EL0 = 0b11
        ],

        /// Memory attributes index into the MAIR_EL1 register
        AttrIndx OFFSET(2) NUMBITS(3) [],

        /// Descriptor kind: only `Page` is a valid mapping at this level.
        TYPE OFFSET(1) NUMBITS(1) [
            Reserved_Invalid = 0,
            Page = 1
        ],

        /// Descriptor validity; accesses through invalid entries fault.
        VALID OFFSET(0) NUMBITS(1) [
            False = 0,
            True = 1
        ]
    ]
}
/// A table descriptor with 64 KiB aperture.
///
/// The output points to the next table.
#[derive(Copy, Clone)]
#[repr(C)]
struct TableDescriptor {
value: u64,
}
/// A page descriptor with 64 KiB aperture.
///
/// The output points to physical memory.
#[derive(Copy, Clone)]
#[repr(C)]
struct PageDescriptor {
    /// Raw 64-bit descriptor, encoded per `STAGE1_PAGE_DESCRIPTOR`.
    value: u64,
}
/// Helper for retrieving the base address of a translation table array.
trait BaseAddr {
    /// Base address as `u64`, suitable for programming HW registers.
    fn base_addr_u64(&self) -> u64;
    /// Base address as `usize`, suitable for address arithmetic.
    fn base_addr_usize(&self) -> usize;
}
const NUM_LVL2_TABLES: usize = platform::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
    /// Page descriptors, covering 64 KiB windows per entry.
    /// 8192 entries * 64 KiB = 512 MiB, i.e. one row per lvl2 entry.
    lvl3: [[PageDescriptor; 8192]; NUM_TABLES],
    /// Table descriptors, covering 512 MiB windows.
    lvl2: [TableDescriptor; NUM_TABLES],
}

/// A translation table type for the kernel space.
pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
//--------------------------------------------------------------------------------------------------
// Private Implementations
//--------------------------------------------------------------------------------------------------
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> BaseAddr for [T; N] {
    /// The array's first-element address, as `u64`.
    fn base_addr_u64(&self) -> u64 {
        self.as_ptr() as u64
    }

    /// The array's first-element address, as `usize`.
    fn base_addr_usize(&self) -> usize {
        self.as_ptr() as usize
    }
}
impl TableDescriptor {
    /// Create an instance.
    ///
    /// Descriptor is invalid by default.
    pub const fn new_zeroed() -> Self {
        Self { value: 0 }
    }

    /// Create an instance pointing to the supplied address.
    pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
        // The descriptor stores the table's 64 KiB frame number, not the raw address.
        let table_page = (phys_next_lvl_table_addr >> Granule64KiB::SHIFT) as u64;

        let desc = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
        desc.write(
            STAGE1_TABLE_DESCRIPTOR::VALID::True
                + STAGE1_TABLE_DESCRIPTOR::TYPE::Table
                + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(table_page),
        );

        Self { value: desc.get() }
    }
}
impl PageDescriptor {
    /// Create an instance.
    ///
    /// Descriptor is invalid by default.
    pub const fn new_zeroed() -> Self {
        Self { value: 0 }
    }

    /// Create an instance mapping the given physical output address with the given attributes.
    pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
        // The descriptor stores the output 64 KiB frame number, not the raw address.
        let output_page = (phys_output_addr as u64) >> Granule64KiB::SHIFT;

        let desc = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
        desc.write(
            STAGE1_PAGE_DESCRIPTOR::VALID::True
                + STAGE1_PAGE_DESCRIPTOR::TYPE::Page
                + STAGE1_PAGE_DESCRIPTOR::AF::Accessed
                + STAGE1_PAGE_DESCRIPTOR::LVL2_OUTPUT_ADDR_64KiB.val(output_page)
                + (*attribute_fields).into(),
        );

        Self { value: desc.get() }
    }
}
/// Convert the kernel's generic memory attributes to HW-specific attributes of the MMU.
impl convert::From<AttributeFields>
    for tock_registers::fields::FieldValue<u64, STAGE1_PAGE_DESCRIPTOR::Register>
{
    fn from(attribute_fields: AttributeFields) -> Self {
        // Memory attributes: pick shareability plus the matching MAIR_EL1 index.
        let mut field_value = match attribute_fields.mem_attributes {
            MemAttributes::CacheableDRAM => {
                STAGE1_PAGE_DESCRIPTOR::SH::InnerShareable
                    + STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL)
            }
            MemAttributes::NonCacheableDRAM => {
                STAGE1_PAGE_DESCRIPTOR::SH::InnerShareable
                    + STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL_NON_CACHEABLE)
            }
            MemAttributes::Device => {
                STAGE1_PAGE_DESCRIPTOR::SH::OuterShareable
                    + STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::attr::DEVICE_NGNRE)
            }
        };

        // Access permissions.
        field_value += match attribute_fields.acc_perms {
            AccessPermissions::ReadOnly => STAGE1_PAGE_DESCRIPTOR::AP::RO_EL1,
            AccessPermissions::ReadWrite => STAGE1_PAGE_DESCRIPTOR::AP::RW_EL1,
        };

        // The execute-never attribute is mapped to PXN in AArch64.
        field_value += if attribute_fields.execute_never {
            STAGE1_PAGE_DESCRIPTOR::PXN::NeverExecute
        } else {
            STAGE1_PAGE_DESCRIPTOR::PXN::Execute
        };

        // Always set unprivileged execute-never as long as userspace is not implemented yet.
        field_value += STAGE1_PAGE_DESCRIPTOR::UXN::NeverExecute;

        field_value
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
    /// Create an instance with all descriptors zeroed (i.e. invalid).
    pub const fn new() -> Self {
        // Can't have a zero-sized address space.
        assert!(NUM_TABLES > 0);

        Self {
            lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
            lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
        }
    }

    /// Iterates over all static translation table entries and fills them at once.
    ///
    /// # Safety
    ///
    /// - Modifies a `static mut`. Ensure it only happens from here.
    pub unsafe fn populate_translation_table_entries(&mut self) -> Result<(), &'static str> {
        for l2_nr in 0..NUM_TABLES {
            // Point each lvl2 entry at its corresponding lvl3 table.
            self.lvl2[l2_nr] =
                TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());

            for l3_nr in 0..self.lvl3[l2_nr].len() {
                // Virtual address covered by this lvl3 entry.
                let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);

                // Ask the BSP layout for the output address and attributes.
                let (phys_output_addr, attribute_fields) =
                    platform::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;

                self.lvl3[l2_nr][l3_nr] =
                    PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
            }
        }

        Ok(())
    }

    /// The translation table's base address to be used for programming the MMU.
    pub fn phys_base_address(&self) -> u64 {
        self.lvl2.base_addr_u64()
    }
}

View File

@ -5,13 +5,8 @@
//! Memory management functions for aarch64.
use {
crate::println,
core::{fmt, fmt::Formatter, ops::RangeInclusive},
};
mod addr;
pub mod mmu;
// pub mod mmu; included directly as arch_mmu from main memory.rs module
pub use addr::{PhysAddr, VirtAddr};
@ -20,331 +15,3 @@ pub use addr::{PhysAddr, VirtAddr};
/// Default page size used by the kernel.
///
/// NOTE(review): the linker script now aligns sections to PAGE_SIZE = 64 KiB;
/// confirm this 4 KiB value is still the intended kernel page size.
pub const PAGE_SIZE: usize = 4096;
/// System memory map.
/// This is a fixed memory map for RasPi3,
/// @todo we need to infer the memory map from the provided DTB.
#[rustfmt::skip]
pub mod map {
    /// Beginning of memory.
    pub const START: usize = 0x0000_0000;
    /// End of memory (1 GiB address space on RasPi3).
    pub const END: usize = 0x3FFF_FFFF;

    /// Physical RAM addresses.
    pub mod phys {
        /// Base address of video (VC) memory.
        pub const VIDEOMEM_BASE: usize = 0x3e00_0000;
        /// Base address of MMIO register range.
        pub const MMIO_BASE: usize = 0x3F00_0000;
        /// Base address of ARM<->VC mailbox area.
        pub const VIDEOCORE_MBOX_BASE: usize = MMIO_BASE + 0x0000_B880;
        /// Base address of GPIO registers.
        pub const GPIO_BASE: usize = MMIO_BASE + 0x0020_0000;
        /// Base address of regular UART.
        pub const PL011_UART_BASE: usize = MMIO_BASE + 0x0020_1000;
        /// Base address of MiniUART.
        pub const MINI_UART_BASE: usize = MMIO_BASE + 0x0021_5000;
        /// End of MMIO memory.
        pub const MMIO_END: usize = super::END;
    }

    /// Virtual (mapped) addresses.
    pub mod virt {
        /// Start (top) of kernel stack.
        pub const KERN_STACK_START: usize = super::START;
        /// End (bottom) of kernel stack. SP starts at KERN_STACK_END + 1.
        pub const KERN_STACK_END: usize = 0x0007_FFFF;
        /// Location of DMA-able memory region (in the second 2 MiB block).
        pub const DMA_HEAP_START: usize = 0x0020_0000;
        /// End of DMA-able memory region.
        pub const DMA_HEAP_END: usize = 0x005F_FFFF;
    }
}
/// Types used for compiling the virtual memory layout of the kernel using address ranges.
pub mod kernel_mem_range {
    use core::ops::RangeInclusive;

    /// Memory region attributes.
    #[derive(Copy, Clone)]
    pub enum MemAttributes {
        /// Regular memory
        CacheableDRAM,
        /// Memory without caching
        NonCacheableDRAM,
        /// Device memory
        Device,
    }

    /// Memory region access permissions.
    #[derive(Copy, Clone)]
    pub enum AccessPermissions {
        /// Read-only access
        ReadOnly,
        /// Read-write access
        ReadWrite,
    }

    /// Memory region translation.
    #[allow(dead_code)]
    #[derive(Copy, Clone)]
    pub enum Translation {
        /// One-to-one address mapping
        Identity,
        /// Mapping with a specified offset
        Offset(usize),
    }

    /// Summary structure of memory region properties.
    #[derive(Copy, Clone)]
    pub struct AttributeFields {
        /// Attributes
        pub mem_attributes: MemAttributes,
        /// Permissions
        pub acc_perms: AccessPermissions,
        /// Disable executable code in this region
        pub execute_never: bool,
    }

    impl Default for AttributeFields {
        /// Defaults to read-write, cacheable DRAM that is not executable.
        fn default() -> AttributeFields {
            AttributeFields {
                mem_attributes: MemAttributes::CacheableDRAM,
                acc_perms: AccessPermissions::ReadWrite,
                execute_never: true,
            }
        }
    }

    /// Memory region descriptor.
    ///
    /// Used to construct iterable kernel memory ranges.
    pub struct Descriptor {
        /// Name of the region
        pub name: &'static str,
        /// Virtual memory range
        pub virtual_range: fn() -> RangeInclusive<usize>,
        /// Mapping translation
        pub translation: Translation,
        /// Attributes
        pub attribute_fields: AttributeFields,
    }
}
pub use kernel_mem_range::*;
/// A virtual memory layout that is agnostic of the paging granularity that the
/// hardware MMU will use.
///
/// Contains only special ranges, aka anything that is _not_ normal cacheable
/// DRAM.
///
/// NOTE(review): comments below say the linker aligns areas to 4 KiB; the
/// linker script now aligns to PAGE_SIZE = 64 KiB — confirm and update.
static KERNEL_VIRTUAL_LAYOUT: [Descriptor; 6] = [
    Descriptor {
        name: "Kernel stack",
        virtual_range: || {
            RangeInclusive::new(map::virt::KERN_STACK_START, map::virt::KERN_STACK_END)
        },
        translation: Translation::Identity,
        attribute_fields: AttributeFields {
            mem_attributes: MemAttributes::CacheableDRAM,
            acc_perms: AccessPermissions::ReadWrite,
            execute_never: true,
        },
    },
    Descriptor {
        name: "Boot code and data",
        virtual_range: || {
            // Using the linker script, we ensure that the boot area is consecutive and 4
            // KiB aligned, and we export the boundaries via symbols:
            //
            // [__BOOT_START, __BOOT_END)
            extern "C" {
                // The inclusive start of the boot area, aka the address of the
                // first byte of the area.
                static __BOOT_START: u64;

                // The exclusive end of the boot area, aka the address of
                // the first byte _after_ the RO area.
                static __BOOT_END: u64;
            }

            unsafe {
                // Notice the subtraction to turn the exclusive end into an
                // inclusive end
                RangeInclusive::new(
                    &__BOOT_START as *const _ as usize,
                    &__BOOT_END as *const _ as usize - 1,
                )
            }
        },
        translation: Translation::Identity,
        attribute_fields: AttributeFields {
            mem_attributes: MemAttributes::CacheableDRAM,
            acc_perms: AccessPermissions::ReadOnly,
            execute_never: false,
        },
    },
    Descriptor {
        name: "Kernel code and RO data",
        virtual_range: || {
            // Using the linker script, we ensure that the RO area is consecutive and 4
            // KiB aligned, and we export the boundaries via symbols:
            //
            // [__RO_START, __RO_END)
            extern "C" {
                // The inclusive start of the read-only area, aka the address of the
                // first byte of the area.
                static __RO_START: u64;

                // The exclusive end of the read-only area, aka the address of
                // the first byte _after_ the RO area.
                static __RO_END: u64;
            }

            unsafe {
                // Notice the subtraction to turn the exclusive end into an
                // inclusive end
                RangeInclusive::new(
                    &__RO_START as *const _ as usize,
                    &__RO_END as *const _ as usize - 1,
                )
            }
        },
        translation: Translation::Identity,
        attribute_fields: AttributeFields {
            mem_attributes: MemAttributes::CacheableDRAM,
            acc_perms: AccessPermissions::ReadOnly,
            execute_never: false,
        },
    },
    Descriptor {
        name: "Kernel data and BSS",
        virtual_range: || {
            extern "C" {
                static __DATA_START: u64;
                static __BSS_END: u64;
            }

            unsafe {
                RangeInclusive::new(
                    &__DATA_START as *const _ as usize,
                    &__BSS_END as *const _ as usize - 1,
                )
            }
        },
        translation: Translation::Identity,
        attribute_fields: AttributeFields {
            mem_attributes: MemAttributes::CacheableDRAM,
            acc_perms: AccessPermissions::ReadWrite,
            execute_never: true,
        },
    },
    Descriptor {
        name: "DMA heap pool",
        virtual_range: || RangeInclusive::new(map::virt::DMA_HEAP_START, map::virt::DMA_HEAP_END),
        translation: Translation::Identity,
        attribute_fields: AttributeFields {
            mem_attributes: MemAttributes::NonCacheableDRAM,
            acc_perms: AccessPermissions::ReadWrite,
            execute_never: true,
        },
    },
    Descriptor {
        name: "Device MMIO",
        virtual_range: || RangeInclusive::new(map::phys::VIDEOMEM_BASE, map::phys::MMIO_END),
        translation: Translation::Identity,
        attribute_fields: AttributeFields {
            mem_attributes: MemAttributes::Device,
            acc_perms: AccessPermissions::ReadWrite,
            execute_never: true,
        },
    },
];
/// For a given virtual address, find and return the output address and
/// according attributes.
///
/// If the address is not covered in VIRTUAL_LAYOUT, return a default for normal
/// cacheable DRAM.
pub fn get_virt_addr_properties(
    virt_addr: usize,
) -> Result<(usize, AttributeFields), &'static str> {
    if virt_addr > map::END {
        return Err("Address out of range.");
    }

    // First descriptor whose range covers the address wins.
    let hit = KERNEL_VIRTUAL_LAYOUT
        .iter()
        .find(|descriptor| (descriptor.virtual_range)().contains(&virt_addr));

    match hit {
        Some(descriptor) => {
            let output_addr = match descriptor.translation {
                Translation::Identity => virt_addr,
                Translation::Offset(base) => {
                    base + (virt_addr - (descriptor.virtual_range)().start())
                }
            };
            Ok((output_addr, descriptor.attribute_fields))
        }
        // Not in a special region: identity-mapped default DRAM.
        None => Ok((virt_addr, AttributeFields::default())),
    }
}
/// Human-readable output of AttributeFields
impl fmt::Display for AttributeFields {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Render each property as a short mnemonic, padded into columns.
        let cacheability = match self.mem_attributes {
            MemAttributes::CacheableDRAM => "C",
            MemAttributes::NonCacheableDRAM => "NC",
            MemAttributes::Device => "Dev",
        };

        let permissions = match self.acc_perms {
            AccessPermissions::ReadOnly => "RO",
            AccessPermissions::ReadWrite => "RW",
        };

        let execute = if self.execute_never { "PXN" } else { "PX" };

        write!(f, "{: <3} {} {: <3}", cacheability, permissions, execute)
    }
}
/// Human-readable output of a Descriptor.
impl fmt::Display for Descriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // log2(1024)
        const KIB_RSHIFT: u32 = 10;
        // log2(1024 * 1024)
        const MIB_RSHIFT: u32 = 20;

        // Calling the range function copies the value out of the descriptor.
        let range = (self.virtual_range)();
        let (first, last) = (*range.start(), *range.end());
        let byte_count = last - first + 1;

        // Scale to the largest unit that yields a non-zero quotient.
        let (scaled, unit) = if (byte_count >> MIB_RSHIFT) > 0 {
            (byte_count >> MIB_RSHIFT, "MiB")
        } else if (byte_count >> KIB_RSHIFT) > 0 {
            (byte_count >> KIB_RSHIFT, "KiB")
        } else {
            (byte_count, "Byte")
        };

        write!(
            f,
            " {:#010x} - {:#010x} | {: >3} {} | {} | {}",
            first, last, scaled, unit, self.attribute_fields, self.name
        )
    }
}
/// Print the kernel memory layout.
pub fn print_layout() {
    println!("[i] Kernel memory layout:");

    KERNEL_VIRTUAL_LAYOUT
        .iter()
        .for_each(|descriptor| println!("{}", descriptor));
}

View File

@ -3,6 +3,7 @@
#![feature(decl_macro)]
#![feature(allocator_api)]
#![feature(format_args_nl)]
#![feature(core_intrinsics)]
#![feature(stmt_expr_attributes)]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(custom_test_frameworks)]
@ -25,6 +26,7 @@ pub use arch::*;
pub mod devices;
pub mod macros;
pub mod memory;
mod mm;
pub mod panic;
pub mod platform;
@ -42,8 +44,8 @@ pub static CONSOLE: sync::NullLock<devices::Console> = sync::NullLock::new(devic
static DMA_ALLOCATOR: sync::NullLock<mm::BumpAllocator> =
sync::NullLock::new(mm::BumpAllocator::new(
// @todo Init this after we loaded boot memory map
memory::map::virt::DMA_HEAP_START as usize,
memory::map::virt::DMA_HEAP_END as usize,
platform::memory::map::virt::DMA_HEAP_START as usize,
platform::memory::map::virt::DMA_HEAP_END as usize,
"Global DMA Allocator",
// Try the following arguments instead to see all mailbox operations
// fail. It will cause the allocator to use memory that are marked

1
machine/src/memory.rs Normal file
View File

@ -0,0 +1 @@
pub mod mmu;

275
machine/src/memory/mmu.rs Normal file
View File

@ -0,0 +1,275 @@
use {
crate::println,
core::{
fmt::{self, Formatter},
ops::RangeInclusive,
},
};
pub mod translation_table;
#[cfg(target_arch = "aarch64")]
#[path = "../arch/aarch64/memory/mmu.rs"]
mod arch_mmu;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_mmu::mmu;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
    /// The MMU was already switched on when enabling was requested.
    AlreadyEnabled,
    /// Any other failure, described by a static message.
    Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
    use super::*;

    /// MMU functions.
    pub trait MMU {
        /// Called by the kernel during early init. Supposed to take the translation tables from the
        /// `BSP`-supplied `virt_mem_layout()` and install/activate them for the respective MMU.
        ///
        /// # Safety
        ///
        /// - Changes the HW's global state.
        unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;

        /// Returns true if the MMU is enabled, false otherwise.
        fn is_enabled(&self) -> bool;

        /// Print MMU-related features (debugging aid).
        fn print_features(&self); // debug
    }
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;

/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;

/// Architecture agnostic memory attributes.
#[derive(Copy, Clone)]
pub enum MemAttributes {
    /// Regular memory
    CacheableDRAM,
    /// Memory without caching
    NonCacheableDRAM,
    /// Device memory
    Device,
}

/// Architecture agnostic memory region access permissions.
#[derive(Copy, Clone)]
pub enum AccessPermissions {
    /// Read-only access
    ReadOnly,
    /// Read-write access
    ReadWrite,
}

/// Architecture agnostic memory region translation types.
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum Translation {
    /// One-to-one address mapping
    Identity,
    /// Mapping with a specified offset
    Offset(usize),
}

/// Summary structure of memory region properties.
#[derive(Copy, Clone)]
pub struct AttributeFields {
    /// Attributes
    pub mem_attributes: MemAttributes,
    /// Permissions
    pub acc_perms: AccessPermissions,
    /// Disable executable code in this region
    pub execute_never: bool,
}

/// Memory region descriptor, used to construct iterable kernel memory ranges
/// when compiling the virtual memory layout of the kernel.
pub struct TranslationDescriptor {
    /// Name of the region
    pub name: &'static str,
    /// Virtual memory range
    pub virtual_range: fn() -> RangeInclusive<usize>,
    /// Mapping translation
    pub physical_range_translation: Translation,
    /// Attributes
    pub attribute_fields: AttributeFields,
}

/// Type for expressing the kernel's virtual memory layout.
pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
    /// The last (inclusive) address of the address space.
    max_virt_addr_inclusive: usize,
    /// Array of descriptors for non-standard (normal cacheable DRAM) memory regions.
    inner: [TranslationDescriptor; NUM_SPECIAL_RANGES],
}
//--------------------------------------------------------------------------------------------------
// Public Implementations
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
    /// The granule's size.
    pub const SIZE: usize = Self::size_checked();

    /// The granule's shift, aka log2(size).
    pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;

    /// Rejects non-power-of-two granule sizes at compile time (const assert).
    const fn size_checked() -> usize {
        assert!(GRANULE_SIZE.is_power_of_two());

        GRANULE_SIZE
    }
}
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
    /// The address space size.
    pub const SIZE: usize = Self::size_checked();

    /// The address space shift, aka log2(size).
    pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;

    /// Rejects invalid address space sizes at compile time (const assert).
    const fn size_checked() -> usize {
        assert!(AS_SIZE.is_power_of_two());

        // Check for architectural restrictions as well.
        Self::arch_address_space_size_sanity_checks();

        AS_SIZE
    }
}
impl Default for AttributeFields {
    /// Defaults to read-write, cacheable DRAM that is not executable.
    fn default() -> AttributeFields {
        Self {
            mem_attributes: MemAttributes::CacheableDRAM,
            acc_perms: AccessPermissions::ReadWrite,
            execute_never: true,
        }
    }
}
/// Human-readable output of AttributeFields
impl fmt::Display for AttributeFields {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Short mnemonics for each property, padded into fixed-width columns.
        let mem = match self.mem_attributes {
            MemAttributes::CacheableDRAM => "C",
            MemAttributes::NonCacheableDRAM => "NC",
            MemAttributes::Device => "Dev",
        };

        let perms = match self.acc_perms {
            AccessPermissions::ReadOnly => "RO",
            AccessPermissions::ReadWrite => "RW",
        };

        let execute = if self.execute_never { "PXN" } else { "PX" };

        write!(f, "{: <3} {} {: <3}", mem, perms, execute)
    }
}
/// Human-readable output of a Descriptor.
impl fmt::Display for TranslationDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // log2(1024)
        const KIB_SHIFT: u32 = 10;
        // log2(1024 * 1024)
        const MIB_SHIFT: u32 = 20;

        // Calling the range function copies the value out of the descriptor.
        let range = (self.virtual_range)();
        let (first, last) = (*range.start(), *range.end());
        let byte_count = last - first + 1;

        // Scale to the largest unit that yields a non-zero quotient.
        let (scaled, unit) = if (byte_count >> MIB_SHIFT) > 0 {
            (byte_count >> MIB_SHIFT, "MiB")
        } else if (byte_count >> KIB_SHIFT) > 0 {
            (byte_count >> KIB_SHIFT, "KiB")
        } else {
            (byte_count, "Byte")
        };

        write!(
            f,
            " {:#010x} - {:#010x} | {: >3} {} | {} | {}",
            first, last, scaled, unit, self.attribute_fields, self.name
        )
    }
}
impl<const NUM_SPECIAL_RANGES: usize> KernelVirtualLayout<{ NUM_SPECIAL_RANGES }> {
    /// Create a new instance from the maximum address and the special-range descriptors.
    pub const fn new(max: usize, layout: [TranslationDescriptor; NUM_SPECIAL_RANGES]) -> Self {
        Self {
            max_virt_addr_inclusive: max,
            inner: layout,
        }
    }

    /// For a given virtual address, find and return the output address and
    /// corresponding attributes.
    ///
    /// If the address is not found in `inner`, return an identity mapped default for normal
    /// cacheable DRAM.
    pub fn virt_addr_properties(
        &self,
        virt_addr: usize,
    ) -> Result<(usize, AttributeFields), &'static str> {
        if virt_addr > self.max_virt_addr_inclusive {
            return Err("Address out of range");
        }

        // First special range covering the address wins.
        let hit = self
            .inner
            .iter()
            .find(|descriptor| (descriptor.virtual_range)().contains(&virt_addr));

        match hit {
            Some(descriptor) => {
                let output_addr = match descriptor.physical_range_translation {
                    Translation::Identity => virt_addr,
                    Translation::Offset(base) => {
                        base + (virt_addr - (descriptor.virtual_range)().start())
                    }
                };
                Ok((output_addr, descriptor.attribute_fields))
            }
            // Not in a special region: identity-mapped default DRAM.
            None => Ok((virt_addr, AttributeFields::default())),
        }
    }

    /// Print the kernel memory layout.
    pub fn print_layout(&self) {
        println!("[i] Kernel memory layout:"); //info!
        self.inner.iter().for_each(|descriptor| println!("{}", descriptor)); //info!
    }
}

View File

@ -0,0 +1,10 @@
//! Translation table.
#[cfg(target_arch = "aarch64")]
#[path = "../../arch/aarch64/memory/mmu/translation_table.rs"]
mod arch_translation_table;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_translation_table::KernelTranslationTable;

View File

@ -10,6 +10,9 @@ use core::{marker::PhantomData, ops};
pub mod rpi3;
#[cfg(any(feature = "rpi3", feature = "rpi4"))]
pub use rpi3::*;
pub struct MMIODerefWrapper<T> {
base_addr: usize,
phantom: PhantomData<fn() -> T>,

View File

@ -0,0 +1,127 @@
use core::cell::UnsafeCell;
pub mod mmu;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbols from the linker script.
// Symbols from the linker script.
//
// NOTE(review): comments below say areas are 4 KiB aligned; the linker script
// now aligns to PAGE_SIZE = 64 KiB — confirm and update the comments.
extern "Rust" {
    // Boot code.
    //
    // Using the linker script, we ensure that the boot area is consecutive and 4
    // KiB aligned, and we export the boundaries via symbols:
    //
    // [__BOOT_START, __BOOT_END)
    //
    // The inclusive start of the boot area, aka the address of the
    // first byte of the area.
    static __BOOT_START: UnsafeCell<()>;

    // The exclusive end of the boot area, aka the address of
    // the first byte _after_ the RO area.
    static __BOOT_END: UnsafeCell<()>;

    // Kernel code and RO data.
    //
    // Using the linker script, we ensure that the RO area is consecutive and 4
    // KiB aligned, and we export the boundaries via symbols:
    //
    // [__RO_START, __RO_END)
    //
    // The inclusive start of the read-only area, aka the address of the
    // first byte of the area.
    static __RO_START: UnsafeCell<()>;

    // The exclusive end of the read-only area, aka the address of
    // the first byte _after_ the RO area.
    static __RO_END: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// System memory map.
/// This is a fixed memory map for RasPi3,
/// @todo we need to infer the memory map from the provided DTB.
#[rustfmt::skip]
pub mod map { // @todo only pub(super) for proper isolation!
    /// Beginning of memory.
    pub const START: usize = 0x0000_0000;
    /// End of memory: 8 GiB address space (RPi4).
    pub const END_INCLUSIVE: usize = 0x1_FFFF_FFFF;

    /// Physical RAM addresses.
    pub mod phys {
        /// Base address of video (VC) memory.
        pub const VIDEOMEM_BASE: usize = 0x3e00_0000;
        /// Base address of MMIO register range.
        pub const MMIO_BASE: usize = 0x3F00_0000;
        /// Base address of ARM<->VC mailbox area.
        pub const VIDEOCORE_MBOX_BASE: usize = MMIO_BASE + 0x0000_B880;
        /// Base address of GPIO registers.
        pub const GPIO_BASE: usize = MMIO_BASE + 0x0020_0000;
        /// Base address of regular UART.
        pub const PL011_UART_BASE: usize = MMIO_BASE + 0x0020_1000;
        /// Base address of MiniUART.
        pub const MINI_UART_BASE: usize = MMIO_BASE + 0x0021_5000;
        /// End of MMIO memory.
        /// NOTE(review): equals END_INCLUSIVE (8 GiB), far beyond the RPi3
        /// peripheral window starting at MMIO_BASE — confirm intended.
        pub const MMIO_END: usize = super::END_INCLUSIVE;
    }

    /// Virtual (mapped) addresses.
    pub mod virt {
        /// Start (top) of kernel stack.
        pub const KERN_STACK_START: usize = super::START;
        /// End (bottom) of kernel stack. SP starts at KERN_STACK_END + 1.
        pub const KERN_STACK_END: usize = 0x0007_FFFF;
        /// Location of DMA-able memory region (in the second 2 MiB block).
        pub const DMA_HEAP_START: usize = 0x0020_0000;
        /// End of DMA-able memory region.
        pub const DMA_HEAP_END: usize = 0x005F_FFFF;
    }
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start page address of the boot segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn boot_start() -> usize {
    // SAFETY: __BOOT_START is placed by the linker script; only its address is read.
    unsafe { __BOOT_START.get() as usize }
}
/// Exclusive end page address of the boot segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn boot_end_exclusive() -> usize {
    // SAFETY: __BOOT_END is placed by the linker script; only its address is read.
    unsafe { __BOOT_END.get() as usize }
}
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn code_start() -> usize {
    // SAFETY: __RO_START is placed by the linker script; only its address is read.
    unsafe { __RO_START.get() as usize }
}
/// Exclusive end page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn code_end_exclusive() -> usize {
    // SAFETY: __RO_END is placed by the linker script; only its address is read.
    unsafe { __RO_END.get() as usize }
}

View File

@ -0,0 +1,113 @@
use {super::map as memory_map, crate::memory::mmu::*, core::ops::RangeInclusive};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;

/// Number of special (non-cacheable-DRAM) ranges in `LAYOUT` below.
const NUM_MEM_RANGES: usize = 5;
/// The virtual memory layout that is agnostic of the paging granularity that the
/// hardware MMU will use.
///
/// Contains only special ranges, aka anything that is _not_ normal cacheable
/// DRAM.
pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::new(
    memory_map::END_INCLUSIVE,
    [
        TranslationDescriptor {
            name: "Boot code and data",
            virtual_range: boot_range_inclusive,
            physical_range_translation: Translation::Identity,
            attribute_fields: AttributeFields {
                mem_attributes: MemAttributes::CacheableDRAM,
                acc_perms: AccessPermissions::ReadOnly,
                execute_never: false,
            },
        },
        TranslationDescriptor {
            name: "Kernel code and RO data",
            virtual_range: code_range_inclusive,
            physical_range_translation: Translation::Identity,
            attribute_fields: AttributeFields {
                mem_attributes: MemAttributes::CacheableDRAM,
                acc_perms: AccessPermissions::ReadOnly,
                execute_never: false,
            },
        },
        TranslationDescriptor {
            name: "Remapped Device MMIO",
            virtual_range: remapped_mmio_range_inclusive,
            // Maps the remap window onto the start of the peripheral range.
            physical_range_translation: Translation::Offset(
                memory_map::phys::MMIO_BASE + 0x20_0000,
            ),
            attribute_fields: AttributeFields {
                mem_attributes: MemAttributes::Device,
                acc_perms: AccessPermissions::ReadWrite,
                execute_never: true,
            },
        },
        TranslationDescriptor {
            name: "Device MMIO",
            virtual_range: mmio_range_inclusive,
            physical_range_translation: Translation::Identity,
            attribute_fields: AttributeFields {
                mem_attributes: MemAttributes::Device,
                acc_perms: AccessPermissions::ReadWrite,
                execute_never: true,
            },
        },
        TranslationDescriptor {
            name: "DMA heap pool",
            virtual_range: dma_range_inclusive,
            physical_range_translation: Translation::Identity,
            attribute_fields: AttributeFields {
                mem_attributes: MemAttributes::NonCacheableDRAM,
                acc_perms: AccessPermissions::ReadWrite,
                execute_never: true,
            },
        },
    ],
);
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Inclusive virtual address range of the boot code and data segment.
fn boot_range_inclusive() -> RangeInclusive<usize> {
    // Notice the subtraction to turn the exclusive end into an inclusive end.
    // Allow the lint, for consistency with `code_range_inclusive` below.
    #[allow(clippy::range_minus_one)]
    RangeInclusive::new(super::boot_start(), super::boot_end_exclusive() - 1)
}
/// Inclusive virtual address range of the kernel code and RO data segment.
fn code_range_inclusive() -> RangeInclusive<usize> {
    // Notice the subtraction to turn the exclusive end into an inclusive end.
    #[allow(clippy::range_minus_one)]
    RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
}
/// The virtual window used for remapped MMIO access:
/// the last 64 KiB slot in the first 512 MiB.
fn remapped_mmio_range_inclusive() -> RangeInclusive<usize> {
    0x1FFF_0000..=0x1FFF_FFFF
}
/// Inclusive address range of the identity-mapped device MMIO region.
fn mmio_range_inclusive() -> RangeInclusive<usize> {
    memory_map::phys::MMIO_BASE..=memory_map::phys::MMIO_END
}
/// Inclusive address range of the DMA-able heap region.
fn dma_range_inclusive() -> RangeInclusive<usize> {
    memory_map::virt::DMA_HEAP_START..=memory_map::virt::DMA_HEAP_END
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the virtual memory layout.
///
/// The layout is a static singleton; callers receive a shared reference.
pub fn virt_mem_layout() -> &'static KernelVirtualLayout<NUM_MEM_RANGES> {
    &LAYOUT
}

View File

@ -9,6 +9,7 @@ pub mod display;
pub mod fb;
pub mod gpio;
pub mod mailbox;
pub mod memory;
pub mod mini_uart;
pub mod pl011_uart;
pub mod power;

View File

@ -44,12 +44,16 @@ fn panicked(info: &PanicInfo) -> ! {
}
fn print_mmu_state_and_features() {
memory::mmu::print_features();
use machine::memory::mmu::interface::MMU;
memory::mmu::mmu().print_features();
}
fn init_mmu() {
unsafe {
memory::mmu::init().unwrap();
use machine::memory::mmu::interface::MMU;
if let Err(e) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", e);
}
}
println!("[!] MMU initialised");
print_mmu_state_and_features();
@ -117,12 +121,12 @@ pub fn kmain() -> ! {
#[cfg(feature = "jtag")]
machine::arch::jtag::wait_debugger();
init_mmu();
init_exception_traps();
#[cfg(not(feature = "noserial"))]
init_uart_serial();
init_exception_traps();
init_mmu();
#[cfg(test)]
test_main();
@ -145,7 +149,7 @@ fn command_prompt() {
b"uart" => init_uart_serial(),
b"disp" => check_display_init(),
b"trap" => check_data_abort_trap(),
b"map" => machine::arch::memory::print_layout(),
b"map" => machine::platform::memory::mmu::virt_mem_layout().print_layout(),
b"led on" => set_led(true),
b"led off" => set_led(false),
b"help" => print_help(),