feat: Add kernel and MMIO mapping support

Not all memory is mapped anymore: only the kernel
sections are mapped at boot, and MMIO regions are
mapped on demand into the MMIO remap space.
Berkus Decker 2023-08-09 01:29:13 +03:00 committed by Berkus Decker
parent 028866fdbb
commit a656a9bdd7
25 changed files with 2299 additions and 506 deletions
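Before the per-file changes, here is a minimal sketch of the new bring-up order this commit introduces (function names taken from the diff below; error handling simplified):

```rust
// Sketch of the boot-time flow; see the main() hunk below for the real
// call sites. Error handling is simplified for illustration.
unsafe fn early_mmu_init() -> Result<(), &'static str> {
    // 1. Populate the kernel tables with the kernel sections only and
    //    obtain the tables' physical base address.
    let phys_tables_base_addr = memory::mmu::kernel_map_binary()?;

    // 2. Program TTBR0 with that base and switch the MMU + caches on.
    memory::mmu::enable_mmu_and_caching(phys_tables_base_addr)
        .map_err(|_| "enabling MMU failed")?;

    // 3. Initialize the MMIO VA allocator; MMIO regions are then mapped
    //    lazily through kernel_map_mmio() as drivers request them.
    memory::mmu::post_enable_init();
    Ok(())
}
```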

View File

@ -4,7 +4,5 @@
*/
mod asid;
mod phys_addr;
mod virt_addr;
pub use {asid::*, phys_addr::*, virt_addr::*};
pub use asid::*;

View File

@ -13,9 +13,9 @@
use {
crate::{
memory::mmu::{
interface, interface::MMU, translation_table::KernelTranslationTable, AddressSpace,
MMUEnableError, TranslationGranule,
memory::{
mmu::{interface, interface::MMU, AddressSpace, MMUEnableError, TranslationGranule},
Address, Physical,
},
platform, println,
},
@ -58,13 +58,6 @@ pub mod mair {
// Global instances
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// # Safety
///
/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
static mut KERNEL_TABLES: KernelTranslationTable = KernelTranslationTable::new();
static MMU: MemoryManagementUnit = MemoryManagementUnit;
//--------------------------------------------------------------------------------------------------
@ -75,7 +68,7 @@ impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
assert!((AS_SIZE % Granule512MiB::SIZE) == 0); // assert!() is const-friendly
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
@ -102,7 +95,7 @@ impl MemoryManagementUnit {
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let t0sz = (64 - platform::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
let t0sz = (64 - platform::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Used
@ -124,7 +117,7 @@ impl MemoryManagementUnit {
//--------------------------------------------------------------------------------------------------
/// Return a reference to the MMU instance.
pub fn mmu() -> &'static impl MMU {
pub fn mmu() -> &'static impl interface::MMU {
&MMU
}
@ -133,7 +126,10 @@ pub fn mmu() -> &'static impl MMU {
//------------------------------------------------------------------------------
impl interface::MMU for MemoryManagementUnit {
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
unsafe fn enable_mmu_and_caching(
&self,
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
@ -141,20 +137,20 @@ impl interface::MMU for MemoryManagementUnit {
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other {
err: "Translation granule not supported in HW",
err: "Translation granule not supported by hardware",
});
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Populate translation tables.
KERNEL_TABLES
.populate_translation_table_entries()
.map_err(|err| MMUEnableError::Other { err })?;
// // Populate translation tables.
// KERNEL_TABLES
// .populate_translation_table_entries()
// .map_err(|err| MMUEnableError::Other { err })?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
TTBR0_EL1.set_baddr(phys_tables_base_addr.as_usize() as u64);
self.configure_translation_control();
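The `t0sz` line above warrants a worked example. T0SZ tells the MMU how many upper virtual-address bits are unused; a hedged sketch, assuming a hypothetical 4 GiB (2^32 byte) kernel address space:

```rust
// T0SZ = 64 - log2(address space size). With an assumed 4 GiB
// KernelVirtAddrSpace (SIZE_SHIFT = 32), TCR_EL1.T0SZ becomes 32,
// i.e. the top 32 VA bits must be zero for TTBR0 translations.
const SIZE_SHIFT: usize = 32; // assumption for illustration
const T0SZ: u64 = (64 - SIZE_SHIFT) as u64;

fn main() {
    assert_eq!(T0SZ, 32);
}
```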

View File

@ -1,7 +1,11 @@
use {
super::{mair, Granule512MiB, Granule64KiB},
crate::{
memory::mmu::{AccessPermissions, AttributeFields, MemAttributes},
memory::{
self,
mmu::{AccessPermissions, AttributeFields, MemAttributes, MemoryRegion, PageAddress},
Address, Physical, Virtual,
},
platform,
},
core::convert,
@ -18,7 +22,9 @@ use {
register_bitfields! {
u64,
// AArch64 Reference Manual page 2150, D5-2445
/// A table descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-15.
/// AArch64 Reference Manual page 2150, D5-2445
STAGE1_TABLE_DESCRIPTOR [
/// Physical address of the next descriptor.
NEXT_LEVEL_TABLE_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
@ -38,9 +44,11 @@ register_bitfields! {
register_bitfields! {
u64,
// AArch64 Reference Manual page 2150, D5-2445
/// A level 3 page descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-17.
/// AArch64 Reference Manual page 2150, D5-2445
STAGE1_PAGE_DESCRIPTOR [
// User execute-never
/// Unprivileged execute-never.
UXN OFFSET(54) NUMBITS(1) [
Execute = 0,
NeverExecute = 1
@ -53,8 +61,8 @@ register_bitfields! {
],
/// Physical address of the next table descriptor (lvl2) or the page descriptor (lvl3).
LVL2_OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [], // [47:21]
OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [], // [47:21]
/// Access flag
AF OFFSET(10) NUMBITS(1) [
@ -110,11 +118,12 @@ struct PageDescriptor {
}
trait BaseAddr {
fn phys_start_addr(&self) -> Address<Physical>;
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
}
const NUM_LVL2_TABLES: usize = platform::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
// const NUM_LVL2_TABLES: usize = platform::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -130,17 +139,24 @@ pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
/// Table descriptors, covering 512 MiB windows.
lvl2: [TableDescriptor; NUM_TABLES],
/// Have the tables been initialized?
initialized: bool,
}
/// A translation table type for the kernel space.
pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
// /// A translation table type for the kernel space.
// pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
//--------------------------------------------------------------------------------------------------
// Private Implementations
//--------------------------------------------------------------------------------------------------
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> BaseAddr for [T; N] {
// The binary is still identity mapped, so we don't need to convert here.
fn phys_start_addr(&self) -> Address<Physical> {
Address::new(self as *const _ as usize)
}
fn base_addr_u64(&self) -> u64 {
self as *const T as u64
}
@ -159,10 +175,10 @@ impl TableDescriptor {
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr.as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
@ -182,12 +198,15 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
pub fn from_output_page_addr(
phys_output_page_addr: PageAddress<Physical>,
attribute_fields: &AttributeFields,
) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_page_addr.into_inner().as_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::LVL2_OUTPUT_ADDR_64KiB.val(shifted)
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted as u64)
+ STAGE1_PAGE_DESCRIPTOR::AF::Accessed
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
@ -196,6 +215,12 @@ impl PageDescriptor {
Self { value: val.get() }
}
/// Returns the valid bit.
fn is_valid(&self) -> bool {
InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value)
.is_set(STAGE1_PAGE_DESCRIPTOR::VALID)
}
}
/// Convert the kernel's generic memory attributes to HW-specific attributes of the MMU.
@ -243,43 +268,174 @@ impl convert::From<AttributeFields>
// Public Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AssociatedTranslationTable
for memory::mmu::AddressSpace<AS_SIZE>
where
[u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
type TableStartFromBottom = FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }>;
}
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
assert!(platform::memory::mmu::KernelGranule::SIZE == Granule64KiB::SIZE); // assert! is const-fn-friendly
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
initialized: false,
}
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
#[inline(always)]
fn lvl2_lvl3_index_from_page_addr(
&self,
virt_page_addr: PageAddress<Virtual>,
) -> Result<(usize, usize), &'static str> {
let addr = virt_page_addr.into_inner().as_usize();
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
if lvl2_index > (NUM_TABLES - 1) {
return Err("Virtual page is out of bounds of translation table");
}
Ok((lvl2_index, lvl3_index))
}
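A worked example of this index split, assuming the 64 KiB granule (`SHIFT = 16`) and 512 MiB lvl2 windows (`SHIFT = 29`) used throughout this file:

```rust
// Standalone sketch of lvl2_lvl3_index_from_page_addr()'s arithmetic.
const GRANULE_64KIB_SHIFT: usize = 16; // 64 KiB pages
const GRANULE_512MIB_SHIFT: usize = 29; // one lvl2 entry spans 512 MiB
const GRANULE_512MIB_MASK: usize = (1 << GRANULE_512MIB_SHIFT) - 1;

fn main() {
    // 512 MiB + 64 KiB: second lvl2 window, second page within it.
    let addr: usize = (1 << GRANULE_512MIB_SHIFT) + (1 << GRANULE_64KIB_SHIFT);
    let lvl2_index = addr >> GRANULE_512MIB_SHIFT;
    let lvl3_index = (addr & GRANULE_512MIB_MASK) >> GRANULE_64KIB_SHIFT;
    assert_eq!((lvl2_index, lvl3_index), (1, 1));
}
```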
/// Sets the PageDescriptor corresponding to the supplied page address.
///
/// Doesn't allow overriding an already valid page.
#[inline(always)]
fn set_page_descriptor_from_page_addr(
&mut self,
virt_page_addr: PageAddress<Virtual>,
new_desc: &PageDescriptor,
) -> Result<(), &'static str> {
let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from_page_addr(virt_page_addr)?;
let desc = &mut self.lvl3[lvl2_index][lvl3_index];
if desc.is_valid() {
return Err("Virtual page is already mapped");
}
*desc = *new_desc;
Ok(())
}
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::TranslationTable
for FixedSizeTranslationTable<NUM_TABLES>
{
/// Iterates over all static translation table entries and fills them at once.
///
/// # Safety
///
/// - Modifies a `static mut`. Ensure it only happens from here.
pub unsafe fn populate_translation_table_entries(&mut self) -> Result<(), &'static str> {
for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
*l2_entry =
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
// pub unsafe fn populate_translation_table_entries(&mut self) -> Result<(), &'static str> {
// for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
// *l2_entry =
// TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
//
// for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
// let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
//
// let (phys_output_addr, attribute_fields) =
// platform::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
//
// *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
// }
// }
//
// Ok(())
// }
fn init(&mut self) {
if self.initialized {
return;
}
for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
// Populate the l2 entries.
for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() {
let phys_table_addr = self.lvl3[lvl2_nr].phys_start_addr();
let (phys_output_addr, attribute_fields) =
platform::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
let new_desc = TableDescriptor::from_next_lvl_table_addr(phys_table_addr);
*lvl2_entry = new_desc;
}
*l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
}
self.initialized = true;
}
fn phys_base_address(&self) -> Address<Physical> {
self.lvl2.phys_start_addr()
}
unsafe fn map_at(
&mut self,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
assert!(self.initialized, "Translation tables not initialized");
if virt_region.size() != phys_region.size() {
return Err("Tried to map memory regions with different sizes");
}
if phys_region.end_exclusive_page_addr()
> platform::memory::phys_addr_space_end_exclusive_addr()
{
return Err("Tried to map outside of physical address space");
}
#[allow(clippy::useless_conversion)]
let iter = phys_region.into_iter().zip(virt_region.into_iter());
for (phys_page_addr, virt_page_addr) in iter {
let new_desc = PageDescriptor::from_output_page_addr(phys_page_addr, attr);
let virt_page = virt_page_addr;
self.set_page_descriptor_from_page_addr(virt_page, &new_desc)?;
}
Ok(())
}
}
/// The translation table's base address to be used for programming the MMU.
pub fn phys_base_address(&self) -> u64 {
self.lvl2.base_addr_u64()
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
pub type MinSizeTranslationTable = FixedSizeTranslationTable<1>;
#[cfg(test)]
mod tests {
use super::*;
/// Check if the size of `struct TableDescriptor` is as expected.
#[test_case]
fn size_of_tabledescriptor_equals_64_bit() {
assert_eq!(
core::mem::size_of::<TableDescriptor>(),
core::mem::size_of::<u64>()
);
}
/// Check if the size of `struct PageDescriptor` is as expected.
#[test_case]
fn size_of_pagedescriptor_equals_64_bit() {
assert_eq!(
core::mem::size_of::<PageDescriptor>(),
core::mem::size_of::<u64>()
);
}
}

View File

@ -8,10 +8,10 @@
mod addr;
pub mod mmu;
pub use addr::{PhysAddr, VirtAddr};
// pub use addr::{PhysAddr, VirtAddr};
// aarch64 granules and page sizes howto:
// https://stackoverflow.com/questions/34269185/simultaneous-existence-of-different-sized-pages-on-aarch64
/// Default page size used by the kernel.
pub const PAGE_SIZE: usize = 4096;
pub const PAGE_SIZE: usize = 65536;

View File

@ -1,19 +1,27 @@
#![no_std]
#![no_main]
#![allow(stable_features)]
#![allow(incomplete_features)]
#![feature(asm_const)]
#![feature(const_option)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(generic_const_exprs)]
#![feature(int_roundings)]
#![feature(is_sorted)]
#![feature(linkage)]
#![feature(nonzero_min_max)]
#![feature(panic_info_message)]
#![feature(step_trait)]
#![feature(trait_alias)]
#![feature(unchecked_math)]
#![feature(decl_macro)]
#![feature(ptr_internals)]
#![feature(allocator_api)]
#![feature(format_args_nl)]
#![feature(core_intrinsics)]
#![feature(const_option)]
#![feature(strict_provenance)]
#![feature(stmt_expr_attributes)]
#![feature(slice_ptr_get)]
#![feature(panic_info_message)]
#![feature(nonnull_slice_from_raw_parts)] // stabilised in 1.71 nightly
#![feature(unchecked_math)]
#![feature(custom_test_frameworks)]
#![test_runner(crate::tests::test_runner)]
#![reexport_test_harness_main = "test_main"]
@ -91,8 +99,21 @@ mod lib_tests {
#[no_mangle]
pub unsafe fn main() -> ! {
exception::handling_init();
let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() {
Err(string) => panic!("Error mapping kernel binary: {}", string),
Ok(addr) => addr,
};
if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) {
panic!("Enabling MMU failed: {}", e);
}
memory::mmu::post_enable_init();
platform::drivers::qemu_bring_up_console();
test_main();
qemu::semihosting::exit_success()
}
}

View File

@ -0,0 +1,245 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2022 Andre Richter <andre.o.richter@gmail.com>
//! A record of mapped pages.
use {
super::{
types::{AccessPermissions, AttributeFields, MMIODescriptor, MemAttributes, MemoryRegion},
Address, Physical, Virtual,
},
crate::{
info, mm, platform,
synchronization::{self, InitStateLock},
warn,
},
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Type describing a virtual memory mapping.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
struct MappingRecordEntry {
pub users: [Option<&'static str>; 5],
pub phys_start_addr: Address<Physical>,
pub virt_start_addr: Address<Virtual>,
pub num_pages: usize,
pub attribute_fields: AttributeFields,
}
struct MappingRecord {
inner: [Option<MappingRecordEntry>; 12],
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MAPPING_RECORD: InitStateLock<MappingRecord> =
InitStateLock::new(MappingRecord::new());
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl MappingRecordEntry {
pub fn new(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Self {
Self {
users: [Some(name), None, None, None, None],
phys_start_addr: phys_region.start_addr(),
virt_start_addr: virt_region.start_addr(),
num_pages: phys_region.num_pages(),
attribute_fields: *attr,
}
}
fn find_next_free_user(&mut self) -> Result<&mut Option<&'static str>, &'static str> {
if let Some(x) = self.users.iter_mut().find(|x| x.is_none()) {
return Ok(x);
};
Err("Storage for user info exhausted")
}
pub fn add_user(&mut self, user: &'static str) -> Result<(), &'static str> {
let x = self.find_next_free_user()?;
*x = Some(user);
Ok(())
}
}
impl MappingRecord {
pub const fn new() -> Self {
Self { inner: [None; 12] }
}
fn size(&self) -> usize {
self.inner.iter().filter(|x| x.is_some()).count()
}
fn sort(&mut self) {
let upper_bound_exclusive = self.size();
let entries = &mut self.inner[0..upper_bound_exclusive];
if !entries.is_sorted_by_key(|item| item.unwrap().virt_start_addr) {
entries.sort_unstable_by_key(|item| item.unwrap().virt_start_addr)
}
}
fn find_next_free(&mut self) -> Result<&mut Option<MappingRecordEntry>, &'static str> {
if let Some(x) = self.inner.iter_mut().find(|x| x.is_none()) {
return Ok(x);
}
Err("Storage for mapping info exhausted")
}
fn find_duplicate(
&mut self,
phys_region: &MemoryRegion<Physical>,
) -> Option<&mut MappingRecordEntry> {
self.inner
.iter_mut()
.filter_map(|x| x.as_mut())
.filter(|x| x.attribute_fields.mem_attributes == MemAttributes::Device)
.find(|x| {
if x.phys_start_addr != phys_region.start_addr() {
return false;
}
if x.num_pages != phys_region.num_pages() {
return false;
}
true
})
}
pub fn add(
&mut self,
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let x = self.find_next_free()?;
*x = Some(MappingRecordEntry::new(
name,
virt_region,
phys_region,
attr,
));
self.sort();
Ok(())
}
pub fn print(&self) {
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
info!(
" {:^44} {:^30} {:^7} {:^9} {:^35}",
"Virtual", "Physical", "Size", "Attr", "Entity"
);
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
for i in self.inner.iter().flatten() {
let size = i.num_pages * platform::memory::mmu::KernelGranule::SIZE;
let virt_start = i.virt_start_addr;
let virt_end_inclusive = virt_start + (size - 1);
let phys_start = i.phys_start_addr;
let phys_end_inclusive = phys_start + (size - 1);
let (size, unit) = mm::size_human_readable_ceil(size);
let attr = match i.attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => "C",
MemAttributes::NonCacheableDRAM => "NC",
MemAttributes::Device => "Dev",
};
let acc_p = match i.attribute_fields.acc_perms {
AccessPermissions::ReadOnly => "RO",
AccessPermissions::ReadWrite => "RW",
};
let xn = if i.attribute_fields.execute_never {
"XN"
} else {
"X"
};
info!(
" {}..{} --> {}..{} | {:>3} {} | {:<3} {} {:<2} | {}",
virt_start,
virt_end_inclusive,
phys_start,
phys_end_inclusive,
size,
unit,
attr,
acc_p,
xn,
i.users[0].unwrap()
);
for k in i.users[1..].iter() {
if let Some(additional_user) = *k {
info!(
" | {}",
additional_user
);
}
}
}
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use synchronization::interface::ReadWriteEx;
/// Add an entry to the mapping info record.
pub fn kernel_add(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_region, phys_region, attr))
}
pub fn kernel_find_and_insert_mmio_duplicate(
mmio_descriptor: &MMIODescriptor,
new_user: &'static str,
) -> Option<Address<Virtual>> {
let phys_region: MemoryRegion<Physical> = (*mmio_descriptor).into();
KERNEL_MAPPING_RECORD.write(|mr| {
let dup = mr.find_duplicate(&phys_region)?;
if let Err(x) = dup.add_user(new_user) {
warn!("{}", x);
}
Some(dup.virt_start_addr)
})
}
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print() {
KERNEL_MAPPING_RECORD.read(|mr| mr.print());
}

View File

@ -1,7 +1,11 @@
use {
crate::println,
crate::{
memory::{Address, Physical, Virtual},
platform, println, synchronization, warn,
},
core::{
fmt::{self, Formatter},
num::NonZeroUsize,
ops::RangeInclusive,
},
snafu::Snafu,
@ -10,12 +14,17 @@ use {
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::memory::mmu as arch_mmu;
pub mod translation_table;
mod mapping_record;
mod page_alloc;
pub(crate) mod translation_table;
mod types;
pub use types::*;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_mmu::mmu;
// pub use arch_mmu::mmu;
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -37,13 +46,15 @@ pub mod interface {
/// MMU functions.
pub trait MMU {
/// Called by the kernel during early init. Supposed to take the translation tables from the
/// `BSP`-supplied `virt_mem_layout()` and install/activate them for the respective MMU.
/// Turns on the MMU for the first time and enables data and instruction caching.
///
/// # Safety
///
/// - Changes the HW's global state.
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;
/// - Changes the hardware's global state.
unsafe fn enable_mmu_and_caching(
&self,
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
@ -58,80 +69,64 @@ pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Architecture agnostic memory attributes.
#[derive(Copy, Clone)]
pub enum MemAttributes {
/// Regular memory
CacheableDRAM,
/// Memory without caching
NonCacheableDRAM,
/// Device memory
Device,
}
/// Architecture agnostic memory region access permissions.
#[derive(Copy, Clone)]
pub enum AccessPermissions {
/// Read-only access
ReadOnly,
/// Read-write access
ReadWrite,
}
// Architecture agnostic memory region translation types.
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum Translation {
/// One-to-one address mapping
Identity,
/// Mapping with a specified offset
Offset(usize),
}
/// Summary structure of memory region properties.
#[derive(Copy, Clone)]
pub struct AttributeFields {
/// Attributes
pub mem_attributes: MemAttributes,
/// Permissions
pub acc_perms: AccessPermissions,
/// Disable executable code in this region
pub execute_never: bool,
}
/// Types used for compiling the virtual memory layout of the kernel using address ranges.
///
/// Memory region descriptor.
///
/// Used to construct iterable kernel memory ranges.
pub struct TranslationDescriptor {
/// Name of the region
pub name: &'static str,
/// Virtual memory range
pub virtual_range: fn() -> RangeInclusive<usize>,
/// Mapping translation
pub physical_range_translation: Translation,
/// Attributes
pub attribute_fields: AttributeFields,
}
/// Type for expressing the kernel's virtual memory layout.
pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
/// The last (inclusive) address of the address space.
max_virt_addr_inclusive: usize,
/// Array of descriptors for non-standard (normal cacheable DRAM) memory regions.
inner: [TranslationDescriptor; NUM_SPECIAL_RANGES],
/// Intended to be implemented for [`AddressSpace`].
pub trait AssociatedTranslationTable {
/// A translation table whose address range is:
///
/// [AS_SIZE - 1, 0]
type TableStartFromBottom;
}
//--------------------------------------------------------------------------------------------------
// Public Implementations
// Private Code
//--------------------------------------------------------------------------------------------------
use {
interface::MMU, synchronization::interface::*, translation_table::interface::TranslationTable,
};
/// Query the platform for the reserved virtual addresses for MMIO remapping
/// and initialize the kernel's MMIO VA allocator with it.
fn kernel_init_mmio_va_allocator() {
let region = platform::memory::mmu::virt_mmio_remap_region();
page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.init(region));
}
/// Map a region in the kernel's translation tables.
///
/// No input checks done, input is passed through to the architectural implementation.
///
/// # Safety
///
/// - See `map_at()`.
/// - Does not prevent aliasing.
unsafe fn kernel_map_at_unchecked(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
platform::memory::mmu::kernel_translation_tables()
.write(|tables| tables.map_at(virt_region, phys_region, attr))?;
if let Err(x) = mapping_record::kernel_add(name, virt_region, phys_region, attr) {
warn!("{}", x);
}
Ok(())
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
/// The granule's mask.
pub const MASK: usize = Self::SIZE - 1;
/// The granule's shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
@ -159,110 +154,158 @@ impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
}
}
impl Default for AttributeFields {
fn default() -> AttributeFields {
AttributeFields {
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Raw mapping of a virtual to physical region in the kernel translation tables.
///
/// Prevents mapping into the MMIO range of the tables.
///
/// # Safety
///
/// - See `kernel_map_at_unchecked()`.
/// - Does not prevent aliasing. Currently, the callers must be trusted.
pub unsafe fn kernel_map_at(
name: &'static str,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
if platform::memory::mmu::virt_mmio_remap_region().overlaps(virt_region) {
return Err("Attempt to manually map into MMIO region");
}
kernel_map_at_unchecked(name, virt_region, phys_region, attr)?;
Ok(())
}
/// MMIO remapping in the kernel translation tables.
///
/// Typically used by device drivers.
///
/// # Safety
///
/// - Same as `kernel_map_at_unchecked()`, minus the aliasing part.
pub unsafe fn kernel_map_mmio(
name: &'static str,
mmio_descriptor: &MMIODescriptor,
) -> Result<Address<Virtual>, &'static str> {
let phys_region = MemoryRegion::from(*mmio_descriptor);
let offset_into_start_page = mmio_descriptor.start_addr().offset_into_page();
// Check if an identical region has been mapped for another driver. If so, reuse it.
let virt_addr = if let Some(addr) =
mapping_record::kernel_find_and_insert_mmio_duplicate(mmio_descriptor, name)
{
addr
// Otherwise, allocate a new region and map it.
} else {
let num_pages = match NonZeroUsize::new(phys_region.num_pages()) {
None => return Err("Requested 0 pages"),
Some(x) => x,
};
let virt_region =
page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
kernel_map_at_unchecked(
name,
&virt_region,
&phys_region,
&AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
)?;
virt_region.start_addr()
};
Ok(virt_addr + offset_into_start_page)
}
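For context, a hypothetical driver-side use of `kernel_map_mmio()`; the device name, base address, and size are made up for illustration, not taken from this commit:

```rust
// Hypothetical driver bring-up sketch; addresses and sizes are illustrative.
use crate::memory::{mmu, mmu::MMIODescriptor, Address, Physical};

unsafe fn map_uart_registers() -> Result<(), &'static str> {
    let descriptor = MMIODescriptor::new(Address::<Physical>::new(0xFE20_1000), 0x48);

    // Allocates a region from the MMIO VA allocator and maps it as
    // Device/RW/XN memory; an identical region that another driver
    // already mapped is transparently reused instead.
    let virt_addr = mmu::kernel_map_mmio("PL011 UART", &descriptor)?;
    let _ = virt_addr; // would become the driver's register base
    Ok(())
}
```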
/// Map the kernel's binary. Returns the translation table's base address.
///
/// # Safety
///
/// - See [`bsp::memory::mmu::kernel_map_binary()`].
pub unsafe fn kernel_map_binary() -> Result<Address<Physical>, &'static str> {
let phys_kernel_tables_base_addr =
platform::memory::mmu::kernel_translation_tables().write(|tables| {
tables.init();
tables.phys_base_address()
});
platform::memory::mmu::kernel_map_binary()?;
Ok(phys_kernel_tables_base_addr)
}
/// Enable the MMU and data + instruction caching.
///
/// # Safety
///
/// - Crucial function during kernel init. Changes the complete memory view of the processor.
#[inline]
pub unsafe fn enable_mmu_and_caching(
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Finish initialization of the MMU subsystem.
#[inline]
pub fn post_enable_init() {
kernel_init_mmio_va_allocator();
}
/// Human-readable print of all recorded kernel mappings.
#[inline]
pub fn kernel_print_mappings() {
mapping_record::kernel_print()
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use {
super::*,
crate::memory::mmu::types::{
AccessPermissions, AttributeFields, MemAttributes, MemoryRegion, PageAddress,
},
core::num::NonZeroUsize,
};
/// Check that you cannot map into the MMIO VA range from kernel_map_at().
#[test_case]
fn no_manual_mmio_map() {
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
phys_start_page_addr.checked_offset(5).unwrap();
let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
let num_pages = NonZeroUsize::new(phys_region.num_pages()).unwrap();
let virt_region = page_alloc::kernel_mmio_va_allocator()
.lock(|allocator| allocator.alloc(num_pages))
.unwrap();
let attr = AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
}
}
}
/// Human-readable output of AttributeFields
impl fmt::Display for AttributeFields {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let attr = match self.mem_attributes {
MemAttributes::CacheableDRAM => "C",
MemAttributes::NonCacheableDRAM => "NC",
MemAttributes::Device => "Dev",
};
let acc_p = match self.acc_perms {
AccessPermissions::ReadOnly => "RO",
AccessPermissions::ReadWrite => "RW",
unsafe {
assert_eq!(
kernel_map_at("test", &virt_region, &phys_region, &attr),
Err("Attempt to manually map into MMIO region")
)
};
let xn = if self.execute_never { "PXN" } else { "PX" };
write!(f, "{: <3} {} {: <3}", attr, acc_p, xn)
}
}
/// Human-readable output of a Descriptor.
impl fmt::Display for TranslationDescriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Call the function to which self.range points, and dereference the
// result, which causes Rust to copy the value.
let start = *(self.virtual_range)().start();
let end = *(self.virtual_range)().end();
let size = end - start + 1;
// log2(1024)
const KIB_SHIFT: u32 = 10;
// log2(1024 * 1024)
const MIB_SHIFT: u32 = 20;
let (size, unit) = if (size >> MIB_SHIFT) > 0 {
(size >> MIB_SHIFT, "MiB")
} else if (size >> KIB_SHIFT) > 0 {
(size >> KIB_SHIFT, "KiB")
} else {
(size, "Byte")
};
write!(
f,
" {:#010x} - {:#010x} | {: >3} {} | {} | {}",
start, end, size, unit, self.attribute_fields, self.name
)
}
}
impl<const NUM_SPECIAL_RANGES: usize> KernelVirtualLayout<{ NUM_SPECIAL_RANGES }> {
/// Create a new instance.
pub const fn new(max: usize, layout: [TranslationDescriptor; NUM_SPECIAL_RANGES]) -> Self {
Self {
max_virt_addr_inclusive: max,
inner: layout,
}
}
/// For a given virtual address, find and return the output address and
/// corresponding attributes.
///
/// If the address is not found in `inner`, return an identity mapped default for normal
/// cacheable DRAM.
pub fn virt_addr_properties(
&self,
virt_addr: usize,
) -> Result<(usize, AttributeFields), &'static str> {
if virt_addr > self.max_virt_addr_inclusive {
return Err("Address out of range");
}
for i in self.inner.iter() {
if (i.virtual_range)().contains(&virt_addr) {
let output_addr = match i.physical_range_translation {
Translation::Identity => virt_addr,
Translation::Offset(a) => a + (virt_addr - (i.virtual_range)().start()),
};
return Ok((output_addr, i.attribute_fields));
}
}
Ok((virt_addr, AttributeFields::default()))
}
/// Print the kernel memory layout.
pub fn print_layout(&self) {
println!("[i] Kernel memory layout:"); //info!
for i in self.inner.iter() {
// for i in KERNEL_VIRTUAL_LAYOUT.iter() {
println!("{}", i); //info!
}
}
}

View File

@ -0,0 +1,72 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
//! Page allocation.
use {
super::MemoryRegion,
crate::{
memory::{AddressType, Virtual},
synchronization::IRQSafeNullLock,
warn,
},
core::num::NonZeroUsize,
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// A page allocator that can be lazily initialized.
pub struct PageAllocator<ATYPE: AddressType> {
pool: Option<MemoryRegion<ATYPE>>,
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
IRQSafeNullLock::new(PageAllocator::new());
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's MMIO virtual address allocator.
pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
&KERNEL_MMIO_VA_ALLOCATOR
}
impl<ATYPE: AddressType> PageAllocator<ATYPE> {
/// Create an instance.
pub const fn new() -> Self {
Self { pool: None }
}
/// Initialize the allocator.
pub fn init(&mut self, pool: MemoryRegion<ATYPE>) {
if self.pool.is_some() {
warn!("Already initialized");
return;
}
self.pool = Some(pool);
}
/// Allocate a number of pages.
pub fn alloc(
&mut self,
num_requested_pages: NonZeroUsize,
) -> Result<MemoryRegion<ATYPE>, &'static str> {
if self.pool.is_none() {
return Err("Allocator not initialized");
}
self.pool
.as_mut()
.unwrap()
.take_first_n_pages(num_requested_pages)
}
}

View File

@ -3,7 +3,94 @@
#[cfg(target_arch = "aarch64")]
use crate::arch::aarch64::memory::mmu::translation_table as arch_translation_table;
use {
super::{AttributeFields, MemoryRegion},
crate::memory::{Address, Physical, Virtual},
};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_translation_table::KernelTranslationTable;
#[cfg(target_arch = "aarch64")]
pub use arch_translation_table::FixedSizeTranslationTable;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Translation table interfaces.
pub mod interface {
use super::*;
/// Translation table operations.
pub trait TranslationTable {
/// Anything that needs to run before any of the other provided functions can be used.
///
/// # Safety
///
/// - Implementor must ensure that this function can run only once or is harmless if invoked
/// multiple times.
fn init(&mut self);
/// The translation table's base address to be used for programming the MMU.
fn phys_base_address(&self) -> Address<Physical>;
/// Map the given virtual memory region to the given physical memory region.
///
/// # Safety
///
/// - Using wrong attributes can cause multiple issues of different nature in the system.
/// - It is not required that the architectural implementation prevents aliasing. That is,
/// mapping to the same physical memory using multiple virtual addresses, which would
/// break Rust's ownership assumptions. This should be protected against in the kernel's
/// generic MMU code.
unsafe fn map_at(
&mut self,
virt_region: &MemoryRegion<Virtual>,
phys_region: &MemoryRegion<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str>;
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use {
super::*,
crate::memory::mmu::{AccessPermissions, MemAttributes, PageAddress},
arch_translation_table::MinSizeTranslationTable,
interface::TranslationTable,
};
/// Sanity checks for the TranslationTable implementation.
#[test_case]
fn translation_table_implementation_sanity() {
// This will occupy a lot of space on the stack.
let mut tables = MinSizeTranslationTable::new();
tables.init();
let virt_start_page_addr: PageAddress<Virtual> = PageAddress::from(0);
let virt_end_exclusive_page_addr: PageAddress<Virtual> =
virt_start_page_addr.checked_offset(5).unwrap();
let phys_start_page_addr: PageAddress<Physical> = PageAddress::from(0);
let phys_end_exclusive_page_addr: PageAddress<Physical> =
phys_start_page_addr.checked_offset(5).unwrap();
let virt_region = MemoryRegion::new(virt_start_page_addr, virt_end_exclusive_page_addr);
let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
let attr = AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
};
unsafe { assert_eq!(tables.map_at(&virt_region, &phys_region, &attr), Ok(())) };
}
}

View File

@ -0,0 +1,402 @@
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
use {
crate::{
memory::{Address, AddressType, Physical},
mm,
platform::{self, memory::mmu::KernelGranule},
},
core::{
fmt::{self, Formatter},
iter::Step,
num::NonZeroUsize,
ops::Range,
},
};
/// A wrapper type around [Address] that ensures page alignment.
#[derive(Copy, Clone, Debug, Eq, PartialOrd, PartialEq)]
pub struct PageAddress<ATYPE: AddressType> {
inner: Address<ATYPE>,
}
/// A type that describes a region of memory in quantities of pages.
#[derive(Copy, Clone, Debug, Eq, PartialOrd, PartialEq)]
pub struct MemoryRegion<ATYPE: AddressType> {
start: PageAddress<ATYPE>,
end_exclusive: PageAddress<ATYPE>,
}
/// Architecture agnostic memory attributes.
#[derive(Copy, Clone, Debug, Eq, PartialOrd, PartialEq)]
pub enum MemAttributes {
/// Regular memory
CacheableDRAM,
/// Memory without caching
NonCacheableDRAM,
/// Device memory
Device,
}
/// Architecture agnostic memory region access permissions.
#[derive(Copy, Clone, Debug, Eq, PartialOrd, PartialEq)]
pub enum AccessPermissions {
/// Read-only access
ReadOnly,
/// Read-write access
ReadWrite,
}
/// Summary structure of memory region properties.
#[derive(Copy, Clone, Debug, Eq, PartialOrd, PartialEq)]
pub struct AttributeFields {
/// Attributes
pub mem_attributes: MemAttributes,
/// Permissions
pub acc_perms: AccessPermissions,
/// Disable executable code in this region
pub execute_never: bool,
}
/// An MMIO descriptor for use in device drivers.
#[derive(Copy, Clone)]
pub struct MMIODescriptor {
start_addr: Address<Physical>,
end_addr_exclusive: Address<Physical>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// PageAddress
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> PageAddress<ATYPE> {
/// Unwraps the value.
pub fn into_inner(self) -> Address<ATYPE> {
self.inner
}
/// Calculates the offset from the page address.
///
/// `count` is in units of [PageAddress]. For example, a count of 2 means `result = self + 2 *
/// page_size`.
pub fn checked_offset(self, count: isize) -> Option<Self> {
if count == 0 {
return Some(self);
}
let delta = count.unsigned_abs().checked_mul(KernelGranule::SIZE)?;
let result = if count.is_positive() {
self.inner.as_usize().checked_add(delta)?
} else {
self.inner.as_usize().checked_sub(delta)?
};
Some(Self {
inner: Address::new(result),
})
}
}
impl<ATYPE: AddressType> From<usize> for PageAddress<ATYPE> {
fn from(addr: usize) -> Self {
assert!(
mm::is_aligned(addr, KernelGranule::SIZE),
"Input usize not page aligned"
);
Self {
inner: Address::new(addr),
}
}
}
impl<ATYPE: AddressType> From<Address<ATYPE>> for PageAddress<ATYPE> {
fn from(addr: Address<ATYPE>) -> Self {
assert!(addr.is_page_aligned(), "Input Address not page aligned");
Self { inner: addr }
}
}
impl<ATYPE: AddressType> Step for PageAddress<ATYPE> {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if start > end {
return None;
}
// Since start <= end, do unchecked arithmetic.
Some((end.inner.as_usize() - start.inner.as_usize()) >> KernelGranule::SHIFT)
}
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(count as isize)
}
fn backward_checked(start: Self, count: usize) -> Option<Self> {
start.checked_offset(-(count as isize))
}
}
//------------------------------------------------------------------------------
// MemoryRegion
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> MemoryRegion<ATYPE> {
/// Create an instance.
pub fn new(start: PageAddress<ATYPE>, end_exclusive: PageAddress<ATYPE>) -> Self {
assert!(start <= end_exclusive);
Self {
start,
end_exclusive,
}
}
fn as_range(&self) -> Range<PageAddress<ATYPE>> {
self.into_iter()
}
/// Returns the start page address.
pub fn start_page_addr(&self) -> PageAddress<ATYPE> {
self.start
}
/// Returns the start address.
pub fn start_addr(&self) -> Address<ATYPE> {
self.start.into_inner()
}
/// Returns the exclusive end page address.
pub fn end_exclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive
}
/// Returns the exclusive end page address.
pub fn end_inclusive_page_addr(&self) -> PageAddress<ATYPE> {
self.end_exclusive.checked_offset(-1).unwrap()
}
/// Checks if self contains an address.
pub fn contains(&self, addr: Address<ATYPE>) -> bool {
let page_addr = PageAddress::from(addr.align_down_page());
self.as_range().contains(&page_addr)
}
/// Checks if there is an overlap with another memory region.
pub fn overlaps(&self, other_region: &Self) -> bool {
let self_range = self.as_range();
self_range.contains(&other_region.start_page_addr())
|| self_range.contains(&other_region.end_inclusive_page_addr())
}
/// Returns the number of pages contained in this region.
pub fn num_pages(&self) -> usize {
PageAddress::steps_between(&self.start, &self.end_exclusive).unwrap()
}
/// Returns the size in bytes of this region.
pub fn size(&self) -> usize {
// Invariant: start <= end_exclusive, so do unchecked arithmetic.
let end_exclusive = self.end_exclusive.into_inner().as_usize();
let start = self.start.into_inner().as_usize();
end_exclusive - start
}
/// Splits the MemoryRegion like:
///
/// --------------------------------------------------------------------------------
/// |                          |                                                    |
/// --------------------------------------------------------------------------------
/// ^                          ^                                                    ^
/// |                          |                                                    |
/// left_start                 left_end_exclusive                                   |
///                            ^                                                    |
///                            |                                                    |
///                            right_start                            right_end_exclusive
///
/// Left region is returned to the caller. Right region is the new region for this struct.
pub fn take_first_n_pages(&mut self, num_pages: NonZeroUsize) -> Result<Self, &'static str> {
let count: usize = num_pages.into();
let left_end_exclusive = self.start.checked_offset(count as isize);
let left_end_exclusive = match left_end_exclusive {
None => return Err("Overflow while calculating left_end_exclusive"),
Some(x) => x,
};
if left_end_exclusive > self.end_exclusive {
return Err("Not enough free pages");
}
let allocation = Self {
start: self.start,
end_exclusive: left_end_exclusive,
};
self.start = left_end_exclusive;
Ok(allocation)
}
}
impl<ATYPE: AddressType> IntoIterator for MemoryRegion<ATYPE> {
type Item = PageAddress<ATYPE>;
type IntoIter = Range<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
Range {
start: self.start,
end: self.end_exclusive,
}
}
}
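Because `PageAddress` implements `Step`, a `MemoryRegion` can be iterated page by page, which is what `map_at()` relies on when zipping the physical and virtual regions. A small sketch, assuming the 64 KiB kernel granule:

```rust
// Walking a region page by page (granule size assumed to be 64 KiB).
let start = PageAddress::<Virtual>::from(0);
let end_exclusive = start.checked_offset(3).unwrap();
let region = MemoryRegion::new(start, end_exclusive);

for (i, page) in region.into_iter().enumerate() {
    // Visits pages at 0, 64 KiB and 128 KiB.
    assert_eq!(page.into_inner().as_usize(), i * 65536);
}
```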
impl From<MMIODescriptor> for MemoryRegion<Physical> {
fn from(desc: MMIODescriptor) -> Self {
let start = PageAddress::from(desc.start_addr.align_down_page());
let end_exclusive = PageAddress::from(desc.end_addr_exclusive().align_up_page());
Self {
start,
end_exclusive,
}
}
}
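This conversion rounds the descriptor outward to whole pages; a worked example, again assuming the 64 KiB granule:

```rust
// An MMIO descriptor need not be page aligned; the From impl above
// rounds it outward to page boundaries (values chosen for illustration):
//
//   start = 0xFE20_1084, size = 0x48  =>  end_exclusive = 0xFE20_10CC
//     start_page = align_down(0xFE20_1084, 64 KiB) = 0xFE20_0000
//     end_page   = align_up  (0xFE20_10CC, 64 KiB) = 0xFE21_0000
//
// The region spans exactly one 64 KiB page; kernel_map_mmio() later adds
// back the 0x1084 offset into the start page.
```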
//------------------------------------------------------------------------------
// MMIODescriptor
//------------------------------------------------------------------------------
impl MMIODescriptor {
/// Create an instance.
pub const fn new(start_addr: Address<Physical>, size: usize) -> Self {
assert!(size > 0);
let end_addr_exclusive = Address::new(start_addr.as_usize() + size);
Self {
start_addr,
end_addr_exclusive,
}
}
/// Return the start address.
pub const fn start_addr(&self) -> Address<Physical> {
self.start_addr
}
/// Return the exclusive end address.
pub fn end_addr_exclusive(&self) -> Address<Physical> {
self.end_addr_exclusive
}
}
//------------------------------------------------------------------------------
// AttributeFields
//------------------------------------------------------------------------------
impl Default for AttributeFields {
fn default() -> AttributeFields {
AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
}
}
}
/// Human-readable output of AttributeFields
impl fmt::Display for AttributeFields {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let attr = match self.mem_attributes {
MemAttributes::CacheableDRAM => "C",
MemAttributes::NonCacheableDRAM => "NC",
MemAttributes::Device => "Dev",
};
let acc_p = match self.acc_perms {
AccessPermissions::ReadOnly => "RO",
AccessPermissions::ReadWrite => "RW",
};
let xn = if self.execute_never { "PXN" } else { "PX" };
write!(f, "{: <3} {} {: <3}", attr, acc_p, xn)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use {super::*, crate::memory::Virtual};
/// Sanity of [PageAddress] methods.
#[test_case]
fn pageaddress_type_method_sanity() {
let page_addr: PageAddress<Virtual> = PageAddress::from(KernelGranule::SIZE * 2);
assert_eq!(
page_addr.checked_offset(-2),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(
page_addr.checked_offset(2),
Some(PageAddress::<Virtual>::from(KernelGranule::SIZE * 4))
);
assert_eq!(
PageAddress::<Virtual>::from(0).checked_offset(0),
Some(PageAddress::<Virtual>::from(0))
);
assert_eq!(PageAddress::<Virtual>::from(0).checked_offset(-1), None);
let max_page_addr = Address::<Virtual>::new(usize::MAX).align_down_page();
assert_eq!(
PageAddress::<Virtual>::from(max_page_addr).checked_offset(1),
None
);
let zero = PageAddress::<Virtual>::from(0);
let three = PageAddress::<Virtual>::from(KernelGranule::SIZE * 3);
assert_eq!(PageAddress::steps_between(&zero, &three), Some(3));
}
/// Sanity of [MemoryRegion] methods.
#[test_case]
fn memoryregion_type_method_sanity() {
let zero = PageAddress::<Virtual>::from(0);
let zero_region = MemoryRegion::new(zero, zero);
assert_eq!(zero_region.num_pages(), 0);
assert_eq!(zero_region.size(), 0);
let one = PageAddress::<Virtual>::from(KernelGranule::SIZE);
let one_region = MemoryRegion::new(zero, one);
assert_eq!(one_region.num_pages(), 1);
assert_eq!(one_region.size(), KernelGranule::SIZE);
let three = PageAddress::<Virtual>::from(KernelGranule::SIZE * 3);
let mut three_region = MemoryRegion::new(zero, three);
assert!(three_region.contains(zero.into_inner()));
assert!(!three_region.contains(three.into_inner()));
assert!(three_region.overlaps(&one_region));
let allocation = three_region
.take_first_n_pages(NonZeroUsize::new(2).unwrap())
.unwrap();
assert_eq!(allocation.num_pages(), 2);
assert_eq!(three_region.num_pages(), 1);
for (i, alloc) in allocation.into_iter().enumerate() {
assert_eq!(alloc.into_inner().as_usize(), i * KernelGranule::SIZE);
}
}
}

View File

@ -0,0 +1,124 @@
//--------------------------------------------------------------------------------------------------
// Laterrrr
//--------------------------------------------------------------------------------------------------
/// Architecture agnostic memory region translation types.
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum Translation {
/// One-to-one address mapping
Identity,
/// Mapping with a specified offset
Offset(usize),
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Types used for compiling the virtual memory layout of the kernel using address ranges.
///
/// Memory region descriptor.
///
/// Used to construct iterable kernel memory ranges.
pub struct TranslationDescriptor {
/// Name of the region
pub name: &'static str,
/// Virtual memory range
pub virtual_range: fn() -> RangeInclusive<usize>,
/// Mapping translation
pub physical_range_translation: Translation,
/// Attributes
pub attribute_fields: AttributeFields,
}
/// Type for expressing the kernel's virtual memory layout.
pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
/// The last (inclusive) address of the address space.
max_virt_addr_inclusive: usize,
/// Array of descriptors for non-standard (normal cacheable DRAM) memory regions.
inner: [TranslationDescriptor; NUM_SPECIAL_RANGES],
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Human-readable output of a Descriptor.
impl fmt::Display for TranslationDescriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Call the function to which self.range points, and dereference the
// result, which causes Rust to copy the value.
let start = *(self.virtual_range)().start();
let end = *(self.virtual_range)().end();
let size = end - start + 1;
// log2(1024)
const KIB_SHIFT: u32 = 10;
// log2(1024 * 1024)
const MIB_SHIFT: u32 = 20;
let (size, unit) = if (size >> MIB_SHIFT) > 0 {
(size >> MIB_SHIFT, "MiB")
} else if (size >> KIB_SHIFT) > 0 {
(size >> KIB_SHIFT, "KiB")
} else {
(size, "Byte")
};
write!(
f,
" {:#010x} - {:#010x} | {: >3} {} | {} | {}",
start, end, size, unit, self.attribute_fields, self.name
)
}
}
impl<const NUM_SPECIAL_RANGES: usize> KernelVirtualLayout<{ NUM_SPECIAL_RANGES }> {
/// Create a new instance.
pub const fn new(max: usize, layout: [TranslationDescriptor; NUM_SPECIAL_RANGES]) -> Self {
Self {
max_virt_addr_inclusive: max,
inner: layout,
}
}
/// For a given virtual address, find and return the output address and
/// corresponding attributes.
///
/// If the address is not found in `inner`, return an identity mapped default for normal
/// cacheable DRAM.
pub fn virt_addr_properties(
&self,
virt_addr: usize,
) -> Result<(usize, AttributeFields), &'static str> {
if virt_addr > self.max_virt_addr_inclusive {
return Err("Address out of range");
}
for i in self.inner.iter() {
if (i.virtual_range)().contains(&virt_addr) {
let output_addr = match i.physical_range_translation {
Translation::Identity => virt_addr,
Translation::Offset(a) => a + (virt_addr - (i.virtual_range)().start()),
};
return Ok((output_addr, i.attribute_fields));
}
}
Ok((virt_addr, AttributeFields::default()))
}
/// Print the kernel memory layout.
pub fn print_layout(&self) {
println!("[i] Kernel memory layout:"); //info!
for i in self.inner.iter() {
// for i in KERNEL_VIRTUAL_LAYOUT.iter() {
println!("{}", i); //info!
}
}
}

View File

@ -1 +1,168 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2022 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management.
use {
crate::{mm, platform},
core::{
fmt,
marker::PhantomData,
ops::{Add, Sub},
},
};
pub mod mmu;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Metadata trait for marking the type of an address.
pub trait AddressType: Copy + Clone + PartialOrd + PartialEq + Ord + Eq {}
/// Zero-sized type to mark a physical address.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Ord, Eq)]
pub enum Physical {}
/// Zero-sized type to mark a virtual address.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Ord, Eq)]
pub enum Virtual {}
/// Generic address type.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Ord, Eq)]
pub struct Address<ATYPE: AddressType> {
value: usize,
_address_type: PhantomData<fn() -> ATYPE>,
}
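The `PhantomData<fn() -> ATYPE>` marker costs nothing at runtime but makes physical and virtual addresses distinct types. A minimal sketch of what that buys:

```rust
// Mixing address spaces is a compile-time error, not a runtime bug.
fn marker_demo() {
    let phys = Address::<Physical>::new(0x8_0000);
    let virt = Address::<Virtual>::new(0x8_0000);

    let _delta = virt - Address::<Virtual>::new(0x1_0000); // OK: same marker
    // let _bad = virt - phys; // rejected: expected Address<Virtual>,
    //                         // found Address<Physical>
    let _ = phys;
}
```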
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl AddressType for Physical {}
impl AddressType for Virtual {}
impl<ATYPE: AddressType> Address<ATYPE> {
/// Create an instance.
pub const fn new(value: usize) -> Self {
Self {
value,
_address_type: PhantomData,
}
}
/// Convert to usize.
pub const fn as_usize(self) -> usize {
self.value
}
/// Align down to page size.
#[must_use]
pub const fn align_down_page(self) -> Self {
let aligned = mm::align_down(self.value, platform::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Align up to page size.
#[must_use]
pub const fn align_up_page(self) -> Self {
let aligned = mm::align_up(self.value, platform::memory::mmu::KernelGranule::SIZE);
Self::new(aligned)
}
/// Checks if the address is page aligned.
pub const fn is_page_aligned(&self) -> bool {
mm::is_aligned(self.value, platform::memory::mmu::KernelGranule::SIZE)
}
/// Return the address' offset into the corresponding page.
pub const fn offset_into_page(&self) -> usize {
self.value & platform::memory::mmu::KernelGranule::MASK
}
}
impl<ATYPE: AddressType> Add<usize> for Address<ATYPE> {
type Output = Self;
#[inline(always)]
fn add(self, rhs: usize) -> Self::Output {
match self.value.checked_add(rhs) {
None => panic!("Overflow on Address::add"),
Some(x) => Self::new(x),
}
}
}
impl<ATYPE: AddressType> Sub<Address<ATYPE>> for Address<ATYPE> {
type Output = Self;
#[inline(always)]
fn sub(self, rhs: Address<ATYPE>) -> Self::Output {
match self.value.checked_sub(rhs.value) {
None => panic!("Overflow on Address::sub"),
Some(x) => Self::new(x),
}
}
}
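// Type-safety sketch (hypothetical function): the zero-sized marker types make it a
// compile error to mix physical and virtual addresses, while same-kind arithmetic
// via the impls above works as usual.
fn _address_arithmetic_demo(virt: Address<Virtual>, _phys: Address<Physical>) -> Address<Virtual> {
    // let _bad = virt - _phys; // rejected: mismatched `AddressType` markers
    (virt + 0x1000) - Address::new(0x1000)
}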
impl fmt::Display for Address<Physical> {
// We don't expect to see physical addresses wider than 40 bits.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let q3: u8 = ((self.value >> 32) & 0xff) as u8;
let q2: u16 = ((self.value >> 16) & 0xffff) as u16;
let q1: u16 = (self.value & 0xffff) as u16;
write!(f, "0x")?;
write!(f, "{:02x}_", q3)?;
write!(f, "{:04x}_", q2)?;
write!(f, "{:04x}", q1)
}
}
impl fmt::Display for Address<Virtual> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let q4: u16 = ((self.value >> 48) & 0xffff) as u16;
let q3: u16 = ((self.value >> 32) & 0xffff) as u16;
let q2: u16 = ((self.value >> 16) & 0xffff) as u16;
let q1: u16 = (self.value & 0xffff) as u16;
write!(f, "0x")?;
write!(f, "{:04x}_", q4)?;
write!(f, "{:04x}_", q3)?;
write!(f, "{:04x}_", q2)?;
write!(f, "{:04x}", q1)
}
}
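// Formatting sketch (arbitrary example values; output derived from the impls above):
//
//   Address::<Physical>::new(0x3F20_1000) -> "0x00_3f20_1000"
//   Address::<Virtual>::new(0x0008_0000)  -> "0x0000_0000_0008_0000"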
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
/// Sanity of [Address] methods.
#[test_case]
fn address_type_method_sanity() {
let addr = Address::<Virtual>::new(platform::memory::mmu::KernelGranule::SIZE + 100);
assert_eq!(
addr.align_down_page().as_usize(),
platform::memory::mmu::KernelGranule::SIZE
);
assert_eq!(
addr.align_up_page().as_usize(),
platform::memory::mmu::KernelGranule::SIZE * 2
);
assert!(!addr.is_page_aligned());
assert_eq!(addr.offset_into_page(), 100);
}
}

View File

@ -3,29 +3,62 @@
* Copyright (c) Berkus Decker <berkus+vesper@metta.systems>
*/
pub mod bump_allocator;
mod bump_allocator;
pub use bump_allocator::BumpAllocator;
/// Align address downwards.
///
/// Returns the greatest x with alignment `align` so that x <= addr.
/// The alignment must be a power of 2.
pub fn align_down(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
addr & !(align - 1)
#[inline(always)]
pub const fn align_down(addr: usize, alignment: usize) -> usize {
assert!(
alignment.is_power_of_two(),
"`alignment` must be a power of two"
);
addr & !(alignment - 1)
}
/// Align address upwards.
///
/// Returns the smallest x with alignment `align` so that x >= addr.
/// The alignment must be a power of 2.
pub fn align_up(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
let align_mask = align - 1;
if addr & align_mask == 0 {
addr // already aligned
#[inline(always)]
pub const fn align_up(value: usize, alignment: usize) -> usize {
assert!(
alignment.is_power_of_two(),
"`alignment` must be a power of two"
);
(value + alignment - 1) & !(alignment - 1)
}
/// Check if a value is aligned to a given alignment.
/// The alignment must be a power of 2.
#[inline(always)]
pub const fn is_aligned(value: usize, alignment: usize) -> bool {
assert!(
alignment.is_power_of_two(),
"`alignment` must be a power of two"
);
(value & (alignment - 1)) == 0
}
/// Convert a size into human readable format.
pub const fn size_human_readable_ceil(size: usize) -> (usize, &'static str) {
const KIB: usize = 1024;
const MIB: usize = 1024 * 1024;
const GIB: usize = 1024 * 1024 * 1024;
if (size / GIB) > 0 {
(size.div_ceil(GIB), "GiB")
} else if (size / MIB) > 0 {
(size.div_ceil(MIB), "MiB")
} else if (size / KIB) > 0 {
(size.div_ceil(KIB), "KiB")
} else {
(addr | align_mask) + 1
(size, "Byte")
}
}
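// Worked examples (sketch): the alignment helpers hold for any power-of-two
// alignment, and `size_human_readable_ceil` rounds up.
const _: () = {
    assert!(align_down(0x1_2345, 0x1000) == 0x1_2000);
    assert!(align_up(0x1_2345, 0x1000) == 0x1_3000);
    assert!(is_aligned(0x1_2000, 0x1000));
    // 3 MiB + 1 byte rounds up to 4 MiB.
    assert!(size_human_readable_ceil(3 * 1024 * 1024 + 1).0 == 4);
};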

View File

@ -10,6 +10,7 @@ use {
crate::{
drivers,
exception::{self, asynchronous::IRQHandlerDescriptor},
memory::{Address, Virtual},
platform::device_driver::common::BoundedUsize,
},
core::fmt,
@ -93,7 +94,7 @@ impl InterruptController {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(periph_mmio_start_addr: usize) -> Self {
pub const unsafe fn new(periph_mmio_start_addr: Address<Virtual>) -> Self {
Self {
periph: peripheral_ic::PeripheralIC::new(periph_mmio_start_addr),
}

View File

@ -81,7 +81,7 @@ impl PeripheralIC {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
pub const unsafe fn new(mmio_start_addr: Address<Virtual>) -> Self {
Self {
wo_registers: IRQSafeNullLock::new(WriteOnlyRegisters::new(mmio_start_addr)),
ro_registers: ReadOnlyRegisters::new(mmio_start_addr),
@ -101,7 +101,10 @@ impl PeripheralIC {
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::{Mutex, ReadWriteEx};
use {
crate::memory::{Address, Virtual},
synchronization::interface::{Mutex, ReadWriteEx},
};
impl exception::asynchronous::interface::IRQManager for PeripheralIC {
type IRQNumberType = PeripheralIRQ;

View File

@ -12,6 +12,7 @@ use {
console::interface,
devices::serial::SerialOps,
exception::asynchronous::IRQNumber,
memory::{Address, Virtual},
platform::{
device_driver::{common::MMIODerefWrapper, gpio},
BcmHost,
@ -195,9 +196,9 @@ impl MiniUart {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(base_addr: usize) -> Self {
pub const unsafe fn new(mmio_base_addr: Address<Virtual>) -> Self {
Self {
inner: IRQSafeNullLock::new(MiniUartInner::new(base_addr)),
inner: IRQSafeNullLock::new(MiniUartInner::new(mmio_base_addr)),
}
}
@ -224,9 +225,9 @@ impl MiniUartInner {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(base_addr: usize) -> Self {
pub const unsafe fn new(mmio_base_addr: Address<Virtual>) -> Self {
Self {
registers: Registers::new(base_addr),
registers: Registers::new(mmio_base_addr),
}
}

View File

@ -14,6 +14,7 @@ use {
cpu::loop_while,
devices::serial::SerialOps,
exception,
memory::{Address, Virtual},
platform::{
device_driver::{common::MMIODerefWrapper, gpio, IRQNumber},
mailbox::{self, Mailbox, MailboxOps},
@ -282,9 +283,6 @@ pub struct RateDivisors {
fractional_baud_rate_divisor: u32,
}
// [temporary] Used in mmu.rs to set up local paging
pub const UART0_BASE: usize = BcmHost::get_peripheral_address() + 0x20_1000;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
@ -329,9 +327,9 @@ impl PL011Uart {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(base_addr: usize) -> Self {
pub const unsafe fn new(mmio_base_addr: Address<Virtual>) -> Self {
Self {
inner: IRQSafeNullLock::new(PL011UartInner::new(base_addr)),
inner: IRQSafeNullLock::new(PL011UartInner::new(mmio_base_addr)),
}
}
@ -362,9 +360,9 @@ impl PL011UartInner {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(base_addr: usize) -> Self {
pub const unsafe fn new(mmio_base_addr: Address<Virtual>) -> Self {
Self {
registers: Registers::new(base_addr),
registers: Registers::new(mmio_base_addr),
}
}

View File

@ -4,14 +4,17 @@
//! Common device driver code.
use core::{fmt, marker::PhantomData, ops};
use {
crate::memory::{Address, Virtual},
core::{fmt, marker::PhantomData, ops},
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub struct MMIODerefWrapper<T> {
pub base_addr: usize, // @todo unmake public, GPIO::Pin uses it
pub base_addr: Address<Virtual>, // @todo unmake public, GPIO::Pin uses it
phantom: PhantomData<fn() -> T>,
}
@ -25,7 +28,7 @@ pub struct BoundedUsize<const MAX_INCLUSIVE: usize>(usize);
impl<T> MMIODerefWrapper<T> {
/// Create an instance.
pub const fn new(base_addr: usize) -> Self {
pub const fn new(base_addr: Address<Virtual>) -> Self {
Self {
base_addr,
phantom: PhantomData,
@ -47,7 +50,7 @@ impl<T> ops::Deref for MMIODerefWrapper<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*(self.base_addr as *const _) }
unsafe { &*(self.base_addr.as_usize() as *const _) }
}
}
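// Usage sketch (hypothetical register block; the real drivers in this commit
// generate theirs with tock-registers' `register_structs!` macro):
#[repr(C)]
struct DemoBlock {
    status: u32,
}

fn _demo_read_status(mmio_base_addr: Address<Virtual>) -> u32 {
    let regs: MMIODerefWrapper<DemoBlock> = MMIODerefWrapper::new(mmio_base_addr);
    // `Deref` above reinterprets the base address as a `DemoBlock`. Real drivers
    // read through volatile register types rather than a plain field access.
    regs.status
}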

View File

@ -3,9 +3,13 @@ use {
crate::{
console, drivers,
exception::{self as generic_exception},
memory::{self, mmu::MMIODescriptor},
platform::{device_driver, memory::map::mmio},
},
core::sync::atomic::{AtomicBool, Ordering},
core::{
mem::MaybeUninit,
sync::atomic::{AtomicBool, Ordering},
},
};
//--------------------------------------------------------------------------------------------------
@ -34,9 +38,9 @@ pub unsafe fn init() -> Result<(), &'static str> {
return Err("Init already done");
}
driver_gpio()?;
#[cfg(not(feature = "noserial"))]
driver_uart()?;
driver_gpio()?;
driver_interrupt_controller()?;
INIT_DONE.store(true, Ordering::Relaxed);
@ -47,66 +51,107 @@ pub unsafe fn init() -> Result<(), &'static str> {
/// than on real hardware due to QEMU's abstractions.
#[cfg(test)]
pub fn qemu_bring_up_console() {
console::register_console(&PL011_UART);
unsafe {
instantiate_uart().unwrap_or_else(|_| crate::qemu::semihosting::exit_failure());
console::register_console(PL011_UART.assume_init_ref());
};
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
// static MINI_UART: device_driver::MiniUart =
// unsafe { device_driver::MiniUart::new(device_driver::UART1_BASE) };
static PL011_UART: device_driver::PL011Uart =
unsafe { device_driver::PL011Uart::new(device_driver::UART0_BASE) };
static GPIO: device_driver::GPIO = unsafe { device_driver::GPIO::new(device_driver::GPIO_BASE) };
static mut PL011_UART: MaybeUninit<device_driver::PL011Uart> = MaybeUninit::uninit();
static mut GPIO: MaybeUninit<device_driver::GPIO> = MaybeUninit::uninit();
#[cfg(feature = "rpi3")]
static INTERRUPT_CONTROLLER: device_driver::InterruptController =
unsafe { device_driver::InterruptController::new(mmio::PERIPHERAL_IC_START) };
static mut INTERRUPT_CONTROLLER: MaybeUninit<device_driver::InterruptController> =
MaybeUninit::uninit();
#[cfg(feature = "rpi4")]
static INTERRUPT_CONTROLLER: device_driver::GICv2 =
unsafe { device_driver::GICv2::new(mmio::GICD_START, mmio::GICC_START) };
static mut INTERRUPT_CONTROLLER: MaybeUninit<device_driver::GICv2> = MaybeUninit::uninit();
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
// /// This must be called only after successful init of the Mini UART driver.
// fn post_init_mini_uart() -> Result<(), &'static str> {
// console::register_console(&MINI_UART);
// crate::info!("[0] MiniUART is live!");
// Ok(())
// }
/// This must be called only after successful init of the memory subsystem.
unsafe fn instantiate_uart() -> Result<(), &'static str> {
let mmio_descriptor = MMIODescriptor::new(mmio::PL011_UART_BASE, mmio::PL011_UART_SIZE);
let virt_addr =
memory::mmu::kernel_map_mmio(device_driver::PL011Uart::COMPATIBLE, &mmio_descriptor)?;
PL011_UART.write(device_driver::PL011Uart::new(virt_addr));
Ok(())
}
/// This must be called only after successful init of the PL011 UART driver.
fn post_init_pl011_uart() -> Result<(), &'static str> {
console::register_console(&PL011_UART);
unsafe fn post_init_pl011_uart() -> Result<(), &'static str> {
console::register_console(PL011_UART.assume_init_ref());
crate::info!("[0] UART0 is live!");
Ok(())
}
// This must be called only after successful init of the GPIO driver.
fn post_init_gpio() -> Result<(), &'static str> {
// device_driver::MiniUart::prepare_gpio(&GPIO);
device_driver::PL011Uart::prepare_gpio(&GPIO);
/// This must be called only after successful init of the memory subsystem.
unsafe fn instantiate_gpio() -> Result<(), &'static str> {
let mmio_descriptor = MMIODescriptor::new(mmio::GPIO_BASE, mmio::GPIO_SIZE);
let virt_addr =
memory::mmu::kernel_map_mmio(device_driver::GPIO::COMPATIBLE, &mmio_descriptor)?;
GPIO.write(device_driver::GPIO::new(virt_addr));
Ok(())
}
/// This must be called only after successful init of the GPIO driver.
unsafe fn post_init_gpio() -> Result<(), &'static str> {
device_driver::PL011Uart::prepare_gpio(GPIO.assume_init_ref());
Ok(())
}
/// This must be called only after successful init of the memory subsystem.
#[cfg(feature = "rpi3")]
unsafe fn instantiate_interrupt_controller() -> Result<(), &'static str> {
let periph_mmio_descriptor =
MMIODescriptor::new(mmio::PERIPHERAL_IC_BASE, mmio::PERIPHERAL_IC_SIZE);
let periph_virt_addr = memory::mmu::kernel_map_mmio(
device_driver::InterruptController::COMPATIBLE,
&periph_mmio_descriptor,
)?;
INTERRUPT_CONTROLLER.write(device_driver::InterruptController::new(periph_virt_addr));
Ok(())
}
/// This must be called only after successful init of the memory subsystem.
#[cfg(feature = "rpi4")]
unsafe fn instantiate_interrupt_controller() -> Result<(), &'static str> {
let gicd_mmio_descriptor = MMIODescriptor::new(mmio::GICD_BASE, mmio::GICD_SIZE);
let gicd_virt_addr = memory::mmu::kernel_map_mmio("GICv2 GICD", &gicd_mmio_descriptor)?;
let gicc_mmio_descriptor = MMIODescriptor::new(mmio::GICC_BASE, mmio::GICC_SIZE);
let gicc_virt_addr = memory::mmu::kernel_map_mmio("GICv2 GICC", &gicc_mmio_descriptor)?;
INTERRUPT_CONTROLLER.write(device_driver::GICv2::new(gicd_virt_addr, gicc_virt_addr));
Ok(())
}
/// This must be called only after successful init of the interrupt controller driver.
fn post_init_interrupt_controller() -> Result<(), &'static str> {
generic_exception::asynchronous::register_irq_manager(&INTERRUPT_CONTROLLER);
unsafe fn post_init_interrupt_controller() -> Result<(), &'static str> {
generic_exception::asynchronous::register_irq_manager(INTERRUPT_CONTROLLER.assume_init_ref());
Ok(())
}
fn driver_uart() -> Result<(), &'static str> {
// let uart_descriptor =
// drivers::DeviceDriverDescriptor::new(&MINI_UART, Some(post_init_mini_uart));
// drivers::driver_manager().register_driver(uart_descriptor);
/// Ensures that driver registration happens only after successful instantiation.
unsafe fn driver_uart() -> Result<(), &'static str> {
instantiate_uart()?;
let uart_descriptor = drivers::DeviceDriverDescriptor::new(
&PL011_UART,
PL011_UART.assume_init_ref(),
Some(post_init_pl011_uart),
Some(exception::asynchronous::irq_map::PL011_UART),
);
@ -115,16 +160,23 @@ fn driver_uart() -> Result<(), &'static str> {
Ok(())
}
fn driver_gpio() -> Result<(), &'static str> {
let gpio_descriptor = drivers::DeviceDriverDescriptor::new(&GPIO, Some(post_init_gpio), None);
/// Ensures that driver registration happens only after successful instantiation.
unsafe fn driver_gpio() -> Result<(), &'static str> {
instantiate_gpio()?;
let gpio_descriptor =
drivers::DeviceDriverDescriptor::new(GPIO.assume_init_ref(), Some(post_init_gpio), None);
drivers::driver_manager().register_driver(gpio_descriptor);
Ok(())
}
fn driver_interrupt_controller() -> Result<(), &'static str> {
/// Ensures that driver registration happens only after successful instantiation.
unsafe fn driver_interrupt_controller() -> Result<(), &'static str> {
instantiate_interrupt_controller()?;
let interrupt_controller_descriptor = drivers::DeviceDriverDescriptor::new(
&INTERRUPT_CONTROLLER,
INTERRUPT_CONTROLLER.assume_init_ref(),
Some(post_init_interrupt_controller),
None,
);

View File

@ -1,128 +1,336 @@
use {super::map as memory_map, crate::memory::mmu::*, core::ops::RangeInclusive};
//! Platform memory management unit.
use crate::{
memory::{
mmu::{
self as generic_mmu, AccessPermissions, AddressSpace, AssociatedTranslationTable,
AttributeFields, MemAttributes, MemoryRegion, PageAddress, TranslationGranule,
},
Physical, Virtual,
},
synchronization::InitStateLock,
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
type KernelTranslationTable =
<KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromBottom;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
/// The translation granule chosen by this platform. This will be used everywhere else
/// in the kernel to derive respective data structures and their sizes.
/// For example, the `crate::memory::mmu::Page`.
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
const NUM_MEM_RANGES: usize = 6;
/// The kernel's virtual address space defined by this platform.
pub type KernelVirtAddrSpace = AddressSpace<{ 1024 * 1024 * 1024 }>;
/// The virtual memory layout that is agnostic of the paging granularity that the
/// hardware MMU will use.
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// Contains only special ranges, aka anything that is _not_ normal cacheable
/// DRAM.
pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::new(
memory_map::END_INCLUSIVE,
[
TranslationDescriptor {
name: "Boot code and data",
virtual_range: boot_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
},
TranslationDescriptor {
name: "Kernel code and RO data",
virtual_range: code_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
},
TranslationDescriptor {
name: "Remapped Device MMIO",
virtual_range: remapped_mmio_range_inclusive,
physical_range_translation: Translation::Offset(
memory_map::mmio::MMIO_BASE + 0x20_0000,
),
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
TranslationDescriptor {
name: "Device MMIO",
virtual_range: mmio_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
TranslationDescriptor {
name: "DMA heap pool",
virtual_range: dma_range_inclusive,
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::NonCacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
TranslationDescriptor {
name: "Framebuffer area (static for now)",
virtual_range: || {
RangeInclusive::new(
memory_map::phys::VIDEOMEM_BASE,
memory_map::mmio::MMIO_BASE - 1,
)
},
physical_range_translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
],
);
/// It is mandatory that InitStateLock is transparent.
/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
/// There is a unit test that checks this property.
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new());
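// Compile-time restatement of the transparency requirement stated above (a sketch;
// the kernel additionally verifies this in a unit test):
const _: () = assert!(
    core::mem::size_of::<InitStateLock<KernelTranslationTable>>()
        == core::mem::size_of::<KernelTranslationTable>()
);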
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
fn boot_range_inclusive() -> RangeInclusive<usize> {
RangeInclusive::new(super::boot_start(), super::boot_end_exclusive() - 1)
/// Helper function for calculating the number of pages the given parameter spans.
const fn size_to_num_pages(size: usize) -> usize {
assert!(size > 0);
assert!(size % KernelGranule::SIZE == 0); // assert! is const-fn-friendly
size >> KernelGranule::SHIFT
}
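// Worked example (sketch, assuming the 64 KiB `KernelGranule` above): a 256 KiB
// region spans exactly four pages.
const _: () = assert!(size_to_num_pages(256 * 1024) == 4);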
fn code_range_inclusive() -> RangeInclusive<usize> {
// Notice the subtraction to turn the exclusive end into an inclusive end.
#[allow(clippy::range_minus_one)]
RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
/// The code pages of the kernel binary.
fn virt_code_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::code_size());
let start_page_addr = super::virt_code_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
fn remapped_mmio_range_inclusive() -> RangeInclusive<usize> {
// The last 64 KiB slot in the first 512 MiB
RangeInclusive::new(0x1FFF_0000, 0x1FFF_FFFF)
/// The data pages of the kernel binary.
fn virt_data_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::data_size());
let start_page_addr = super::virt_data_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
fn mmio_range_inclusive() -> RangeInclusive<usize> {
RangeInclusive::new(memory_map::mmio::MMIO_BASE, memory_map::mmio::MMIO_END)
// RangeInclusive::new(map::phys::VIDEOMEM_BASE, map::mmio::MMIO_END),
/// The boot core stack pages.
fn virt_boot_core_stack_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_size());
let start_page_addr = super::virt_boot_core_stack_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
fn dma_range_inclusive() -> RangeInclusive<usize> {
RangeInclusive::new(
memory_map::virt::DMA_HEAP_START,
memory_map::virt::DMA_HEAP_END,
// The binary is still identity mapped, so use this trivial conversion function for mapping below.
fn kernel_virt_to_phys_region(virt_region: MemoryRegion<Virtual>) -> MemoryRegion<Physical> {
MemoryRegion::new(
PageAddress::from(virt_region.start_page_addr().into_inner().as_usize()),
PageAddress::from(
virt_region
.end_exclusive_page_addr()
.into_inner()
.as_usize(),
),
)
}
//--------------------------------------------------------------------------------------------------
// Subsumed by the kernel_map_binary() function
//--------------------------------------------------------------------------------------------------
// pub static LAYOUT: KernelVirtualLayout<NUM_MEM_RANGES> = KernelVirtualLayout::new(
// memory_map::END_INCLUSIVE,
// [
// TranslationDescriptor {
// name: "Remapped Device MMIO",
// virtual_range: remapped_mmio_range_inclusive,
// physical_range_translation: Translation::Offset(
// memory_map::mmio::MMIO_BASE + 0x20_0000,
// ),
// attribute_fields: AttributeFields {
// mem_attributes: MemAttributes::Device,
// acc_perms: AccessPermissions::ReadWrite,
// execute_never: true,
// },
// },
// TranslationDescriptor {
// name: "Device MMIO",
// virtual_range: mmio_range_inclusive,
// physical_range_translation: Translation::Identity,
// attribute_fields: AttributeFields {
// mem_attributes: MemAttributes::Device,
// acc_perms: AccessPermissions::ReadWrite,
// execute_never: true,
// },
// },
// TranslationDescriptor {
// name: "DMA heap pool",
// virtual_range: dma_range_inclusive,
// physical_range_translation: Translation::Identity,
// attribute_fields: AttributeFields {
// mem_attributes: MemAttributes::NonCacheableDRAM,
// acc_perms: AccessPermissions::ReadWrite,
// execute_never: true,
// },
// },
// TranslationDescriptor {
// name: "Framebuffer area (static for now)",
// virtual_range: || {
// RangeInclusive::new(
// memory_map::phys::VIDEOMEM_BASE,
// memory_map::mmio::MMIO_BASE - 1,
// )
// },
// physical_range_translation: Translation::Identity,
// attribute_fields: AttributeFields {
// mem_attributes: MemAttributes::Device,
// acc_perms: AccessPermissions::ReadWrite,
// execute_never: true,
// },
// },
// ],
// );
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the virtual memory layout.
pub fn virt_mem_layout() -> &'static KernelVirtualLayout<NUM_MEM_RANGES> {
&LAYOUT
/// Return a reference to the kernel's translation tables.
pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTable> {
&KERNEL_TABLES
}
/// The MMIO remap pages.
pub fn virt_mmio_remap_region() -> MemoryRegion<Virtual> {
let num_pages = size_to_num_pages(super::mmio_remap_size());
let start_page_addr = super::virt_mmio_remap_start();
let end_exclusive_page_addr = start_page_addr.checked_offset(num_pages as isize).unwrap();
MemoryRegion::new(start_page_addr, end_exclusive_page_addr)
}
/// Map the kernel binary.
///
/// # Safety
///
/// - Any miscalculation or attribute error will likely be fatal. Needs careful manual checking.
pub unsafe fn kernel_map_binary() -> Result<(), &'static str> {
generic_mmu::kernel_map_at(
"Kernel boot-core stack",
&virt_boot_core_stack_region(),
&kernel_virt_to_phys_region(virt_boot_core_stack_region()),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
)?;
// TranslationDescriptor {
// name: "Boot code and data",
// virtual_range: boot_range_inclusive,
// physical_range_translation: Translation::Identity,
// attribute_fields: AttributeFields {
// mem_attributes: MemAttributes::CacheableDRAM,
// acc_perms: AccessPermissions::ReadOnly,
// execute_never: false,
// },
// },
// TranslationDescriptor {
// name: "Kernel code and RO data",
// virtual_range: code_range_inclusive,
// physical_range_translation: Translation::Identity,
// attribute_fields: AttributeFields {
// mem_attributes: MemAttributes::CacheableDRAM,
// acc_perms: AccessPermissions::ReadOnly,
// execute_never: false,
// },
// },
generic_mmu::kernel_map_at(
"Kernel code and RO data",
&virt_code_region(),
&kernel_virt_to_phys_region(virt_code_region()),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
)?;
generic_mmu::kernel_map_at(
"Kernel data and bss",
&virt_data_region(),
&kernel_virt_to_phys_region(virt_data_region()),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
)?;
Ok(())
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use {
super::*,
core::{cell::UnsafeCell, ops::Range},
};
/// Check alignment of the kernel's virtual memory layout sections.
#[test_case]
fn virt_mem_layout_sections_are_64KiB_aligned() {
for i in [
virt_boot_core_stack_region,
virt_code_region,
virt_data_region,
]
.iter()
{
let start = i().start_page_addr().into_inner();
let end_exclusive = i().end_exclusive_page_addr().into_inner();
assert!(start.is_page_aligned());
assert!(end_exclusive.is_page_aligned());
assert!(end_exclusive >= start);
}
}
/// Ensure the kernel's virtual memory layout is free of overlaps.
#[test_case]
fn virt_mem_layout_has_no_overlaps() {
let layout = [
virt_boot_core_stack_region(),
virt_code_region(),
virt_data_region(),
];
for (i, first_range) in layout.iter().enumerate() {
for second_range in layout.iter().skip(i + 1) {
assert!(!first_range.overlaps(second_range))
}
}
}
/// Check if KERNEL_TABLES is in .bss.
#[test_case]
fn kernel_tables_in_bss() {
extern "Rust" {
static __bss_start: UnsafeCell<u64>;
static __bss_end_exclusive: UnsafeCell<u64>;
}
let bss_range = unsafe {
Range {
start: __bss_start.get(),
end: __bss_end_exclusive.get(),
}
};
let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64;
assert!(bss_range.contains(&kernel_tables_addr));
}
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
// fn boot_range_inclusive() -> RangeInclusive<usize> {
// RangeInclusive::new(super::boot_start(), super::boot_end_exclusive() - 1)
// }
//
// fn code_range_inclusive() -> RangeInclusive<usize> {
// // Notice the subtraction to turn the exclusive end into an inclusive end.
// #[allow(clippy::range_minus_one)]
// RangeInclusive::new(super::code_start(), super::code_end_exclusive() - 1)
// }
//
// fn remapped_mmio_range_inclusive() -> RangeInclusive<usize> {
// // The last 64 KiB slot in the first 512 MiB
// RangeInclusive::new(0x1FFF_0000, 0x1FFF_FFFF)
// }
//
// fn mmio_range_inclusive() -> RangeInclusive<usize> {
// RangeInclusive::new(memory_map::mmio::MMIO_BASE, memory_map::mmio::MMIO_END)
// // RangeInclusive::new(map::phys::VIDEOMEM_BASE, map::mmio::MMIO_END),
// }
//
// fn dma_range_inclusive() -> RangeInclusive<usize> {
// RangeInclusive::new(
// memory_map::virt::DMA_HEAP_START,
// memory_map::virt::DMA_HEAP_END,
// )
// }

View File

@ -1,11 +1,72 @@
use core::cell::UnsafeCell;
//! Platform memory Management.
//!
//! The physical memory layout.
//!
//! The Raspberry Pi's firmware copies the kernel binary to 0x8_0000. The preceding region will be used
//! as the boot core's stack.
//!
//! +---------------------------------------+
//! |                                       | boot_core_stack_start @ 0x0
//! |                                       | ^
//! | Boot-core Stack                       | | stack
//! |                                       | | growth
//! |                                       | | direction
//! +---------------------------------------+
//! |                                       | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text                                 |
//! | .rodata                               |
//! | .got                                  |
//! |                                       |
//! +---------------------------------------+
//! |                                       | data_start == code_end_exclusive
//! | .data                                 |
//! | .bss                                  |
//! |                                       |
//! +---------------------------------------+
//! |                                       | data_end_exclusive
//! |                                       |
//!
//! The virtual memory layout is as follows:
//!
//! +---------------------------------------+
//! |                                       | boot_core_stack_start @ 0x0
//! |                                       | ^
//! | Boot-core Stack                       | | stack
//! |                                       | | growth
//! |                                       | | direction
//! +---------------------------------------+
//! |                                       | code_start @ 0x8_0000 == boot_core_stack_end_exclusive
//! | .text                                 |
//! | .rodata                               |
//! | .got                                  |
//! |                                       |
//! +---------------------------------------+
//! |                                       | data_start == code_end_exclusive
//! | .data                                 |
//! | .bss                                  |
//! |                                       |
//! +---------------------------------------+
//! |                                       | mmio_remap_start == data_end_exclusive
//! | VA region for MMIO remapping          |
//! |                                       |
//! +---------------------------------------+
//! |                                       | mmio_remap_end_exclusive
//! |                                       |
pub mod mmu;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
use {
crate::memory::{mmu::PageAddress, Address, Physical, Virtual},
core::cell::UnsafeCell,
};
// Symbols from the linker script.
extern "Rust" {
// Boot code.
@ -38,15 +99,32 @@ extern "Rust" {
static __RO_END: UnsafeCell<()>;
}
// Symbols from the linker script.
// extern "Rust" {
// static __code_start: UnsafeCell<()>; // __RO_START
// static __code_end_exclusive: UnsafeCell<()>; // __RO_END
//
// static __data_start: UnsafeCell<()>;
// static __data_end_exclusive: UnsafeCell<()>;
//
// static __mmio_remap_start: UnsafeCell<()>;
// static __mmio_remap_end_exclusive: UnsafeCell<()>;
//
// static __boot_core_stack_start: UnsafeCell<()>;
// static __boot_core_stack_end_exclusive: UnsafeCell<()>;
// }
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// System memory map.
/// The board's physical memory map.
/// This is a fixed memory map for Raspberry Pi,
/// @todo we need to infer the memory map from the provided DTB.
/// @todo we need to infer the memory map from the provided DTB instead.
#[rustfmt::skip]
pub mod map { // @todo only pub(super) for proper isolation!
pub(super) mod map {
use super::*;
/// Beginning of memory.
pub const START: usize = 0x0000_0000;
/// End of memory: 8 GiB on the RPi4.
@ -63,49 +141,75 @@ pub mod map { // @todo only pub(super) for proper isolation!
pub const UART_OFFSET: usize = 0x0020_1000;
pub const MINIUART_OFFSET: usize = 0x0021_5000;
/// Memory-mapped devices.
/// Physical devices.
#[cfg(feature = "rpi3")]
pub mod mmio {
use super::*;
/// Base address of MMIO register range.
pub const MMIO_BASE: usize = 0x3F00_0000;
/// Base address of ARM<->VC mailbox area.
pub const VIDEOCORE_MBOX_BASE: usize = MMIO_BASE + VIDEOCORE_MBOX_OFFSET;
/// Base address of GPIO registers.
pub const GPIO_BASE: usize = MMIO_BASE + GPIO_OFFSET;
/// Base address of regular UART.
pub const PL011_UART_BASE: usize = MMIO_BASE + UART_OFFSET;
/// Base address of MiniUART.
pub const MINI_UART_BASE: usize = MMIO_BASE + MINIUART_OFFSET;
/// Interrupt controller
pub const PERIPHERAL_IC_START: usize = MMIO_BASE + 0x0000_B200;
/// End of MMIO memory.
pub const MMIO_END: usize = super::END_INCLUSIVE;
pub const PERIPHERAL_IC_BASE: Address<Physical> = Address::new(MMIO_BASE + 0x0000_B200);
pub const PERIPHERAL_IC_SIZE: usize = 0x24;
/// Base address of ARM<->VC mailbox area.
pub const VIDEOCORE_MBOX_BASE: Address<Physical> = Address::new(MMIO_BASE + VIDEOCORE_MBOX_OFFSET);
/// Base address of GPIO registers.
pub const GPIO_BASE: Address<Physical> = Address::new(MMIO_BASE + GPIO_OFFSET);
pub const GPIO_SIZE: usize = 0xA0;
pub const PL011_UART_BASE: Address<Physical> = Address::new(MMIO_BASE + UART_OFFSET);
pub const PL011_UART_SIZE: usize = 0x48;
/// Base address of MiniUART.
pub const MINI_UART_BASE: Address<Physical> = Address::new(MMIO_BASE + MINIUART_OFFSET);
/// End of MMIO memory region.
pub const END: Address<Physical> = Address::new(0x4001_0000);
}
/// Memory-mapped devices.
/// Physical devices.
#[cfg(feature = "rpi4")]
pub mod mmio {
use super::*;
/// Base address of MMIO register range.
pub const MMIO_BASE: usize = 0xFE00_0000;
pub const MMIO_BASE: usize = 0xFE00_0000;
/// Base address of GPIO registers.
pub const GPIO_BASE: Address<Physical> = Address::new(MMIO_BASE + GPIO_OFFSET);
pub const GPIO_SIZE: usize = 0xA0;
/// Base address of regular UART.
pub const PL011_UART_BASE: Address<Physical> = Address::new(MMIO_BASE + UART_OFFSET);
pub const PL011_UART_SIZE: usize = 0x48;
/// Base address of MiniUART.
pub const MINI_UART_BASE: Address<Physical> = Address::new(MMIO_BASE + MINIUART_OFFSET);
/// Interrupt controller
pub const GICD_BASE: Address<Physical> = Address::new(0xFF84_1000);
pub const GICD_SIZE: usize = 0x824;
pub const GICC_BASE: Address<Physical> = Address::new(0xFF84_2000);
pub const GICC_SIZE: usize = 0x14;
/// Base address of ARM<->VC mailbox area.
pub const VIDEOCORE_MBOX_BASE: usize = MMIO_BASE + VIDEOCORE_MBOX_OFFSET;
/// Base address of GPIO registers.
pub const GPIO_BASE: usize = MMIO_BASE + GPIO_OFFSET;
/// Base address of regular UART.
pub const PL011_UART_BASE: usize = MMIO_BASE + UART_OFFSET;
/// Base address of MiniUART.
pub const MINI_UART_BASE: usize = MMIO_BASE + MINIUART_OFFSET;
/// Interrupt controller
pub const GICD_START: usize = 0xFF84_1000;
pub const GICC_START: usize = 0xFF84_2000;
/// End of MMIO memory.
pub const MMIO_END: usize = super::END_INCLUSIVE;
/// End of MMIO memory region.
pub const END: Address<Physical> = Address::new(0xFF85_0000);
}
/// End address of mapped memory.
pub const END: Address<Physical> = mmio::END;
//----
// Unused?
//----
/// Virtual (mapped) addresses.
pub mod virt {
/// Start (top) of kernel stack.
@ -153,11 +257,91 @@ fn code_start() -> usize {
unsafe { __RO_START.get() as usize }
}
/// Exclusive end page address of the code segment.
/// Start page address of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn code_end_exclusive() -> usize {
unsafe { __RO_END.get() as usize }
fn virt_code_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __RO_START.get() as usize })
}
/// Size of the code segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn code_size() -> usize {
unsafe { (__RO_END.get() as usize) - (__RO_START.get() as usize) }
}
// /// Exclusive end page address of the code segment.
// ///
// /// # Safety
// ///
// /// - Value is provided by the linker script and must be trusted as-is.
// #[inline(always)]
// fn code_end_exclusive() -> usize {
// unsafe { __RO_END.get() as usize }
// }
/// Start page address of the data segment.
#[inline(always)]
fn virt_data_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __data_start.get() as usize })
}
/// Size of the data segment.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn data_size() -> usize {
unsafe { (__data_end_exclusive.get() as usize) - (__data_start.get() as usize) }
}
/// Start page address of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_mmio_remap_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __mmio_remap_start.get() as usize })
}
/// Size of the MMIO remap reservation.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn mmio_remap_size() -> usize {
unsafe { (__mmio_remap_end_exclusive.get() as usize) - (__mmio_remap_start.get() as usize) }
}
/// Start page address of the boot core's stack.
#[inline(always)]
fn virt_boot_core_stack_start() -> PageAddress<Virtual> {
PageAddress::from(unsafe { __boot_core_stack_start.get() as usize })
}
/// Size of the boot core's stack.
#[inline(always)]
fn boot_core_stack_size() -> usize {
unsafe {
(__boot_core_stack_end_exclusive.get() as usize) - (__boot_core_stack_start.get() as usize)
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Exclusive end address of the physical address space.
#[inline(always)]
pub fn phys_addr_space_end_exclusive_addr() -> PageAddress<Physical> {
PageAddress::from(map::END)
}

View File

@ -11,7 +11,10 @@ use {
mailbox::{channel, Mailbox, MailboxOps},
BcmHost,
},
crate::platform::device_driver::common::MMIODerefWrapper,
crate::{
memory::{Address, Virtual},
platform::device_driver::common::MMIODerefWrapper,
},
snafu::Snafu,
tock_registers::{
interfaces::{Readable, Writeable},
@ -74,9 +77,9 @@ impl Power {
/// # Safety
///
/// - The user must provide a correct MMIO start address.
pub const unsafe fn new(base_addr: usize) -> Power {
pub const unsafe fn new(mmio_base_addr: Address<Virtual>) -> Power {
Power {
registers: Registers::new(base_addr),
registers: Registers::new(mmio_base_addr),
}
}

View File

@ -36,17 +36,7 @@ use machine::devices::serial::SerialOps;
use {
cfg_if::cfg_if,
core::{cell::UnsafeCell, time::Duration},
machine::{
arch,
console::console,
entry, exception, info, memory,
platform::raspberrypi::{
display::{Color, DrawError},
mailbox::{channel, Mailbox, MailboxOps},
vc::VC,
},
println, time, warn,
},
machine::{arch, console::console, entry, exception, info, memory, println, time, warn},
};
entry!(kernel_init);
@ -65,17 +55,19 @@ pub unsafe fn kernel_init() -> ! {
#[cfg(feature = "jtag")]
machine::debug::jtag::wait_debugger();
// init_exception_traps(); // @todo
//
// init_mmu(); // @todo
exception::handling_init();
use machine::memory::mmu::interface::MMU;
let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() {
Err(string) => panic!("Error mapping kernel binary: {}", string),
Ok(addr) => addr,
};
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) {
panic!("Enabling MMU failed: {}", e);
}
memory::mmu::post_enable_init();
if let Err(x) = machine::platform::drivers::init() {
panic!("Error initializing platform drivers: {}", x);
}
@ -95,10 +87,8 @@ pub unsafe fn kernel_init() -> ! {
/// Safe kernel code.
// #[inline]
#[cfg(not(test))]
pub fn kernel_main() -> ! {
#[cfg(test)]
test_main();
// info!("{}", libkernel::version());
// info!("Booting on: {}", bsp::board_name());
@ -109,8 +99,8 @@ pub fn kernel_main() -> ! {
);
info!("Booting on: {}", machine::platform::BcmHost::board_name());
info!("MMU online. Special regions:");
machine::platform::memory::mmu::virt_mem_layout().print_layout();
// info!("MMU online. Special regions:");
// machine::platform::memory::mmu::virt_mem_layout().print_layout();
let (_, privilege_level) = exception::current_privilege_level();
info!("Current privilege level: {}", privilege_level);
@ -148,8 +138,8 @@ fn panicked(info: &PanicInfo) -> ! {
}
fn print_mmu_state_and_features() {
use machine::memory::mmu::interface::MMU;
memory::mmu::mmu().print_features();
// use machine::memory::mmu::interface::MMU;
// memory::mmu::mmu().print_features();
}
//------------------------------------------------------------
@ -162,11 +152,11 @@ fn command_prompt() {
match machine::console::command_prompt(&mut buf) {
// b"mmu" => init_mmu(),
b"feats" => print_mmu_state_and_features(),
b"disp" => check_display_init(),
// b"disp" => check_display_init(),
b"trap" => check_data_abort_trap(),
b"map" => machine::platform::memory::mmu::virt_mem_layout().print_layout(),
b"led on" => set_led(true),
b"led off" => set_led(false),
// b"map" => machine::platform::memory::mmu::virt_mem_layout().print_layout(),
// b"led on" => set_led(true),
// b"led off" => set_led(false),
b"help" => print_help(),
b"end" => break 'cmd_loop,
x => warn!("[!] Unknown command {:?}, try 'help'", x),
@ -180,26 +170,26 @@ fn print_help() {
println!(" feats - print MMU state and supported features");
#[cfg(not(feature = "noserial"))]
println!(" uart - try to reinitialize UART serial");
println!(" disp - try to init VC framebuffer and draw some text");
// println!(" disp - try to init VC framebuffer and draw some text");
println!(" trap - trigger and recover from a data abort exception");
println!(" map - show kernel memory layout");
println!(" led [on|off] - change RPi LED status");
// println!(" led [on|off] - change RPi LED status");
println!(" end - leave console and reset board");
}
fn set_led(enable: bool) {
let mut mbox = Mailbox::<8>::default();
let index = mbox.request();
let index = mbox.set_led_on(index, enable);
let mbox = mbox.end(index);
mbox.call(channel::PropertyTagsArmToVc)
.map_err(|e| {
warn!("Mailbox call returned error {}", e);
warn!("Mailbox contents: {:?}", mbox);
})
.ok();
}
// fn set_led(enable: bool) {
// let mut mbox = Mailbox::<8>::default();
// let index = mbox.request();
// let index = mbox.set_led_on(index, enable);
// let mbox = mbox.end(index);
//
// mbox.call(channel::PropertyTagsArmToVc)
// .map_err(|e| {
// warn!("Mailbox call returned error {}", e);
// warn!("Mailbox contents: {:?}", mbox);
// })
// .ok();
// }
fn reboot() -> ! {
cfg_if! {
@ -207,47 +197,48 @@ fn reboot() -> ! {
info!("Bye, shutting down QEMU");
machine::qemu::semihosting::exit_success()
} else {
use machine::platform::raspberrypi::power::Power;
// use machine::platform::raspberrypi::power::Power;
info!("Bye, going to reset now");
Power::default().reset()
// Power::default().reset()
machine::cpu::endless_sleep()
}
}
}
fn check_display_init() {
display_graphics()
.map_err(|e| {
warn!("Error in display: {}", e);
})
.ok();
}
fn display_graphics() -> Result<(), DrawError> {
if let Ok(mut display) = VC::init_fb(800, 600, 32) {
info!("Display created");
display.clear(Color::black());
info!("Display cleared");
display.rect(10, 10, 250, 250, Color::rgb(32, 96, 64));
display.draw_text(50, 50, "Hello there!", Color::rgb(128, 192, 255))?;
let mut buf = [0u8; 64];
let s = machine::write_to::show(&mut buf, format_args!("Display width {}", display.width));
if s.is_err() {
display.draw_text(50, 150, "Error displaying", Color::red())?
} else {
display.draw_text(50, 150, s.unwrap(), Color::white())?
}
display.draw_text(150, 50, "RED", Color::red())?;
display.draw_text(160, 60, "GREEN", Color::green())?;
display.draw_text(170, 70, "BLUE", Color::blue())?;
}
Ok(())
}
// fn check_display_init() {
// display_graphics()
// .map_err(|e| {
// warn!("Error in display: {}", e);
// })
// .ok();
// }
//
// fn display_graphics() -> Result<(), DrawError> {
// if let Ok(mut display) = VC::init_fb(800, 600, 32) {
// info!("Display created");
//
// display.clear(Color::black());
// info!("Display cleared");
//
// display.rect(10, 10, 250, 250, Color::rgb(32, 96, 64));
// display.draw_text(50, 50, "Hello there!", Color::rgb(128, 192, 255))?;
//
// let mut buf = [0u8; 64];
// let s = machine::write_to::show(&mut buf, format_args!("Display width {}", display.width));
//
// if s.is_err() {
// display.draw_text(50, 150, "Error displaying", Color::red())?
// } else {
// display.draw_text(50, 150, s.unwrap(), Color::white())?
// }
//
// display.draw_text(150, 50, "RED", Color::red())?;
// display.draw_text(160, 60, "GREEN", Color::green())?;
// display.draw_text(170, 70, "BLUE", Color::blue())?;
// }
// Ok(())
// }
fn check_data_abort_trap() {
// Cause an exception by accessing a virtual address for which no
@ -261,6 +252,11 @@ fn check_data_abort_trap() {
info!("[i] Whoa! We recovered from an exception.");
}
#[cfg(test)]
pub fn kernel_main() -> ! {
test_main()
}
#[cfg(test)]
mod main_tests {
use {super::*, core::panic::PanicInfo};