Add MMU init code

Temporarily switch to the cortex-a git version
(waiting for the PR to be merged), which has support
for all necessary registers.
Berkus Decker 2020-10-12 22:08:21 +03:00
parent f85020ef4e
commit ded53c16a7
11 changed files with 1648 additions and 15 deletions

Cargo.lock generated

@@ -1,10 +1,21 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "cortex-a"
version = "3.0.4"
name = "bit_field"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6922a40af4d1a2deac8c963b9f3e57311b8912490740234f1ad182425c547f80"
checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4"
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "cortex-a"
version = "3.0.5"
source = "git+https://github.com/berkus/cortex-a?branch=feature/add-registers#70e80eabbec6d8e20c19928c3b25d49b70d8327d"
dependencies = [
"register",
]
@@ -42,12 +53,29 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70323afdb8082186c0986da0e10f6e4ed103d681c921c00597e98d9806dac20f"
[[package]]
name = "usize_conversions"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f70329e2cbe45d6c97a5112daad40c34cd9a4e18edb5a2a18fefeb584d8d25e5"
[[package]]
name = "ux"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88dfeb711b61ce620c0cb6fd9f8e3e678622f0c971da2a63c4b3e25e88ed012f"
[[package]]
name = "vesper"
version = "0.0.1"
dependencies = [
"bit_field",
"bitflags",
"cortex-a",
"qemu-exit",
"r0",
"register",
"rlibc",
"usize_conversions",
"ux",
]


@@ -6,14 +6,14 @@
ENTRY(_boot_cores);
/* Symbols between __boot_start and __boot_end should be dropped after init is complete.
Symbols between __ro_start and __ro_end are the kernel code.
/* Symbols between __BOOT_START and __BOOT_END should be dropped after init is complete.
Symbols between __RO_START and __RO_END are the kernel code.
Symbols between __BSS_START and __BSS_END must be initialized to zero by r0 code in kernel.
*/
SECTIONS
{
. = 0x80000; /* AArch64 boot address is 0x80000, 4K-aligned */
__boot_start = .;
__BOOT_START = .;
.text :
{
KEEP(*(.text.boot.entry)) // Entry point must go first
@@ -21,8 +21,8 @@ SECTIONS
. = ALIGN(4096);
*(.data.boot)
. = ALIGN(4096); /* Here boot code ends */
__boot_end = .; // __boot_end must be 4KiB aligned
__ro_start = .;
__BOOT_END = .; // __BOOT_END must be 4KiB aligned
__RO_START = .;
*(.text .text.*)
}
@@ -37,10 +37,10 @@ SECTIONS
FILL(0x00)
}
. = ALIGN(4096); /* Fill up to 4KiB */
__ro_end = .; /* __ro_end must be 4KiB aligned */
__data_start = .; /* __data_start must be 4KiB aligned */
__RO_END = .; /* __RO_END must be 4KiB aligned */
__DATA_START = .; /* __DATA_START must be 4KiB aligned */
.data : /* @todo align data to 4K -- it's already aligned up to __ro_end marker now */
.data : /* @todo align data to 4K -- it's already aligned up to __RO_END marker now */
{
*(.data .data.*)
FILL(0x00)


@@ -23,4 +23,9 @@ qemu = ["qemu-exit"]
r0 = "1.0"
rlibc = "1.0"
qemu-exit = { version = "1.0", optional = true }
cortex-a = "3.0"
cortex-a = { git = "https://github.com/berkus/cortex-a", branch = "feature/add-registers" }
ux = { version = "0.1.3", default-features = false }
usize_conversions = "0.2.0"
bit_field = "0.10.0"
register = "0.5.1"
bitflags = "1.2.1"


@@ -185,9 +185,9 @@ pub unsafe extern "C" fn _boot_cores() -> ! {
const CORE_MASK: u64 = 0x3;
// Can't match values with dots in match, so use intermediate consts.
#[cfg(qemu)]
const EL3: u32 = CurrentEL::EL::EL3.value;
const EL2: u32 = CurrentEL::EL::EL2.value;
const EL1: u32 = CurrentEL::EL::EL1.value;
const EL3: u64 = CurrentEL::EL::EL3.value;
const EL2: u64 = CurrentEL::EL::EL2.value;
const EL1: u64 = CurrentEL::EL::EL1.value;
if CORE_0 == MPIDR_EL1.get() & CORE_MASK {
match CurrentEL.get() {


@@ -0,0 +1,7 @@
# Memory Configuration
The types VirtAddr and PhysAddr represent addresses before and after translation by the MMU, respectively.
Page table types must be able to represent pages of differing sizes.
For every entry in an MMU page table we should be able to obtain a proper page type - e.g. an invalid entry, a pointer to a further page table, or
a page of a specific size.
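
The `mmu` module added in this commit models these cases with its `PageTableEntry` enum (`Invalid`, `TableDescriptor`, `Lvl2BlockDescriptor`, `PageDescriptor`). A minimal illustrative sketch of that shape (the names below are examples only, not the kernel's API):

```rust
/// Illustrative sketch only -- these names do not match the kernel's actual types.
enum EntryKind {
    /// Empty entry: the covered address range is unmapped.
    Invalid,
    /// Entry pointing to the next-level page table.
    Table { next_table_phys: u64 },
    /// Entry mapping a page or block of a specific size (4 KiB, 2 MiB, ...).
    Page { output_phys: u64, size_bytes: u64 },
}

impl EntryKind {
    /// True if the entry actually maps memory, as opposed to being empty
    /// or pointing at another table.
    fn maps_memory(&self) -> bool {
        matches!(self, EntryKind::Page { .. })
    }
}
```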


@@ -0,0 +1,507 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
use {
super::{align_down, align_up},
bit_field::BitField,
core::{
convert::{From, Into, TryInto},
fmt,
ops::{Add, AddAssign, Rem, RemAssign, Sub, SubAssign},
},
usize_conversions::{usize_from, FromUsize},
ux::*,
};
/// A canonical 64-bit virtual memory address.
///
/// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled
/// on non 64-bit systems. The `UsizeConversions` trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `aarch64` with a 48-bit virtual address space, only the 48 lower bits of a virtual
/// address can be used. The top 16 bits need to be copies of bit 47, i.e. the most significant
/// bit. Addresses that fulfil this criterion are called “canonical”. This type guarantees that
/// it always represents a canonical address.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
#[repr(transparent)]
pub struct VirtAddr(u64);
/// A passed `u64` was not a valid virtual address.
///
/// This means that bits 48 to 64 are not
/// a valid sign extension and are not null either. So automatic sign extension would have
/// overwritten possibly meaningful bits. This likely indicates a bug, for example an invalid
/// address calculation.
#[derive(Debug)]
pub struct VirtAddrNotValid(u64);
impl VirtAddr {
/// Creates a new canonical virtual address.
///
/// This function performs sign extension of bit 47 to make the address canonical. Panics
/// if the bits in the range 48 to 64 contain data (i.e. are not null and not a sign extension).
///
/// @todo Support ASID byte in top bits of the address.
pub fn new(addr: u64) -> VirtAddr {
Self::try_new(addr).expect(
"address passed to VirtAddr::new must not contain any data \
in bits 48 to 64",
)
}
/// Tries to create a new canonical virtual address.
///
/// This function tries to perform sign extension of bit 47 to make the address canonical.
/// It succeeds if bits 48 to 64 are either a correct sign extension (i.e. copies of bit 47)
/// or all null. Otherwise, an error is returned.
pub fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
match addr.get_bits(47..64) {
0 | 0x1ffff => Ok(VirtAddr(addr)), // address is canonical
1 => Ok(VirtAddr::new_unchecked(addr)), // address needs sign extension
_ => Err(VirtAddrNotValid(addr)),
}
}
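// A few concrete cases for a 48-bit virtual address space:
//   VirtAddr::try_new(0x0000_7fff_ffff_ffff) -> Ok                    (upper bits all zero)
//   VirtAddr::try_new(0xffff_8000_0000_0000) -> Ok                    (correct sign extension of bit 47)
//   VirtAddr::try_new(0x0000_8000_0000_0000) -> Ok, sign-extended to 0xffff_8000_0000_0000
//   VirtAddr::try_new(0x0001_0000_0000_0000) -> Err(VirtAddrNotValid) (bit 48 set, no sign extension)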
/// Creates a new canonical virtual address without checks.
///
/// This function performs sign extension of bit 47 to make the address canonical, so
/// bits 48 to 64 are overwritten. If you want to check that these bits contain no data,
/// use `new` or `try_new`.
pub fn new_unchecked(mut addr: u64) -> VirtAddr {
if addr.get_bit(47) {
addr.set_bits(48..64, 0xffff);
} else {
addr.set_bits(48..64, 0);
}
VirtAddr(addr)
}
/// Creates a virtual address that points to `0`.
pub const fn zero() -> VirtAddr {
VirtAddr(0)
}
/// Converts the address to an `u64`.
pub fn as_u64(self) -> u64 {
self.0
}
/// Creates a virtual address from the given pointer
pub fn from_ptr<T>(ptr: *const T) -> Self {
Self::new(u64::from_usize(ptr as usize))
}
/// Converts the address to a raw pointer.
#[cfg(target_pointer_width = "64")]
pub fn as_ptr<T>(self) -> *const T {
usize_from(self.as_u64()) as *const T
}
/// Converts the address to a mutable raw pointer.
#[cfg(target_pointer_width = "64")]
pub fn as_mut_ptr<T>(self) -> *mut T {
self.as_ptr::<T>() as *mut T
}
/// Aligns the virtual address upwards to the given alignment.
///
/// See the `align_up` free function for more information.
pub fn aligned_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr(align_up(self.0, align.into()))
}
/// Aligns the virtual address downwards to the given alignment.
///
/// See the `align_down` free function for more information.
pub fn aligned_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr(align_down(self.0, align.into()))
}
/// Checks whether the virtual address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.aligned_down(align) == self
}
/// Returns the 12-bit page offset of this virtual address.
pub fn page_offset(&self) -> u12 {
u12::new((self.0 & 0xfff).try_into().unwrap())
}
// ^ @todo this only works for 4KiB pages
/// Returns the 9-bit level 3 page table index.
pub fn l3_index(&self) -> u9 {
u9::new(((self.0 >> 12) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 2 page table index.
pub fn l2_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 1 page table index.
pub fn l1_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 0 page table index.
pub fn l0_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9 >> 9) & 0o777).try_into().unwrap())
}
}
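// Worked example (4 KiB granule): for VirtAddr(0x0020_0000), the start of the second 2 MiB block,
// page_offset() == 0, l3_index() == 0, l2_index() == 1, l1_index() == 0 and l0_index() == 0.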
impl fmt::Debug for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VirtAddr({:#x})", self.0)
}
}
impl fmt::Binary for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl From<u64> for VirtAddr {
fn from(value: u64) -> Self {
VirtAddr::new(value)
}
}
impl From<VirtAddr> for u64 {
fn from(value: VirtAddr) -> Self {
value.as_u64()
}
}
impl Add<u64> for VirtAddr {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for VirtAddr {
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Add<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
self + u64::from_usize(rhs)
}
}
impl AddAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn add_assign(&mut self, rhs: usize) {
self.add_assign(u64::from_usize(rhs))
}
}
impl Sub<u64> for VirtAddr {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for VirtAddr {
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
self - u64::from_usize(rhs)
}
}
impl SubAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn sub_assign(&mut self, rhs: usize) {
self.sub_assign(u64::from_usize(rhs))
}
}
impl Sub<VirtAddr> for VirtAddr {
type Output = u64;
fn sub(self, rhs: VirtAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
impl Rem<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = u64;
fn rem(self, rhs: usize) -> Self::Output {
self.0 % u64::from_usize(rhs)
}
}
impl RemAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn rem_assign(&mut self, rhs: usize) {
*self = VirtAddr::new(self.0 % u64::from_usize(rhs));
}
}
/// A 64-bit physical memory address.
///
/// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled
/// on non 64-bit systems. The `UsizeConversions` trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `aarch64`, only the 52 lower bits of a physical address can be used. The top 12 bits need
/// to be zero. This type guarantees that it always represents a valid physical address.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
#[repr(transparent)]
pub struct PhysAddr(u64);
/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all null.
#[derive(Debug)]
pub struct PhysAddrNotValid(u64);
impl PhysAddr {
/// Creates a new physical address.
///
/// Panics if any bit in positions 52 to 64 is set.
pub fn new(addr: u64) -> PhysAddr {
assert_eq!(
addr.get_bits(52..64),
0,
"physical addresses must not have any set bits in positions 52 to 64"
);
PhysAddr(addr)
}
/// Tries to create a new physical address.
///
/// Fails if any bits in the bit positions 52 to 64 are set.
pub fn try_new(addr: u64) -> Result<PhysAddr, PhysAddrNotValid> {
match addr.get_bits(52..64) {
0 => Ok(PhysAddr(addr)), // address is valid
_ => Err(PhysAddrNotValid(addr)),
}
}
/// Creates a physical address that points to `0`.
pub const fn zero() -> PhysAddr {
PhysAddr(0)
}
/// Converts the address to an `u64`.
pub fn as_u64(self) -> u64 {
self.0
}
/// Convenience method for checking if a physical address is null.
pub fn is_null(&self) -> bool {
self.0 == 0
}
/// Aligns the physical address upwards to the given alignment.
///
/// See the `align_up` function for more information.
pub fn aligned_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_up(self.0, align.into()))
}
/// Aligns the physical address downwards to the given alignment.
///
/// See the `align_down` function for more information.
pub fn aligned_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_down(self.0, align.into()))
}
/// Checks whether the physical address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.aligned_down(align) == self
}
}
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "PhysAddr({:#x})", self.0)
}
}
impl fmt::Binary for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl From<u64> for PhysAddr {
fn from(value: u64) -> Self {
PhysAddr::new(value)
}
}
impl From<PhysAddr> for u64 {
fn from(value: PhysAddr) -> Self {
value.as_u64()
}
}
impl Add<u64> for PhysAddr {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for PhysAddr {
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Add<usize> for PhysAddr
where
u64: FromUsize,
{
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
self + u64::from_usize(rhs)
}
}
impl AddAssign<usize> for PhysAddr
where
u64: FromUsize,
{
fn add_assign(&mut self, rhs: usize) {
self.add_assign(u64::from_usize(rhs))
}
}
impl Sub<u64> for PhysAddr {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for PhysAddr {
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<usize> for PhysAddr
where
u64: FromUsize,
{
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
self - u64::from_usize(rhs)
}
}
impl SubAssign<usize> for PhysAddr
where
u64: FromUsize,
{
fn sub_assign(&mut self, rhs: usize) {
self.sub_assign(u64::from_usize(rhs))
}
}
impl Sub<PhysAddr> for PhysAddr {
type Output = u64;
fn sub(self, rhs: PhysAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
#[cfg(test)]
mod tests {
// use super::*;
// #[test_case]
// pub fn test_invalid_phys_addr() {
// let result = PhysAddr::try_new(0xfafa_0123_0123_0123_3210_3210_3210_3210);
// if let Err(e) = result {
// assert_eq!(
// e,
// PhysAddrNotValid(0xfafa_0123_0123_0123_3210_3210_3210_3210)
// );
// } else {
// assert!(false)
// }
// }
}


@@ -0,0 +1,707 @@
/*
* SPDX-License-Identifier: MIT OR BlueOak-1.0.0
* Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
* Copyright (c) 2019 Berkus Decker <berkus+github@metta.systems>
* Original code distributed under MIT, additional changes are under BlueOak-1.0.0
*/
//! MMU initialisation.
//!
//! Paging is mostly based on [previous version](https://os.phil-opp.com/page-tables/) of
//! Phil Opp's [paging guide](https://os.phil-opp.com/paging-implementation/) and
//! [ARMv8 ARM memory addressing](https://static.docs.arm.com/100940/0100/armv8_a_address%20translation_100940_0100_en.pdf).
use {
crate::{
arch::aarch64::memory::{
get_virt_addr_properties, AttributeFields, /*FrameAllocator, PhysAddr, VirtAddr,*/
},
println,
},
// bitflags::bitflags,
core::{
// convert::TryInto,
// fmt,
marker::PhantomData,
ops::{Index, IndexMut},
// ptr::Unique,
},
cortex_a::{
barrier,
regs::{ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1},
},
register::{
cpu::{RegisterReadOnly, RegisterReadWrite},
register_bitfields,
},
// ux::*,
};
mod mair {
use cortex_a::regs::MAIR_EL1;
/// Setup function for the MAIR_EL1 register.
pub fn set_up() {
use cortex_a::regs::RegisterReadWrite;
// Define the three memory types that we will map: normal cacheable DRAM, non-cacheable DRAM, and device memory.
MAIR_EL1.write(
// Attribute 2 -- Device Memory
MAIR_EL1::Attr2_Device::nonGathering_nonReordering_EarlyWriteAck
// Attribute 1 -- Non Cacheable DRAM
+ MAIR_EL1::Attr1_Normal_Outer::NonCacheable
+ MAIR_EL1::Attr1_Normal_Inner::NonCacheable
// Attribute 0 -- Regular Cacheable
+ MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc
+ MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc,
);
}
// Three descriptive consts for indexing into the correct MAIR_EL1 attributes.
pub mod attr {
pub const NORMAL: u64 = 0;
pub const NORMAL_NON_CACHEABLE: u64 = 1;
pub const DEVICE_NGNRE: u64 = 2;
// DEVICE_GRE
// DEVICE_NGNRNE
}
}
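// The indices above line up with the Attr0/Attr1/Attr2 fields programmed in mair::set_up():
// a descriptor's AttrIndx field selects NORMAL (0), NORMAL_NON_CACHEABLE (1) or DEVICE_NGNRE (2).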
/// Parse the ID_AA64MMFR0_EL1 register for runtime information about supported MMU features.
/// Print the current state of the TCR_EL1 register.
pub fn print_features() {
// use crate::cortex_a::regs::RegisterReadWrite;
let sctlr = SCTLR_EL1.extract();
if let Some(SCTLR_EL1::M::Value::Enable) = sctlr.read_as_enum(SCTLR_EL1::M) {
println!("[i] MMU currently enabled");
}
if let Some(SCTLR_EL1::I::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::I) {
println!("[i] MMU I-cache enabled");
}
if let Some(SCTLR_EL1::C::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::C) {
println!("[i] MMU D-cache enabled");
}
let mmfr = ID_AA64MMFR0_EL1.extract();
if let Some(ID_AA64MMFR0_EL1::TGran4::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran4)
{
println!("[i] MMU: 4 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran16::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran16)
{
println!("[i] MMU: 16 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran64::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran64)
{
println!("[i] MMU: 64 KiB granule supported!");
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::ASIDBits) {
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_16) => {
println!("[i] MMU: 16 bit ASIDs supported!")
}
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_8) => {
println!("[i] MMU: 8 bit ASIDs supported!")
}
_ => println!("[i] MMU: Invalid ASID bits specified!"),
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::PARange) {
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_32) => {
println!("[i] MMU: Up to 32 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_36) => {
println!("[i] MMU: Up to 36 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_40) => {
println!("[i] MMU: Up to 40 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_42) => {
println!("[i] MMU: Up to 42 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_44) => {
println!("[i] MMU: Up to 44 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_48) => {
println!("[i] MMU: Up to 48 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_52) => {
println!("[i] MMU: Up to 52 Bit physical address range supported!")
}
_ => println!("[i] MMU: Invalid PARange specified!"),
}
let tcr = TCR_EL1.extract();
match tcr.read_as_enum(TCR_EL1::IPS) {
Some(TCR_EL1::IPS::Value::Bits_32) => {
println!("[i] MMU: 32 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_36) => {
println!("[i] MMU: 36 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_40) => {
println!("[i] MMU: 40 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_42) => {
println!("[i] MMU: 42 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_44) => {
println!("[i] MMU: 44 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_48) => {
println!("[i] MMU: 48 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_52) => {
println!("[i] MMU: 52 Bit intermediate physical address size supported!")
}
_ => println!("[i] MMU: Invalid IPS specified!"),
}
match tcr.read_as_enum(TCR_EL1::TG0) {
Some(TCR_EL1::TG0::Value::KiB_4) => println!("[i] MMU: TTBR0 4 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_16) => println!("[i] MMU: TTBR0 16 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_64) => println!("[i] MMU: TTBR0 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR0 granule size specified!"),
}
let t0sz = tcr.read(TCR_EL1::T0SZ);
println!("[i] MMU: T0sz = 64-{}={} bits", t0sz, 64 - t0sz);
match tcr.read_as_enum(TCR_EL1::TG1) {
Some(TCR_EL1::TG1::Value::KiB_4) => println!("[i] MMU: TTBR1 4 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_16) => println!("[i] MMU: TTBR1 16 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_64) => println!("[i] MMU: TTBR1 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR1 granule size specified!"),
}
let t1sz = tcr.read(TCR_EL1::T1SZ);
println!("[i] MMU: T1sz = 64-{}={} bits", t1sz, 64 - t1sz);
}
register_bitfields! {u64,
// AArch64 Reference Manual page 2150, D5-2445
STAGE1_DESCRIPTOR [
// In table descriptors
NSTable_EL3 OFFSET(63) NUMBITS(1) [],
/// Access Permissions for subsequent tables
APTable OFFSET(61) NUMBITS(2) [
RW_EL1 = 0b00,
RW_EL1_EL0 = 0b01,
RO_EL1 = 0b10,
RO_EL1_EL0 = 0b11
],
// User execute-never for subsequent tables
UXNTable OFFSET(60) NUMBITS(1) [
Execute = 0,
NeverExecute = 1
],
/// Privileged execute-never for subsequent tables
PXNTable OFFSET(59) NUMBITS(1) [
Execute = 0,
NeverExecute = 1
],
// In block descriptors
// OS-specific data
OSData OFFSET(55) NUMBITS(4) [],
// User execute-never
UXN OFFSET(54) NUMBITS(1) [
Execute = 0,
NeverExecute = 1
],
/// Privileged execute-never
PXN OFFSET(53) NUMBITS(1) [
Execute = 0,
NeverExecute = 1
],
// @fixme ?? where is this described
CONTIGUOUS OFFSET(52) NUMBITS(1) [
False = 0,
True = 1
],
// @fixme ?? where is this described
DIRTY OFFSET(51) NUMBITS(1) [
False = 0,
True = 1
],
/// Various address fields, depending on use case
LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [], // [47:21]
NEXT_LVL_TABLE_ADDR_4KiB OFFSET(12) NUMBITS(36) [], // [47:12]
// @fixme ?? where is this described
NON_GLOBAL OFFSET(11) NUMBITS(1) [
False = 0,
True = 1
],
/// Access flag
AF OFFSET(10) NUMBITS(1) [
False = 0,
True = 1
],
/// Shareability field
SH OFFSET(8) NUMBITS(2) [
OuterShareable = 0b10,
InnerShareable = 0b11
],
/// Access Permissions
AP OFFSET(6) NUMBITS(2) [
RW_EL1 = 0b00,
RW_EL1_EL0 = 0b01,
RO_EL1 = 0b10,
RO_EL1_EL0 = 0b11
],
NS_EL3 OFFSET(5) NUMBITS(1) [],
/// Memory attributes index into the MAIR_EL1 register
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
],
VALID OFFSET(0) NUMBITS(1) [
False = 0,
True = 1
]
]
}
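// Example encoding: STAGE1_DESCRIPTOR::VALID::True + STAGE1_DESCRIPTOR::TYPE::Table
// + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(0x1234) produces the raw value 0x0123_4003.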
/// A function that maps the generic memory range attributes to HW-specific
/// attributes of the MMU.
fn into_mmu_attributes(
attribute_fields: AttributeFields,
) -> register::FieldValue<u64, STAGE1_DESCRIPTOR::Register> {
use super::{AccessPermissions, MemAttributes};
// Memory attributes
let mut desc = match attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => {
STAGE1_DESCRIPTOR::SH::InnerShareable
+ STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL)
}
MemAttributes::NonCacheableDRAM => {
STAGE1_DESCRIPTOR::SH::InnerShareable
+ STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL_NON_CACHEABLE)
}
MemAttributes::Device => {
STAGE1_DESCRIPTOR::SH::OuterShareable
+ STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::DEVICE_NGNRE)
}
};
// Access Permissions
desc += match attribute_fields.acc_perms {
AccessPermissions::ReadOnly => STAGE1_DESCRIPTOR::AP::RO_EL1,
AccessPermissions::ReadWrite => STAGE1_DESCRIPTOR::AP::RW_EL1,
};
// Execute Never
desc += if attribute_fields.execute_never {
STAGE1_DESCRIPTOR::PXN::NeverExecute
} else {
STAGE1_DESCRIPTOR::PXN::Execute
};
desc
}
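// For instance, AttributeFields { mem_attributes: Device, acc_perms: ReadWrite, execute_never: true }
// resolves to SH::OuterShareable + AttrIndx(DEVICE_NGNRE) + AP::RW_EL1 + PXN::NeverExecute.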
/*
* With 4k page granule, a virtual address is split into 4 lookup parts
* spanning 9 bits each:
*
* _______________________________________________
* | | | | | | |
* | signx | Lv0 | Lv1 | Lv2 | Lv3 | off |
* |_______|_______|_______|_______|_______|_______|
* 63-48 47-39 38-30 29-21 20-12 11-00
*
* mask page size
*
* Lv0: FF8000000000 --
* Lv1: 7FC0000000 1G
* Lv2: 3FE00000 2M
* Lv3: 1FF000 4K
* off: FFF
*
* RPi3 supports 64K and 4K granules, also 40-bit physical addresses.
* It can also address only 1 GiB of physical memory, so these 40-bit physical addresses are largely theoretical.
*
* 48-bit virtual address space; different mappings in VBAR0 (EL0) and VBAR1 (EL1+).
*/
pub const NUM_ENTRIES_4KIB: u64 = 512;
/// Trait for abstracting over the possible page sizes, 4KiB, 16KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord {
/// The page size in bytes.
const SIZE: u64;
/// A string representation of the page size for debug output.
const SIZE_AS_DEBUG_STR: &'static str;
/// The page shift in bits.
const SHIFT: usize;
/// The page mask in bits.
const MASK: u64;
}
/// This trait is implemented for 4 KiB and 2 MiB pages, but not for 1 GiB pages.
pub trait NotGiantPageSize: PageSize {} // @todo doesn't have to be pub??
/// A standard 4KiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}
impl PageSize for Size4KiB {
const SIZE: u64 = 4096;
const SIZE_AS_DEBUG_STR: &'static str = "4KiB";
const SHIFT: usize = 12;
const MASK: u64 = 0xfff;
}
impl NotGiantPageSize for Size4KiB {}
/// A “huge” 2MiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}
impl PageSize for Size2MiB {
const SIZE: u64 = Size4KiB::SIZE * NUM_ENTRIES_4KIB;
const SIZE_AS_DEBUG_STR: &'static str = "2MiB";
const SHIFT: usize = 21;
const MASK: u64 = 0x1fffff;
}
impl NotGiantPageSize for Size2MiB {}
type EntryFlags = register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>;
// type EntryRegister = register::LocalRegisterCopy<u64, STAGE1_DESCRIPTOR::Register>;
/// L0 table -- only pointers to L1 tables
pub enum PageGlobalDirectory {}
/// L1 tables -- pointers to L2 tables or giant 1GiB pages
pub enum PageUpperDirectory {}
/// L2 tables -- pointers to L3 tables or huge 2MiB pages
pub enum PageDirectory {}
/// L3 tables -- only pointers to 4/16KiB pages
pub enum PageTable {}
pub trait TableLevel {}
pub trait HierarchicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl TableLevel for PageGlobalDirectory {}
impl TableLevel for PageUpperDirectory {}
impl TableLevel for PageDirectory {}
impl TableLevel for PageTable {}
impl HierarchicalLevel for PageGlobalDirectory {
type NextLevel = PageUpperDirectory;
}
impl HierarchicalLevel for PageUpperDirectory {
type NextLevel = PageDirectory;
}
impl HierarchicalLevel for PageDirectory {
type NextLevel = PageTable;
}
// PageTables do not have next level, therefore they are not HierarchicalLevel
// Contains just u64 internally, provides enum interface on top
#[repr(C)]
#[repr(align(4096))]
pub struct Table<L: TableLevel> {
entries: [u64; NUM_ENTRIES_4KIB as usize],
level: PhantomData<L>,
}
// Implementation code shared for all levels of page tables
impl<L> Table<L>
where
L: TableLevel,
{
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
*entry = 0;
}
}
}
impl<L> Index<usize> for Table<L>
where
L: TableLevel,
{
type Output = u64;
fn index(&self, index: usize) -> &u64 {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L>
where
L: TableLevel,
{
fn index_mut(&mut self, index: usize) -> &mut u64 {
&mut self.entries[index]
}
}
/// Type-safe enum wrapper covering Table<L>'s 64-bit entries.
#[derive(Clone)]
// #[repr(transparent)]
enum PageTableEntry {
/// Empty page table entry.
Invalid,
/// A table descriptor in an L0, L1 or L2 table, pointing to the next-level table.
/// L0 tables can only point to L1 tables.
TableDescriptor(EntryFlags),
/// A Level2 block descriptor with 2 MiB aperture.
///
/// The output points to physical memory.
Lvl2BlockDescriptor(EntryFlags),
/// A page descriptor with 4 KiB aperture.
///
/// The output points to physical memory.
PageDescriptor(EntryFlags),
}
/// A descriptor pointing to the next page table. (within PageTableEntry enum)
// struct TableDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
impl PageTableEntry {
fn new_table_descriptor(next_lvl_table_addr: usize) -> Result<PageTableEntry, &'static str> {
if next_lvl_table_addr % Size4KiB::SIZE as usize != 0 {
// @todo SIZE must be usize
return Err("TableDescriptor: Address is not 4 KiB aligned.");
}
let shifted = next_lvl_table_addr >> Size4KiB::SHIFT;
Ok(PageTableEntry::TableDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::TYPE::Table
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
))
}
}
/// A Level2 block descriptor with 2 MiB aperture.
///
/// The output points to physical memory.
// struct Lvl2BlockDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
impl PageTableEntry {
fn new_lvl2_block_descriptor(
output_addr: usize,
attribute_fields: AttributeFields,
) -> Result<PageTableEntry, &'static str> {
if output_addr % Size2MiB::SIZE as usize != 0 {
return Err("BlockDescriptor: Address is not 2 MiB aligned.");
}
let shifted = output_addr >> Size2MiB::SHIFT;
Ok(PageTableEntry::Lvl2BlockDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::AF::True
+ into_mmu_attributes(attribute_fields)
+ STAGE1_DESCRIPTOR::TYPE::Block
+ STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(shifted as u64),
))
}
}
/// A page descriptor with 4 KiB aperture.
///
/// The output points to physical memory.
impl PageTableEntry {
fn new_page_descriptor(
output_addr: usize,
attribute_fields: AttributeFields,
) -> Result<PageTableEntry, &'static str> {
if output_addr % Size4KiB::SIZE as usize != 0 {
return Err("PageDescriptor: Address is not 4 KiB aligned.");
}
let shifted = output_addr >> Size4KiB::SHIFT;
Ok(PageTableEntry::PageDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::AF::True
+ into_mmu_attributes(attribute_fields)
+ STAGE1_DESCRIPTOR::TYPE::Table
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
))
}
}
impl From<u64> for PageTableEntry {
fn from(_val: u64) -> PageTableEntry {
// xxx0 -> Invalid
// xx11 -> TableDescriptor on L0, L1 and L2
// xx10 -> Block Entry L1 and L2
// xx11 -> PageDescriptor L3
PageTableEntry::Invalid
}
}
impl From<PageTableEntry> for u64 {
fn from(val: PageTableEntry) -> u64 {
match val {
PageTableEntry::Invalid => 0,
PageTableEntry::TableDescriptor(x)
| PageTableEntry::Lvl2BlockDescriptor(x)
| PageTableEntry::PageDescriptor(x) => x.value,
}
}
}
static mut LVL2_TABLE: Table<PageDirectory> = Table::<PageDirectory> {
entries: [0; NUM_ENTRIES_4KIB as usize],
level: PhantomData,
};
static mut LVL3_TABLE: Table<PageTable> = Table::<PageTable> {
entries: [0; NUM_ENTRIES_4KIB as usize],
level: PhantomData,
};
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
}
impl BaseAddr for [u64; 512] {
fn base_addr_u64(&self) -> u64 {
self as *const u64 as u64
}
fn base_addr_usize(&self) -> usize {
self as *const u64 as usize
}
}
/// Set up identity mapped page tables for the first 1 gigabyte of address space.
/// Default split: 880 MB ARM RAM, 128 MB VideoCore.
pub unsafe fn init() -> Result<(), &'static str> {
// Prepare the memory attribute indirection register.
mair::set_up();
// Point the first 2 MiB of virtual addresses to the follow-up LVL3
// page-table.
LVL2_TABLE.entries[0] =
PageTableEntry::new_table_descriptor(LVL3_TABLE.entries.base_addr_usize())?.into();
// Fill the rest of the LVL2 (2 MiB) entries as block descriptors.
//
// Notice the skip(1) which makes the iteration start at the second 2 MiB
// block (0x20_0000).
for (block_descriptor_nr, entry) in LVL2_TABLE.entries.iter_mut().enumerate().skip(1) {
let virt_addr = block_descriptor_nr << Size2MiB::SHIFT;
let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
Err(s) => return Err(s),
Ok((a, b)) => (a, b),
};
let block_desc =
match PageTableEntry::new_lvl2_block_descriptor(output_addr, attribute_fields) {
Err(s) => return Err(s),
Ok(desc) => desc,
};
*entry = block_desc.into();
}
// Finally, fill the single LVL3 table (4 KiB granule).
for (page_descriptor_nr, entry) in LVL3_TABLE.entries.iter_mut().enumerate() {
let virt_addr = page_descriptor_nr << Size4KiB::SHIFT;
let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
Err(s) => return Err(s),
Ok((a, b)) => (a, b),
};
let page_desc = match PageTableEntry::new_page_descriptor(output_addr, attribute_fields) {
Err(s) => return Err(s),
Ok(desc) => desc,
};
*entry = page_desc.into();
}
// Point to the LVL2 table base address in TTBR0.
TTBR0_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // User (lo-)space addresses
// TTBR1_EL1.set_baddr(LVL2_TABLE.entries.base_addr_u64()); // Kernel (hi-)space addresses
// Configure various settings of stage 1 of the EL1 translation regime.
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(
TCR_EL1::TBI0::Ignored // @todo TBI1 also set to Ignored??
+ TCR_EL1::IPS.val(ips) // Intermediate Physical Address Size
// ttbr0 user memory addresses
+ TCR_EL1::TG0::KiB_4 // 4 KiB granule
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(34) // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
// ttbr1 kernel memory addresses
+ TCR_EL1::TG1::KiB_4 // 4 KiB granule
+ TCR_EL1::SH1::Inner
+ TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD1::EnableTTBR1Walks
+ TCR_EL1::T1SZ.val(34), // ARMv8ARM Table D5-11 minimum TxSZ for starting table level 2
);
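// With T0SZ = T1SZ = 34 the translated VA range per TTBR is 2^(64-34) = 2^30 bytes, i.e. exactly
// the 1 GiB identity-mapped above, which is why the walk can start at a level 2 table (512 x 2 MiB).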
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
barrier::isb(barrier::SY);
// use cortex_a::regs::RegisterReadWrite;
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction
/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively from the PoC are discarded, since they may have
* been dynamically patched at the PoU.
*/
barrier::isb(barrier::SY);
Ok(())
}


@@ -0,0 +1,362 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
use {
crate::println,
core::{fmt, ops::RangeInclusive},
};
mod addr;
pub mod mmu;
pub use addr::PhysAddr;
pub use addr::VirtAddr;
// aarch64 granules and page sizes howto:
// https://stackoverflow.com/questions/34269185/simultaneous-existence-of-different-sized-pages-on-aarch64
/// Default page size used by the kernel.
pub const PAGE_SIZE: usize = 4096;
/// System memory map.
/// This is a fixed memory map for RasPi3,
/// @todo we need to infer the memory map from the provided DTB.
#[rustfmt::skip]
pub mod map {
pub const START: usize = 0x0000_0000;
pub const END: usize = 0x3FFF_FFFF;
pub mod phys {
pub const VIDEOMEM_BASE: usize = 0x3e00_0000;
pub const MMIO_BASE: usize = 0x3F00_0000;
pub const VIDEOCORE_MBOX_BASE: usize = MMIO_BASE + 0x0000_B880;
pub const GPIO_BASE: usize = MMIO_BASE + 0x0020_0000;
pub const PL011_UART_BASE: usize = MMIO_BASE + 0x0020_1000;
pub const MINI_UART_BASE: usize = MMIO_BASE + 0x0021_5000;
pub const MMIO_END: usize = super::END;
}
pub mod virt {
pub const KERN_STACK_START: usize = super::START;
pub const KERN_STACK_END: usize = 0x0007_FFFF;
// The second 2 MiB block.
pub const DMA_HEAP_START: usize = 0x0020_0000;
pub const DMA_HEAP_END: usize = 0x005F_FFFF;
}
}
/// Types used for compiling the virtual memory layout of the kernel using
/// address ranges.
pub mod kernel_mem_range {
use core::ops::RangeInclusive;
#[derive(Copy, Clone)]
pub enum MemAttributes {
CacheableDRAM,
NonCacheableDRAM,
Device,
}
#[derive(Copy, Clone)]
pub enum AccessPermissions {
ReadOnly,
ReadWrite,
}
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum Translation {
Identity,
Offset(usize),
}
#[derive(Copy, Clone)]
pub struct AttributeFields {
pub mem_attributes: MemAttributes,
pub acc_perms: AccessPermissions,
pub execute_never: bool,
}
impl Default for AttributeFields {
fn default() -> AttributeFields {
AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
}
}
}
pub struct Descriptor {
pub name: &'static str,
pub virtual_range: fn() -> RangeInclusive<usize>,
pub translation: Translation,
pub attribute_fields: AttributeFields,
}
}
pub use kernel_mem_range::*;
/// A virtual memory layout that is agnostic of the paging granularity that the
/// hardware MMU will use.
///
/// Contains only special ranges, aka anything that is _not_ normal cacheable
/// DRAM.
static KERNEL_VIRTUAL_LAYOUT: [Descriptor; 6] = [
Descriptor {
name: "Kernel stack",
virtual_range: || {
RangeInclusive::new(map::virt::KERN_STACK_START, map::virt::KERN_STACK_END)
},
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
Descriptor {
name: "Boot code and data",
virtual_range: || {
// Using the linker script, we ensure that the boot area is consecutive and 4
// KiB aligned, and we export the boundaries via symbols:
//
// [__BOOT_START, __BOOT_END)
extern "C" {
// The inclusive start of the boot area, aka the address of the
// first byte of the area.
static __BOOT_START: u64;
// The exclusive end of the boot area, aka the address of
// the first byte _after_ the boot area.
static __BOOT_END: u64;
}
unsafe {
// Notice the subtraction to turn the exclusive end into an
// inclusive end
RangeInclusive::new(
&__BOOT_START as *const _ as usize,
&__BOOT_END as *const _ as usize - 1,
)
}
},
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
},
Descriptor {
name: "Kernel code and RO data",
virtual_range: || {
// Using the linker script, we ensure that the RO area is consecutive and 4
// KiB aligned, and we export the boundaries via symbols:
//
// [__RO_START, __RO_END)
extern "C" {
// The inclusive start of the read-only area, aka the address of the
// first byte of the area.
static __RO_START: u64;
// The exclusive end of the read-only area, aka the address of
// the first byte _after_ the RO area.
static __RO_END: u64;
}
unsafe {
// Notice the subtraction to turn the exclusive end into an
// inclusive end
RangeInclusive::new(
&__RO_START as *const _ as usize,
&__RO_END as *const _ as usize - 1,
)
}
},
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
},
Descriptor {
name: "Kernel data and BSS",
virtual_range: || {
extern "C" {
static __DATA_START: u64;
static __BSS_END: u64;
}
unsafe {
RangeInclusive::new(
&__DATA_START as *const _ as usize,
&__BSS_END as *const _ as usize - 1,
)
}
},
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
Descriptor {
name: "DMA heap pool",
virtual_range: || RangeInclusive::new(map::virt::DMA_HEAP_START, map::virt::DMA_HEAP_END),
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::NonCacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
Descriptor {
name: "Device MMIO",
virtual_range: || RangeInclusive::new(map::phys::VIDEOMEM_BASE, map::phys::MMIO_END),
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
];
/// For a given virtual address, find and return the output address and
/// the corresponding attributes.
///
/// If the address is not covered by KERNEL_VIRTUAL_LAYOUT, return a default for
/// normal cacheable DRAM.
pub fn get_virt_addr_properties(
virt_addr: usize,
) -> Result<(usize, AttributeFields), &'static str> {
if virt_addr > map::END {
return Err("Address out of range.");
}
for i in KERNEL_VIRTUAL_LAYOUT.iter() {
if (i.virtual_range)().contains(&virt_addr) {
let output_addr = match i.translation {
Translation::Identity => virt_addr,
Translation::Offset(a) => a + (virt_addr - (i.virtual_range)().start()),
};
return Ok((output_addr, i.attribute_fields));
}
}
Ok((virt_addr, AttributeFields::default()))
}
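// Example: get_virt_addr_properties(map::phys::GPIO_BASE) (0x3F20_0000) falls into the
// "Device MMIO" descriptor, so it returns the same (identity-translated) address with
// Device memory attributes, read-write access and execute-never set.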
/// Human-readable output of a Descriptor.
impl fmt::Display for Descriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Call the function to which self.virtual_range points, and dereference the
// result, which causes Rust to copy the value.
let start = *(self.virtual_range)().start();
let end = *(self.virtual_range)().end();
let size = end - start + 1;
// log2(1024)
const KIB_RSHIFT: u32 = 10;
// log2(1024 * 1024)
const MIB_RSHIFT: u32 = 20;
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
(size >> MIB_RSHIFT, "MiB")
} else if (size >> KIB_RSHIFT) > 0 {
(size >> KIB_RSHIFT, "KiB")
} else {
(size, "Byte")
};
let attr = match self.attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => "C",
MemAttributes::NonCacheableDRAM => "NC",
MemAttributes::Device => "Dev",
};
let acc_p = match self.attribute_fields.acc_perms {
AccessPermissions::ReadOnly => "RO",
AccessPermissions::ReadWrite => "RW",
};
let xn = if self.attribute_fields.execute_never {
"PXN"
} else {
"PX"
};
write!(
f,
" {:#010X} - {:#010X} | {: >3} {} | {: <3} {} {: <3} | {}",
start, end, size, unit, attr, acc_p, xn, self.name
)
}
}
/// Print the kernel memory layout.
pub fn print_layout() {
println!("[i] Kernel memory layout:");
for i in KERNEL_VIRTUAL_LAYOUT.iter() {
println!("{}", i);
}
}
/// Align address downwards.
///
/// Returns the greatest x with alignment `align` so that x <= addr.
/// The alignment must be a power of 2.
pub fn align_down(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
addr & !(align - 1)
}
/// Align address upwards.
///
/// Returns the smallest x with alignment `align` so that x >= addr.
/// The alignment must be a power of 2.
pub fn align_up(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
let align_mask = align - 1;
if addr & align_mask == 0 {
addr // already aligned
} else {
(addr | align_mask) + 1
}
}
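// Worked examples: align_down(0x1fff, 0x1000) == 0x1000 and align_up(0x1001, 0x1000) == 0x2000;
// an already aligned address is returned unchanged by both.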
/// Calculate the next possible aligned address without sanity checking the
/// input parameters.
// #[inline]
// fn aligned_addr_unchecked(addr: usize, alignment: usize) -> usize {
// (addr + (alignment - 1)) & !(alignment - 1)
// }
#[cfg(test)]
mod tests {
use super::*;
#[test_case]
pub fn test_align_up() {
// align 1
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(1234, 1), 1234);
assert_eq!(align_up(0xffffffffffffffff, 1), 0xffffffffffffffff);
// align 2
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(1233, 2), 1234);
assert_eq!(align_up(0xfffffffffffffffe, 2), 0xfffffffffffffffe);
// address 0
assert_eq!(align_up(0, 128), 0);
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(0, 0x8000000000000000), 0);
}
}


@@ -2,6 +2,7 @@
* SPDX-License-Identifier: BlueOak-1.0.0
*/
mod boot;
pub mod memory;
#[inline]
pub fn endless_sleep() -> ! {


@@ -1,6 +1,7 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
#[cfg(target_arch = "aarch64")]
#[macro_use]
pub mod aarch64;


@@ -4,6 +4,7 @@
#![no_std]
#![no_main]
#![feature(asm)]
#![feature(ptr_internals)]
#![feature(format_args_nl)]
#![feature(custom_test_frameworks)]
#![test_runner(crate::tests::test_runner)]
@@ -26,10 +27,24 @@ mod write_to;
entry!(kmain);
fn print_mmu_state_and_features() {
memory::mmu::print_features();
}
fn init_mmu() {
print_mmu_state_and_features();
unsafe {
memory::mmu::init().unwrap();
}
println!("MMU initialised");
}
// Kernel entry point
// arch crate is responsible for calling this
#[inline]
pub fn kmain() -> ! {
init_mmu();
#[cfg(test)]
test_main();