[wip] mmu experiments

Berkus Decker 2020-11-20 04:23:14 +02:00
parent 062591fb48
commit 3c3ce334ca
4 changed files with 1039 additions and 0 deletions

View File

@@ -0,0 +1,106 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
use super::{Frame, FrameAllocator};
use multiboot2::{MemoryArea, MemoryAreaIter}; // replace with DTB?
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
kernel_start: Frame,
kernel_end: Frame,
multiboot_start: Frame,
multiboot_end: Frame,
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(_area) = self.current_area {
// "Clone" the frame to return it if it's free. Frame doesn't
// implement Clone, but we can construct an identical frame.
let frame = Frame {
number: self.next_free_frame.number,
};
// the last frame of the current area
// (hardcoded to the top of RPi3 RAM at 0x3f00_0000 for now; the proper
// per-area computation is kept below)
let current_area_last_frame = Frame::containing_address(0x3f00_0000);
// {
// let address = area.base_addr + area.length - 1;
// Frame::containing_address(address as usize)
// };
if frame > current_area_last_frame {
// all frames of current area are used, switch to next area
// self.choose_next_area();
unimplemented!();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
// `frame` is used by the kernel
self.next_free_frame = Frame {
number: self.kernel_end.number + 1,
};
} else if frame >= self.multiboot_start && frame <= self.multiboot_end {
// `frame` is used by the multiboot information structure
self.next_free_frame = Frame {
number: self.multiboot_end.number + 1,
};
} else {
// frame is unused, increment `next_free_frame` and return it
self.next_free_frame.number += 1;
return Some(frame);
}
// `frame` was not valid, try it again with the updated `next_free_frame`
self.allocate_frame()
} else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame) {
unimplemented!()
}
}
// Fixme: no multiboot here; use the DTB with available memory regions instead.
// Need a DTB parser here!
impl AreaFrameAllocator {
pub fn new(
kernel_start: usize,
kernel_end: usize,
multiboot_start: usize,
multiboot_end: usize,
memory_areas: MemoryAreaIter,
) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(0),
current_area: None,
areas: memory_areas,
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
multiboot_start: Frame::containing_address(multiboot_start),
multiboot_end: Frame::containing_address(multiboot_end),
};
// allocator.choose_next_area();
allocator.next_free_frame = Frame::containing_address(0x100000); // start from 1MiB
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self
.areas
.clone()
.filter(|area| {
let address = area.base_addr + area.length - 1;
Frame::containing_address(address as usize) >= self.next_free_frame
})
.min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(area.base_addr as usize);
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
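// Expected construction, as a sketch (ranges come from the bootloader for now;
// per the Fixme above they should eventually come from the DTB; `boot_info` is
// a hypothetical multiboot2 BootInformation):
//
// let mut allocator = AreaFrameAllocator::new(
//     kernel_start, kernel_end,
//     multiboot_start, multiboot_end,
//     boot_info.memory_map_tag().expect("memory map required").memory_areas(),
// );
// let frame = allocator.allocate_frame().expect("out of frames");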

View File

@@ -0,0 +1,15 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
// Allocate regions from boot memory list obtained from devtree
pub struct BootRegionAllocator {}
impl BootRegionAllocator {
pub fn new(_boot_info: &BootInfo) -> Self {
Self {}
}
pub fn alloc_region(&mut self) {}
pub fn alloc_zeroed(&mut self) {}
}
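// Intended shape, as an assumption from the comment above (nothing here is
// implemented yet): keep the devtree memory regions in the struct and hand out
// sub-regions, e.g. alloc_region(size) -> Option<PhysAddr>, with alloc_zeroed
// additionally clearing the returned range.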

View File

@@ -0,0 +1,896 @@
// 1: use Table<Level> for sure
// 2: in tables use typed descriptors over generic u64 entries?? how to pick right type...
// -- TableDescriptor
// -- Lvl2BlockDescriptor
// -- PageDescriptor
// Use them instead of PageTableEntry
// 3: Use PhysFrame<Size> and Page<Size> as flexible versions of various-sized pages
// Level 0 descriptors can only output the address of a Level 1 table.
// Level 3 descriptors cannot point to another table and can only output block addresses.
// The format of the table is therefore slightly different for Level 3.
//
// this means:
// - level 0 page table can be only TableDescriptors
// - level 1,2 page table can be TableDescriptors, Lvl2BlockDescriptors (PageDescriptors)
// - level 3 page table can be only PageDescriptors
// Level / Types | Table Descriptor | Lvl2BlockDescriptor (PageDescriptor)
// --------------+------------------+--------------------------------------
// 0 | X | (with 4KiB granule)
// 1 | X | X (1GiB range)
// 2 | X | X (2MiB range)
// 3 | | X (4KiB range) -- called PageDescriptor
// encoding actually the same as in Table Descriptor
// Translation granule affects the size of the block addressed.
// Let's use a 4KiB granule on the RPi3 for simplicity.
// This gives the following address format:
//
// Maximum OA is 48 bits.
//
// Level 0 descriptor cannot be block descriptor.
// Level 0 table descriptor has Output Address in [47:12]
//
// Level 1 block descriptor has Output Address in [47:30]
// Level 2 block descriptor has Output Address in [47:21]
//
// Level 1 table descriptor has Output Address in [47:12]
// Level 2 table descriptor has Output Address in [47:12]
//
// Level 3 Page Descriptor:
// Upper Attributes [63:51]
// Res0 [50:48]
// Output Address [47:12]
// Lower Attributes [11:2]
// 11b [1:0]
// enum PageTableEntry { Page(&mut PageDescriptor), Block(&mut BlockDescriptor), Etc(&mut u64), Invalid(&mut u64) }
// impl PageTableEntry { fn new_from_entry_addr(&u64) }
// If I have, for example, Table<Level0> I can get from it N `Table<Level1>` (via impl HierarchicalTable)
// From Table<Level1> I can get either `Table<Level2>` (via impl HierarchicalTable) or `BlockDescriptor<Size1GiB>`
// From Table<Level2> I can get either `Table<Level3>` (via impl HierarchicalTable) or `BlockDescriptor<Size2MiB>`
// From Table<Level3> I can only get `PageDescriptor<Size4KiB>` (because no impl HierarchicalTable exists)
// return enum PageTableEntry constructed from table bits in u64
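// A minimal sketch of the typed-level plumbing the notes above describe and the
// `impl<L> Table<L>` below relies on (assumed shape, kept commented out since
// the real definitions live elsewhere in this WIP; Level0 corresponds to
// PageGlobalDirectory here):
//
// pub trait TableLevel {}
// pub enum Level0 {}
// pub enum Level1 {}
// pub enum Level2 {}
// pub enum Level3 {}
// impl TableLevel for Level0 {}
// impl TableLevel for Level1 {}
// impl TableLevel for Level2 {}
// impl TableLevel for Level3 {}
//
// pub trait HierarchicalLevel: TableLevel {
//     type NextLevel: TableLevel;
// }
// impl HierarchicalLevel for Level0 { type NextLevel = Level1; }
// impl HierarchicalLevel for Level1 { type NextLevel = Level2; }
// impl HierarchicalLevel for Level2 { type NextLevel = Level3; }
// // No HierarchicalLevel for Level3: a level 3 table holds only PageDescriptors.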
/*!
 * The paging system uses a separate address space in the upper kernel region
 * (TTBR1) to access the entire contents of physical memory.
 * This mapping is not available to user space (user space uses TTBR0).
 * We use the largest possible granule size to map physical memory, since we
 * want to spend the least amount of memory on these mappings.
 */
// Check largest VA supported, calculate physical_memory_offset
//
const PHYSICAL_MEMORY_OFFSET: u64 = 0xffff_8000_0000_0000; // Upper-half (TTBR1) offset for the physical memory map
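// A sketch of how the offset would be used (the helper name is illustrative,
// not part of this commit; VirtAddr/PhysAddr come from the addr module):
//
// fn phys_to_virt(phys: PhysAddr) -> VirtAddr {
//     VirtAddr::new(phys.as_u64() + PHYSICAL_MEMORY_OFFSET)
// }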
// AArch64:
// ARM ARM Table D4-8 (page 2021): check supported granule sizes, select alloc policy based on results.
// TTBR_ELx is the pdbr for specific page tables
// Page 2068 actual page descriptor formats
/// A standard 16KiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size16KiB {}
impl PageSize for Size16KiB {
const SIZE: u64 = 16384;
const SIZE_AS_DEBUG_STR: &'static str = "16KiB";
const SHIFT: usize = 14;
const MASK: u64 = 0x3fff;
}
impl NotGiantPageSize for Size16KiB {}
/// A “giant” 1GiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size1GiB {}
impl PageSize for Size1GiB {
const SIZE: u64 = Size2MiB::SIZE * NUM_ENTRIES_4KIB;
const SIZE_AS_DEBUG_STR: &'static str = "1GiB";
const SHIFT: usize = 30;
const MASK: u64 = 0x3fff_ffff;
}
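// The PageSize constants are interrelated: SIZE == 1 << SHIFT and
// MASK == SIZE - 1, so for any address `a`, `a >> SHIFT` is its frame number
// and `a & MASK` its offset within the page. A sketch (name is illustrative):
//
// fn page_number_and_offset<S: PageSize>(addr: u64) -> (u64, u64) {
//     (addr >> S::SHIFT, addr & S::MASK)
// }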
/// Errors from mapping layer (@todo use anyhow/snafu? thiserror?)
pub enum TranslationError {
NoPage,
}
// Pointer to currently active page table
// Could be either user space (TTBR0) or kernel space (TTBR1) -- ??
pub struct ActivePageTable {
l0: Unique<Table<PageGlobalDirectory>>,
}
impl ActivePageTable {
pub unsafe fn new() -> ActivePageTable {
ActivePageTable {
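// FIXME: a null placeholder is not a valid `Unique` (it must be
// non-null); replace with the real L0 table address before use.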
l0: Unique::new_unchecked(0 as *mut _),
}
}
fn l0(&self) -> &Table<PageGlobalDirectory> {
unsafe { self.l0.as_ref() }
}
fn l0_mut(&mut self) -> &mut Table<PageGlobalDirectory> {
unsafe { self.l0.as_mut() }
}
pub fn translate(&self, virtual_address: VirtAddr) -> Result<PhysAddr, TranslationError> {
let offset = virtual_address % Size4KiB::SIZE as usize; // @todo use the size of the last page of course
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| frame.start_address() + offset)
}
fn translate_page(&self, page: Page) -> Result<PhysFrame, TranslationError> {
let l1 = self.l0().next_table(u64::from(page.l0_index()) as usize);
/*
let huge_page = || {
l1.and_then(|l1| {
let l1_entry = &l1[page.l1_index() as usize];
// 1GiB page?
if let Some(start_frame) = l1_entry.pointed_frame() {
if l1_entry.flags().read(STAGE1_DESCRIPTOR::TYPE)
!= STAGE1_DESCRIPTOR::TYPE::Table.value
{
// address must be 1GiB aligned
//start_frame.is_aligned()
assert!(start_frame.number % (NUM_ENTRIES_4KIB * NUM_ENTRIES_4KIB) == 0);
return Ok(PhysFrame::from_start_address(
start_frame.number
+ page.l2_index() * NUM_ENTRIES_4KIB
+ page.l3_index(),
));
}
}
if let Some(l2) = l1.next_table(page.l1_index()) {
let l2_entry = &l2[page.l2_index()];
// 2MiB page?
if let Some(start_frame) = l2_entry.pointed_frame() {
if l2_entry.flags().read(STAGE1_DESCRIPTOR::TYPE)
!= STAGE1_DESCRIPTOR::TYPE::Table
{
// address must be 2MiB aligned
assert!(start_frame.number % NUM_ENTRIES_4KIB == 0);
return Ok(PhysFrame::from_start_address(
start_frame.number + page.l3_index(),
));
}
}
}
Err(TranslationError::NoPage)
})
};
*/
l1.and_then(|l1| l1.next_table(u64::from(page.l1_index()) as usize))
    .and_then(|l2| l2.next_table(u64::from(page.l2_index()) as usize))
    .map(|l3| l3[u64::from(page.l3_index()) as usize]) //.pointed_frame()
    .map(Into::into)
    .ok_or(TranslationError::NoPage)
// .or_else(huge_page)
}
pub fn map_to<A>(&mut self, page: Page, frame: PhysFrame, flags: EntryFlags, allocator: &mut A)
where
A: FrameAllocator,
{
let l0 = self.l0_mut();
let l1 = l0.next_table_create(u64::from(page.l0_index()) as usize, allocator);
let l2 = l1.next_table_create(u64::from(page.l1_index()) as usize, allocator);
let l3 = l2.next_table_create(u64::from(page.l2_index()) as usize, allocator);
assert_eq!(
l3[u64::from(page.l3_index()) as usize],
0 /*.is_unused()*/
);
l3[u64::from(page.l3_index()) as usize] = PageTableEntry::PageDescriptor(
STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(u64::from(frame))
+ flags // @todo properly extract flags
+ STAGE1_DESCRIPTOR::VALID::True,
)
.into();
}
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where
A: FrameAllocator,
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
}
pub fn identity_map<A>(&mut self, frame: PhysFrame, flags: EntryFlags, allocator: &mut A)
where
A: FrameAllocator,
{
let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
self.map_to(page, frame, flags, allocator)
}
fn unmap<A>(&mut self, page: Page, _allocator: &mut A)
where
A: FrameAllocator,
{
// use aarch64::instructions::tlb;
// use x86_64::VirtAddr;
assert!(self.translate(page.start_address()).is_ok());
let l3 = self
.l0_mut()
.next_table_mut(u64::from(page.l0_index()) as usize)
.and_then(|l1| l1.next_table_mut(u64::from(page.l1_index()) as usize))
.and_then(|l2| l2.next_table_mut(u64::from(page.l2_index()) as usize))
.expect("mapping code does not support huge pages");
let _frame = l3[u64::from(page.l3_index()) as usize];
// .pointed_frame()
// .unwrap();
l3[u64::from(page.l3_index()) as usize] = 0; /*.set_unused(); */
// tlb::flush(VirtAddr(page.start_address()));
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);
}
}
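// Expected usage of the mapper above, as a sketch (assumes a working
// FrameAllocator `allocator` and some EntryFlags value `flags`, and that l0
// points at a real table rather than the null placeholder):
//
// let mut active_table = unsafe { ActivePageTable::new() };
// let page = Page::containing_address(VirtAddr::new(0xdead_b000));
// active_table.map(page, flags, &mut allocator);
// assert!(active_table.translate(page.start_address()).is_ok());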
// Abstractions for page table entries.
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
/// The entry has the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned.
HugeFrame,
}
/// A 64-bit page table entry.
// pub struct PageTableEntry {
// entry: u64,
// }
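// Masks the Output Address bits [47:12] of a table/page descriptor (see the
// format notes at the top of this file).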
const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
/*
impl PageTableEntry {
/// Creates an unused page table entry.
pub fn new() -> Self {
PageTableEntry::Invalid
}
/// Returns whether this entry is zero.
pub fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
pub fn flags(&self) -> EntryFlags {
EntryFlags::new(self.entry)
}
/// Returns the physical address mapped by this entry, might be zero.
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & ADDR_MASK)
}
/// Returns the physical frame mapped by this entry.
///
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used)
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if !self.flags().read(STAGE1_DESCRIPTOR::VALID) {
Err(FrameError::FrameNotPresent)
// } else if self.flags().contains(EntryFlags::HUGE_PAGE) {
// Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
/// Map the entry to the specified physical address with the specified flags.
pub fn set_addr(&mut self, addr: PhysAddr, flags: EntryFlags) {
assert!(addr.is_aligned(Size4KiB::SIZE));
self.entry = addr.as_u64() | flags.bits();
}
/// Map the entry to the specified physical frame with the specified flags.
pub fn set_frame(&mut self, frame: PhysFrame, flags: EntryFlags) {
// assert!(!flags.contains(EntryFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}
/// Sets the flags of this entry.
pub fn set_flags(&mut self, flags: EntryFlags) {
// Todo: extract ADDR from self and replace all flags completely (?)
self.entry = self.addr().as_u64() | flags.bits();
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("addr", &self.addr());
f.field("flags", &self.flags());
f.finish()
}
}*/
// Verbatim from https://github.com/rust-osdev/x86_64/blob/aa9ae54657beb87c2a491f2ab2140b2332afa6ba/src/structures/paging/frame.rs
// Abstractions for default-sized and huge physical memory frames.
/// A physical memory frame.
/// Frame is an addressable unit of the physical address space.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct PhysFrame<S: PageSize = Size4KiB> {
start_address: PhysAddr,
size: PhantomData<S>,
}
impl<S: PageSize> From<u64> for PhysFrame<S> {
fn from(address: u64) -> PhysFrame<S> {
PhysFrame::containing_address(PhysAddr::new(address))
}
}
impl<S: PageSize> From<PhysFrame<S>> for u64 {
fn from(frame: PhysFrame<S>) -> u64 {
frame.start_address.as_u64()
}
}
impl<S: PageSize> PhysFrame<S> {
/// Returns the frame that starts at the given physical address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
pub fn from_start_address(address: PhysAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(PhysFrame::containing_address(address))
}
/// Returns the frame that contains the given physical address.
pub fn containing_address(address: PhysAddr) -> Self {
PhysFrame {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the frame.
pub fn start_address(&self) -> PhysAddr {
self.start_address
}
/// Returns the size of the frame (4KiB, 2MiB or 1GiB).
pub fn size(&self) -> u64 {
S::SIZE
}
/// Returns a range of frames, exclusive `end`.
pub fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
PhysFrameRange { start, end }
}
/// Returns a range of frames, inclusive `end`.
pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
}
impl<S: PageSize> fmt::Debug for PhysFrame<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"PhysFrame[{}]({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for PhysFrame<S> {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for PhysFrame<S> {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
type Output = u64;
fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// A range of physical memory frames, exclusive of the upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, exclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRange<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
}
}
impl<S: PageSize> Iterator for PhysFrameRange<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of physical memory frames, inclusive of the upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, inclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRangeInclusive<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start <= self.end)
}
}
impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
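// Example of frame range iteration (a sketch; addresses are arbitrary):
//
// let start = PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x0000));
// let end = PhysFrame::containing_address(PhysAddr::new(0x4000));
// for frame in PhysFrame::range(start, end) {
//     // yields frames at 0x0000, 0x1000, 0x2000 and 0x3000; `end` is excluded
// }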
// Verbatim from https://github.com/rust-osdev/x86_64/blob/aa9ae54657beb87c2a491f2ab2140b2332afa6ba/src/structures/paging/page.rs
// Abstractions for default-sized and huge virtual memory pages.
// x86_64 page level numbering: P4 -> P3 -> P2 -> P1
// armv8a page level numbering: L0 -> L1 -> L2 -> L3
/// A virtual memory page.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct Page<S: PageSize = Size4KiB> {
start_address: VirtAddr,
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// The page size in bytes.
pub const SIZE: u64 = S::SIZE;
/// Returns the page that starts at the given virtual address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
pub fn from_start_address(address: VirtAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(Page::containing_address(address))
}
/// Returns the page that contains the given virtual address.
pub fn containing_address(address: VirtAddr) -> Self {
Page {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the page.
pub fn start_address(&self) -> VirtAddr {
self.start_address
}
/// Returns the size of the page (4KiB, 2MiB or 1GiB).
pub const fn size(&self) -> u64 {
S::SIZE
}
/// Returns the level 0 page table index of this page.
pub fn l0_index(&self) -> u9 {
self.start_address().l0_index()
}
/// Returns the level 1 page table index of this page.
pub fn l1_index(&self) -> u9 {
self.start_address().l1_index()
}
/// Returns a range of pages, exclusive `end`.
pub fn range(start: Self, end: Self) -> PageRange<S> {
PageRange { start, end }
}
/// Returns a range of pages, inclusive `end`.
pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
PageRangeInclusive { start, end }
}
}
impl<S: NotGiantPageSize> Page<S> {
/// Returns the level 2 page table index of this page.
pub fn l2_index(&self) -> u9 {
self.start_address().l2_index()
}
}
impl Page<Size1GiB> {
/// Returns the 1GiB memory page with the specified page table indices.
pub fn from_page_table_indices_1gib(l0_index: u9, l1_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(l0_index));
addr.set_bits(30..39, u64::from(l1_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size2MiB> {
/// Returns the 2MiB memory page with the specified page table indices.
pub fn from_page_table_indices_2mib(l0_index: u9, l1_index: u9, l2_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(l0_index));
addr.set_bits(30..39, u64::from(l1_index));
addr.set_bits(21..30, u64::from(l2_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size4KiB> {
/// Returns the 4KiB memory page with the specified page table indices.
pub fn from_page_table_indices(l0_index: u9, l1_index: u9, l2_index: u9, l3_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(l0_index));
addr.set_bits(30..39, u64::from(l1_index));
addr.set_bits(21..30, u64::from(l2_index));
addr.set_bits(12..21, u64::from(l3_index));
Page::containing_address(VirtAddr::new(addr))
}
/// Returns the level 3 page table index of this page.
pub fn l3_index(&self) -> u9 {
self.start_address().l3_index()
}
}
impl<S: PageSize> fmt::Debug for Page<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"Page<{}>({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for Page<S> {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> AddAssign<u64> for Page<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for Page<S> {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> SubAssign<u64> for Page<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
impl<S: PageSize> Sub<Self> for Page<S> {
type Output = u64;
fn sub(self, rhs: Self) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// A range of pages with exclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PageRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, exclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRange<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
self.start >= self.end
}
}
impl<S: PageSize> Iterator for PageRange<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl PageRange<Size2MiB> {
/// Converts the range of 2MiB pages to a range of 4KiB pages.
pub fn as_4kib_page_range(self) -> PageRange<Size4KiB> {
PageRange {
start: Page::containing_address(self.start.start_address()),
end: Page::containing_address(self.end.start_address()),
}
}
}
impl<S: PageSize> fmt::Debug for PageRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of pages with inclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, inclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRangeInclusive<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
self.start > self.end
}
}
impl<S: PageSize> Iterator for PageRangeInclusive<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PageRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test_case]
pub fn test_page_ranges() {
let page_size = Size4KiB::SIZE;
let number = 1000;
let start_addr = VirtAddr::new(0xdeadbeaf);
let start: Page = Page::containing_address(start_addr);
let end = start.clone() + number;
let mut range = Page::range(start.clone(), end.clone());
for i in 0..number {
assert_eq!(
range.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range.next(), None);
let mut range_inclusive = Page::range_inclusive(start, end);
for i in 0..=number {
assert_eq!(
range_inclusive.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range_inclusive.next(), None);
}
}
/*
* SPDX-License-Identifier: BSL-1.0 - todo this is from Sergio Benitez cs140e
*/
// Abstractions for page tables.
// To get L0 we must allocate a few frames from the boot region allocator.
// So, first we init the DTB and parse memory regions from it, then init the
// boot_info page and start the MMU; this part will be initialized in mmu::init():
//pub const L0: *mut Table<PageGlobalDirectory> = &mut LVL0_TABLE as *mut _; // was Table<Level0>
// @fixme this is for recursive page tables!!
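// In a recursive mapping the active table maps itself, so every page table is
// reachable through a virtual address: shifting the current table's virtual
// address left by 9 bits descends one level of the walk, and `index << 12`
// fills bits [20:12] to select the child table; hence the
// `(table_address << 9) | (index << 12)` computation below.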
impl<L> Table<L>
where
L: HierarchicalLevel,
{
fn next_table_address(&self, index: usize) -> Option<usize> {
let entry_flags = EntryRegister::new(self[index]);
if entry_flags.matches_all(STAGE1_DESCRIPTOR::VALID::True + STAGE1_DESCRIPTOR::TYPE::Table)
{
let table_address = self as *const _ as usize;
Some((table_address << 9) | (index << 12))
} else {
None
}
}
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
self.next_table_address(index)
.map(|address| unsafe { &*(address as *const _) })
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
self.next_table_address(index)
.map(|address| unsafe { &mut *(address as *mut _) })
}
pub fn next_table_create<A>(
&mut self,
index: usize,
allocator: &mut A,
) -> &mut Table<L::NextLevel>
where
A: FrameAllocator,
{
if self.next_table(index).is_none() {
    // The slot must currently be invalid: a valid entry here would be a
    // block descriptor, which this mapping code does not support.
    assert!(
        EntryRegister::new(self.entries[index]).read(STAGE1_DESCRIPTOR::VALID) == 0,
        "mapping code does not support huge pages"
    );
let frame = allocator.allocate_frame().expect("no frames available");
self.entries[index] = PageTableEntry::TableDescriptor(
STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(u64::from(frame))
+ STAGE1_DESCRIPTOR::VALID::True,
)
.into();
// self.entries[index]
// .set_frame(frame, STAGE1_DESCRIPTOR::VALID::True /*| WRITABLE*/);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
}
// ORIGINAL MMU.RS CODE
//static mut LVL0_TABLE: Table<PageGlobalDirectory> = Table {
// entries: [0; NUM_ENTRIES_4KIB],
// level: PhantomData,
//};

View File

@@ -13,9 +13,31 @@ use {
mod addr;
pub mod mmu;
pub mod mmu_experimental;
pub use mmu_experimental::*;
// mod area_frame_allocator;
// pub use self::area_frame_allocator::AreaFrameAllocator;
// mod boot_allocator; // Hands out physical memory obtained from devtree
// use self::paging::PAGE_SIZE;
pub use addr::PhysAddr;
pub use addr::VirtAddr;
use mmu_experimental::PhysFrame;
// @todo ??
pub trait FrameAllocator {
fn allocate_frame(&mut self) -> Option<PhysFrame>; // @todo Result<>
fn deallocate_frame(&mut self, frame: PhysFrame);
}
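// A minimal sketch of an implementor (a bump allocator over a single frame
// range; the struct and its fields are placeholders, not part of this commit):
//
// struct BumpFrameAllocator { next: PhysFrame, end: PhysFrame }
//
// impl FrameAllocator for BumpFrameAllocator {
//     fn allocate_frame(&mut self) -> Option<PhysFrame> {
//         if self.next < self.end {
//             let frame = self.next;
//             self.next += 1; // PhysFrame implements AddAssign<u64>
//             Some(frame)
//         } else {
//             None
//         }
//     }
//     fn deallocate_frame(&mut self, _frame: PhysFrame) {} // bump: never frees
// }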
// Identity-map things for now.
//
// > In its simplest form this is a table with 4096 32-bit entries starting at
// a 0x4000-aligned address, where each entry describes a 1MiB section of memory.
// On the RPi3 only the bottom 1024 entries are relevant, as it has 1GiB of memory.
// aarch64 granules and page sizes howto:
// https://stackoverflow.com/questions/34269185/simultaneous-existence-of-different-sized-pages-on-aarch64
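// A sketch of identity-mapping the RPi3's 1GiB of RAM with the experimental
// mapper above (flags, the allocator, and the active table are placeholders):
//
// for frame in PhysFrame::range(
//     PhysFrame::containing_address(PhysAddr::new(0)),
//     PhysFrame::containing_address(PhysAddr::new(0x4000_0000)), // 1GiB
// ) {
//     active_table.identity_map(frame, flags, &mut allocator);
// }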