Compare commits

...

46 Commits

Author SHA1 Message Date
Berkus Decker 1bc062081e wip untypeds boot 2021-07-11 21:21:48 +03:00
Berkus Decker 644426d806 wip mmu 2021-07-11 21:21:48 +03:00
Berkus Decker cba96c8de3 add paging illustration
this sample pretends that each directory has only
2 table entries.
2021-07-11 21:21:48 +03:00
Berkus Decker bb7316a7cb wip mmu plans 2021-07-11 21:21:48 +03:00
Berkus Decker a83905f90b Add a non-military license constraint 2021-07-11 21:21:48 +03:00
Berkus Decker 15999223a4 Use enable_jtag_gpio config option in the manual
Just enabling Alt4 for JTAG GPIO will (no longer) work.
2021-07-11 21:21:48 +03:00
Berkus Decker a86f623df2 Update to new qemu 5.2.50, openocd 0.11.0-rc2, gdb 10.1 2021-07-11 21:21:48 +03:00
Berkus Decker 2c3c9f1fbe Add SPI SWD from RPi3 manual 2021-07-11 21:21:48 +03:00
Berkus Decker ec8cac260f [wip] future crates to use 2021-07-11 21:21:48 +03:00
Berkus Decker 973c78cd3d Enable selecting target board 2021-07-11 21:21:48 +03:00
Berkus Decker adbf5c413a Add RasPi4-specific target configuration for OpenOCD
Update and move rpi3 jtag configs.
Add rpi bringup doc - lists romtables for configuring.
2021-07-11 21:21:48 +03:00
Berkus Decker 0208b0f879 Update OpenOCD version
RTT patch has been merged, so use the latest 0.11.0-rc2+dev-01576-g0d9e8bd52-dirty
2021-07-11 21:21:48 +03:00
Berkus Decker e8baa13fc3 wip improve mmu mapping code 2021-07-11 21:21:48 +03:00
Berkus Decker 2b6f1bedf4 [temp] allow dead_code while this code is experimental and unused 2021-07-11 21:21:48 +03:00
Berkus Decker c1e0a8f3dd [sq] make error enum public 2021-07-11 21:21:48 +03:00
Berkus Decker b787a2224d [sq] fix iterator checks 2021-07-11 21:21:48 +03:00
Berkus Decker bf291b917f Switch to usize for alignment checks 2021-07-11 21:21:48 +03:00
Berkus Decker ff12867b02 [sq] fix unused Result 2021-07-11 21:21:48 +03:00
Berkus Decker cc163e6d61 [sq] add missing documentation 2021-07-11 21:21:48 +03:00
Berkus Decker 389a6971b7 [wip] comment out unported code 2021-07-11 21:21:48 +03:00
Berkus Decker e33f99786b [sq] use static_assertions 2021-07-11 21:21:48 +03:00
Berkus Decker 81974b40c7 [wip] reshuffle stuff around - to be finalized 2021-07-11 21:20:10 +03:00
Berkus Decker 6c77d0930c [sq] add missing Clone derives 2021-07-11 20:54:36 +03:00
Berkus Decker a6266dc385 Make boot info compile 2021-07-11 20:54:36 +03:00
Berkus Decker 15323fe7f3 Make features printing compile 2021-07-11 20:54:36 +03:00
Berkus Decker 55e1761492 [wip] directory levels traversal 2021-07-11 20:54:36 +03:00
Berkus Decker 3c6fcfcf10 [wip] memory map initialization 2021-07-11 20:54:36 +03:00
Berkus Decker 07d7f9cd9e Update todos 2021-07-11 20:54:36 +03:00
Berkus Decker 083711b61e [wip] Improve virt_page impl and add tests 2021-07-11 20:54:36 +03:00
Berkus Decker f26fa39265 [fixme] move those out 2021-07-11 20:54:36 +03:00
Berkus Decker d278383184 Move PageSize to a mod and implement it for phys frames and virt pages 2021-07-11 20:54:36 +03:00
Berkus Decker 1eed756d04 sq we use snafu already 2021-07-11 20:54:36 +03:00
Berkus Decker 633dbd191f Implement comparison for invalid virt address error
Similar to PhysAddrNotValid.
2021-07-11 20:54:36 +03:00
Berkus Decker e215f9d62c [wip] add to-kernel-space/from-kernel-space address conversion 2021-07-11 20:54:36 +03:00
Berkus Decker de91603059 [wip] necessary modifications 2021-07-11 20:54:36 +03:00
Berkus Decker 825806fdd7 Start moving code to a new mmu2 module 2021-07-11 20:54:36 +03:00
Berkus Decker 9b5d7b14d3 Document TODO steps 2021-07-11 20:52:12 +03:00
Berkus Decker bd4015679c drop obsolete stuff 2021-07-11 20:52:12 +03:00
Berkus Decker da52104b53 sq extract features 2021-07-11 20:52:12 +03:00
Berkus Decker 851e691534 [wip] extract virt_page code 2021-07-11 20:52:12 +03:00
Berkus Decker 35097458b2 [wip] extract phys_frame code 2021-07-11 20:52:12 +03:00
Berkus Decker fa10d649e4 [wip] extract mmu features printer 2021-07-11 20:52:12 +03:00
Berkus Decker a7474f2b24 [sq] Drop invalid PDF from some old arm 2021-07-11 20:52:12 +03:00
Berkus Decker 119017c703 [wip] Add boot memory regions info 2021-07-11 20:52:12 +03:00
Berkus Decker 3f98cbe8ec [wip] MMU docs 2021-07-11 20:52:12 +03:00
Berkus Decker 3c3ce334ca [wip] mmu experiments 2021-07-11 20:52:12 +03:00
27 changed files with 2381 additions and 576 deletions

Cargo.lock generated

@ -86,6 +86,12 @@ dependencies = [
"syn",
]
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "syn"
version = "1.0.73"
@ -132,6 +138,7 @@ dependencies = [
"qemu-exit",
"r0",
"snafu",
"static_assertions",
"tock-registers",
"usize_conversions",
"ux",


@ -53,3 +53,34 @@ No contributor can revoke this license.
without any warranty or condition, and no contributor
will be liable to anyone for any damages related to this
software or this license, under any kind of legal claim.***
---
[Additional restrictions](https://blog.yossarian.net/2020/06/03/You-may-not-use-my-projects-in-a-military-or-law-enforcement-context):
The following terms additionally apply and override any above terms for
applicable parties:
You may not use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software in a military or law enforcement context,
defined as follows:
1. A military context is a professional context where the intended application
of the Software is integration or use with or by military software, tools
(software or hardware), or personnel. This includes contractors and
subcontractors as well as research affiliates of any military organization.
2. A law enforcement context is a professional context where the intended
application of the Software is integration or use with or by law enforcement
software, tools (software or hardware), or personnel. This includes
contractors and subcontractors as well as research affiliates of any law
enforcement organization.
Entities that sell or license to military or law enforcement organizations
may use the Software under the original terms, but only in contexts that do
not assist or supplement the sold or licensed product.
Students and academics who are affiliated with research institutions may use
the Software under the original terms, but only in contexts that do not assist
or supplement collaboration or affiliation with any military or law
enforcement organization.


@ -17,14 +17,18 @@ DEFAULT_TARGET = "aarch64-vesper-metta"
# Pass TARGET env var if it does not match the default target above.
TARGET = { value = "${DEFAULT_TARGET}", condition = { env_not_set = ["TARGET"] } }
# Name of the target board "rpi3" or "rpi4"
TARGET_BOARD = { value = "rpi3", condition = { env_not_set = ["TARGET_BOARD"] } }
# AArch64 QEMU binary
QEMU = { value = "qemu-system-aarch64", condition = { env_not_set = ["QEMU"] } }
QEMU = { value = "/usr/local/opt/qemu/HEAD-51db2d7cf2-mmudebug/bin/qemu-system-aarch64", condition = { env_not_set = ["QEMU"] } }
# An aarch64-enabled GDB
GDB = { value = "/usr/local/opt/gdb-8.2.1-aarhc64/bin/aarch64-linux-elf-gdb", condition = { env_not_set = ["GDB"] } }
GDB = { value = "/usr/local/opt/gdb/10.1-aarch64/bin/aarch64-linux-elf-gdb", condition = { env_not_set = ["GDB"] } }
# OpenOCD with JLink support and RTT patch from http://openocd.zylin.com/#/c/4055/11
OPENOCD = { value = "/usr/local/openocd-aeb7b327-rtt/bin/openocd", condition = { env_not_set = ["OPENOCD"] } }
# OpenOCD with JLink support
# (RTT patch from http://openocd.zylin.com/#/c/4055/11 has already been merged into main line)
OPENOCD = { value = "/usr/local/opt/openocd/4d6519593-rtt/bin/openocd", condition = { env_not_set = ["OPENOCD"] } }
# Mounted sdcard partition path
VOLUME = { value = "/Volumes/BOOT", condition = { env_not_set = ["VOLUME"] } }


@ -117,6 +117,8 @@ Various references from [OSDev Wiki](https://wiki.osdev.org/Raspberry_Pi_Bare_Bo
![Build](https://github.com/metta-systems/vesper/workflows/Build/badge.svg)
![License](https://raster.shields.io/badge/license-BlueOak%20with%20restrictions-blue.png)
[![Dependency Status](https://deps.rs/repo/github/metta-systems/vesper/status.svg)](https://deps.rs/repo/github/metta-systems/vesper)
[![Gitpod Ready-to-Code](https://img.shields.io/badge/Gitpod-Ready--to--Code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/metta-systems/vesper)
@ -132,6 +134,10 @@ Individual files contain the following tag instead of the full license text.
This enables machine processing of license information based on the SPDX
License Identifiers that are here available: http://spdx.org/licenses/
@todo http://copyfree.org/support/community
-- copyfree
-- no CoC approach of that community
----
For more information please re-read.


@ -53,16 +53,13 @@ Connecting TDI to pin 7 (GPIO4) did not work!
In config.txt:
```
# Set GPIO pins for JTAG debugger connection on rpi3
gpio=22-27=a4
```
Alternatively, just specify this: (@todo verify this works with all alt4 pins)
```
# Set GPIO pins for JTAG debugger connection on all rpi models
enable_jtag_gpio=1
```
Quote from [official doc](https://www.raspberrypi.org/documentation/configuration/config-txt/gpio.md):
> Setting enable_jtag_gpio=1 selects Alt4 mode for GPIO pins 22-27, and sets up some internal SoC connections, thus enabling the JTAG interface for the ARM CPU. It works on all models of Raspberry Pi.
### Wire Connection between boards
```

doc/rpi4_swd.md Normal file

@ -0,0 +1,4 @@
# Using RPi4 as SWD bitbanging host
[Use SPI for better reliability](https://lupyuen.github.io/articles/openocd-on-raspberry-pi-better-with-swd-on-spi)


@ -34,3 +34,6 @@ bit_field = "0.10.1"
bitflags = "1.2"
cfg-if = "1.0"
snafu = { version = "0.7.0-beta.0", default-features = false }
static_assertions = "1.1.0"
#enum_dispatch = "0.3"
#tap = "1.0" -- check no_std?


@ -72,14 +72,14 @@ env = { "QEMU_RUNNER_OPTS" = "${QEMU_SERIAL_OPTS} ${QEMU_GDB_OPTS}" }
[tasks.openocd]
dependencies = ["build", "kernel-binary"]
script = [
"${OPENOCD} -f interface/jlink.cfg -f ../doc/rpi2rpi_jtag/rpi3_target.cfg"
"${OPENOCD} -f interface/jlink.cfg -f ../ocd/${TARGET_BOARD}_target.cfg"
]
[tasks.gdb-config]
script_runner = "@duckscript"
script = [
'''
writefile ${GDB_CONNECT_FILE} "target remote :5555\n"
writefile ${GDB_CONNECT_FILE} "target extended-remote :5555\n"
'''
]


@ -9,3 +9,143 @@ a specific-size page.
----
For more information please re-read.
----
## Plan
1. MMU tables - because we need separate memspaces for kernel and userspace
1a. Allocate initial page tables
1b. Map over available RAM sensibly
1c. Create kernel's own mapping (TTBR1_EL1)
## What does the kernel MMU code support?
* mapping
* unmapping
* switching per-process mappings (virtspaces)
* virt2phys resolution
* direct phys access for kernel (TTBR1_EL1 mapping to physmem)
* initial kernel memory allocation: for mapping tables and capnodes, for initial thread TCB and stacks
## public api
ARMMU invocations:

* on page directory cap
  * cache maintenance (clean/invalidate/unify)
* on page table cap
  * map
  * unmap
* on small frame/frame caps
  * map
  * remap
  * unmap
  * cache maintenance (clean/invalidate/unify)
  * get address
* on asid control cap
* on asid pool cap
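A rough sketch of how this invocation set could look as a Rust enum on the kernel side. All names and payload types here are placeholders (with small local type aliases so the sketch stands alone), not the committed API:

```rust
// Stand-in types so the sketch is self-contained; the kernel will use its own.
type Cap = usize;
type VirtAddr = u64;
type MapFlags = u64;

pub enum CacheOp {
    Clean,
    Invalidate,
    CleanAndInvalidate, // "unify"
}

pub enum MmuInvocation {
    // on page directory cap
    DirectoryCacheMaintain { dir: Cap, op: CacheOp },
    // on page table cap
    TableMap { table: Cap, into_directory: Cap, at: VirtAddr },
    TableUnmap { table: Cap },
    // on small frame/frame caps
    FrameMap { frame: Cap, into_table: Cap, at: VirtAddr, flags: MapFlags },
    FrameRemap { frame: Cap, flags: MapFlags },
    FrameUnmap { frame: Cap },
    FrameCacheMaintain { frame: Cap, op: CacheOp },
    FrameGetAddress { frame: Cap },
    // asid control / asid pool invocations would get their own variants
}
```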
## Minimum Required Functionality (build from this)
* resolve VA to PA - resolving lets kernel access mapped process memory.
  (start from the process' virtspace root - Page Directory)
* flush page, pd, pt, virtspace - will be important for thread switching
* map a page table to appropriate location
* unmap entire mapped page table
* map a phys frame to virt location
* unmap a mapped frame
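A minimal Rust sketch of this interface, assuming the `VirtAddr`, `PhysAddr`, `Page` and `PhysFrame` types introduced elsewhere in this changeset; the trait and method names are placeholders, not the final design:

```rust
/// Hypothetical trait collecting the minimum required operations listed above.
pub trait VirtSpace {
    type Error;

    /// Resolve a virtual address to a physical one, walking from the
    /// virtspace root (Page Directory).
    fn resolve(&self, virt: VirtAddr) -> Result<PhysAddr, Self::Error>;

    /// Map a page table at the appropriate location / unmap it entirely.
    fn map_table(&mut self, at: Page, table: PhysFrame) -> Result<(), Self::Error>;
    fn unmap_table(&mut self, at: Page) -> Result<(), Self::Error>;

    /// Map a physical frame at a virtual location / unmap a mapped frame.
    fn map_frame(&mut self, page: Page, frame: PhysFrame) -> Result<(), Self::Error>;
    fn unmap_frame(&mut self, page: Page) -> Result<(), Self::Error>;

    /// Flush a single page or the whole virtspace
    /// (will be important for thread switching).
    fn flush_page(&mut self, page: Page);
    fn flush_all(&mut self);
}
```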
## Requirements
GIVEN
A PageGlobalDirectory of a process
FIND
The kernel-physical address at which it holds a certain leaf node.
## sel4
> seL4 does not provide virtual memory management, beyond kernel primitives for manipulating hardware paging structures. User-level must provide services for creating intermediate paging structures, mapping and unmapping pages.
> Users are free to define their own address space layout with one restriction: the seL4 kernel claims the high part of the virtual memory range. On most 32-bit platforms, this is 0xe0000000 and above. This variable is set per platform, and can be found by finding the kernelBase variable in the seL4 source.
(from https://docs.sel4.systems/Tutorials/mapping.html)
> Note that to map a frame multiple times, one must make copies of the frame capability: each frame capability can only track one mapping.
## howto steps
initial mapping:

* for kernel space -
  1. obtain memory map
  2. build a list of regions available as RAM
  3. find largest covering page sizes (see the sketch at the end of this section)
  4. allocate several memory pages and fill them with table info
     (need page table creation functions here)
  5. now the kernel is able to address physical memory through its (negative) kernel mapping.
  6. prepare init thread VSpace
     - this is more complicated wrt mapping
// The region of the initial thread is the user image + ipcbuf and boot info.
/* setup virtual memory for the kernel */
map_kernel_window();
/* Construct an initial address space with enough virtual addresses
* to cover the user image + ipc buffer and bootinfo frames */
it_pd_cap = create_it_address_space(root_cnode_cap, it_v_reg);
/* Create and map bootinfo frame cap */
create_bi_frame_cap(
root_cnode_cap,
it_pd_cap,
bi_frame_pptr,
bi_frame_vptr
);
/* create the initial thread's IPC buffer */
ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_pd_cap, ipcbuf_vptr);
/* create all userland image frames */
create_frames_ret =
create_frames_of_region(
root_cnode_cap,
it_pd_cap,
ui_reg,
true,
pv_offset
);
ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;
... later ...
/* create the initial thread */
if (!create_initial_thread(
root_cnode_cap,
it_pd_cap,
v_entry,
bi_frame_vptr,
ipcbuf_vptr,
ipcbuf_cap
)) {
/* create all of the untypeds. Both devices and kernel window memory */
if (!create_untypeds(
root_cnode_cap,
(region_t) {
0xf0000000, (pptr_t)ki_boot_end
} /* reusable boot code/data */
)) {
return false;
}
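Steps 1–3 of the kernel-space mapping above (memory map → usable RAM regions → largest covering page sizes) can be sketched as follows. `Region` is a stand-in for the boot memory regions (compare `BootInfoMemRegion` in `boot_info.rs`); the size choice simply picks the biggest granule that is aligned at the current address and still fits inside the region:

```rust
#[derive(Clone, Copy)]
struct Region {
    start: u64, // inclusive
    end: u64,   // exclusive
}

/// Pick the largest block size (1 GiB, 2 MiB or 4 KiB) usable at `addr`.
fn largest_covering_size(addr: u64, end: u64) -> u64 {
    const SIZES: [u64; 3] = [1 << 30, 1 << 21, 1 << 12]; // 1 GiB, 2 MiB, 4 KiB
    for &size in &SIZES {
        if addr % size == 0 && addr + size <= end {
            return size;
        }
    }
    1 << 12 // fall back to a single 4 KiB page
}

/// Walk one RAM region and decide the block size for each mapping.
fn plan_kernel_mapping(region: Region) {
    let mut addr = region.start;
    while addr < region.end {
        let size = largest_covering_size(addr, region.end);
        // the real code would allocate and fill a table entry for [addr, addr + size) here
        addr += size;
    }
}
```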


@ -10,3 +10,8 @@ mod virt_addr;
pub use asid::*;
pub use phys_addr::*;
pub use virt_addr::*;
// @todo Check largest VA supported, calculate physical_memory_offset
// @todo Keep in mind amount of physical memory present, the following
// @todo will only work for 1Gb board:
pub const PHYSICAL_MEMORY_OFFSET: u64 = 0xffff_8000_0000_0000; // Last 1GiB of VA space


@ -4,7 +4,10 @@
*/
use {
crate::mm::{align_down, align_up},
crate::{
memory::VirtAddr,
mm::{align_down, align_up},
},
bit_field::BitField,
core::{
convert::{From, Into},
@ -75,7 +78,7 @@ impl PhysAddr {
/// See the `align_up` function for more information.
pub fn aligned_up<U>(self, align: U) -> Self
where
U: Into<u64>,
U: Into<usize>,
{
PhysAddr(align_up(self.0, align.into()))
}
@ -85,7 +88,7 @@ impl PhysAddr {
/// See the `align_down` function for more information.
pub fn aligned_down<U>(self, align: U) -> Self
where
U: Into<u64>,
U: Into<usize>,
{
PhysAddr(align_down(self.0, align.into()))
}
@ -93,10 +96,17 @@ impl PhysAddr {
/// Checks whether the physical address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
U: Into<usize>,
{
self.aligned_down(align) == self
}
/// Convert physical memory address into a kernel virtual address.
pub fn user_to_kernel(&self) -> VirtAddr {
use super::PHYSICAL_MEMORY_OFFSET;
assert!(self.0 < !PHYSICAL_MEMORY_OFFSET); // Can't have phys address over 1GiB then
VirtAddr::new(self.0 + PHYSICAL_MEMORY_OFFSET)
}
}
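// Illustrative example (hypothetical values, assuming the PHYSICAL_MEMORY_OFFSET
// introduced in this changeset): a peripheral frame at physical 0x3f20_1000 is
// reached through the kernel mapping at 0xffff_8000_3f20_1000.
//
//     let gpio = PhysAddr::new(0x3f20_1000);
//     let kva = gpio.user_to_kernel();
//     assert_eq!(kva, VirtAddr::new(0xffff_8000_3f20_1000));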
impl fmt::Debug for PhysAddr {


@ -4,7 +4,10 @@
*/
use {
crate::mm::{align_down, align_up},
crate::{
memory::PhysAddr,
mm::{align_down, align_up},
},
bit_field::BitField,
core::{
convert::{From, Into, TryInto},
@ -34,7 +37,7 @@ pub struct VirtAddr(u64);
/// a valid sign extension and are not null either. So automatic sign extension would have
/// overwritten possibly meaningful bits. This likely indicates a bug, for example an invalid
/// address calculation.
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct VirtAddrNotValid(u64);
impl VirtAddr {
@ -110,7 +113,7 @@ impl VirtAddr {
/// See the `align_up` free function for more information.
pub fn aligned_up<U>(self, align: U) -> Self
where
U: Into<u64>,
U: Into<usize>,
{
VirtAddr(align_up(self.0, align.into()))
}
@ -120,7 +123,7 @@ impl VirtAddr {
/// See the `align_down` free function for more information.
pub fn aligned_down<U>(self, align: U) -> Self
where
U: Into<u64>,
U: Into<usize>,
{
VirtAddr(align_down(self.0, align.into()))
}
@ -128,7 +131,7 @@ impl VirtAddr {
/// Checks whether the virtual address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
U: Into<usize>,
{
self.aligned_down(align) == self
}
@ -158,6 +161,13 @@ impl VirtAddr {
pub fn l0_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9 >> 9) & 0o777).try_into().unwrap())
}
/// Convert kernel-space virtual address into a physical memory address.
pub fn kernel_to_user(&self) -> PhysAddr {
use super::PHYSICAL_MEMORY_OFFSET;
assert!(self.0 > PHYSICAL_MEMORY_OFFSET);
PhysAddr::new(self.0 - PHYSICAL_MEMORY_OFFSET)
}
}
impl fmt::Debug for VirtAddr {


@ -0,0 +1,106 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
use super::{Frame, FrameAllocator};
use multiboot2::{MemoryArea, MemoryAreaIter}; // replace with DTB?
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
kernel_start: Frame,
kernel_end: Frame,
multiboot_start: Frame,
multiboot_end: Frame,
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(_area) = self.current_area {
// "Clone" the frame to return it if it's free. Frame doesn't
// implement Clone, but we can construct an identical frame.
let frame = Frame {
number: self.next_free_frame.number,
};
// the last frame of the current area
let current_area_last_frame = Frame::containing_address(0x3f00_0000);
// {
// let address = area.base_addr + area.length - 1;
// Frame::containing_address(address as usize)
// };
if frame > current_area_last_frame {
// all frames of current area are used, switch to next area
// self.choose_next_area();
unimplemented!();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
// `frame` is used by the kernel
self.next_free_frame = Frame {
number: self.kernel_end.number + 1,
};
} else if frame >= self.multiboot_start && frame <= self.multiboot_end {
// `frame` is used by the multiboot information structure
self.next_free_frame = Frame {
number: self.multiboot_end.number + 1,
};
} else {
// frame is unused, increment `next_free_frame` and return it
self.next_free_frame.number += 1;
return Some(frame);
}
// `frame` was not valid, try it again with the updated `next_free_frame`
self.allocate_frame()
} else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame) {
unimplemented!()
}
}
// Fixme: no multiboot, but dtb instead with avail memory regions
// Need dtb parser here!
impl AreaFrameAllocator {
pub fn new(
kernel_start: usize,
kernel_end: usize,
multiboot_start: usize,
multiboot_end: usize,
memory_areas: MemoryAreaIter,
) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(0),
current_area: None,
areas: memory_areas,
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
multiboot_start: Frame::containing_address(multiboot_start),
multiboot_end: Frame::containing_address(multiboot_end),
};
// allocator.choose_next_area();
allocator.next_free_frame = Frame::containing_address(0x100000); // start from 1Mb
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self
.areas
.clone()
.filter(|area| {
let address = area.base_addr + area.length - 1;
Frame::containing_address(address as usize) >= self.next_free_frame
})
.min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(area.base_addr as usize);
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}


@ -0,0 +1,15 @@
/*
* SPDX-License-Identifier: BlueOak-1.0.0
*/
// Allocate regions from boot memory list obtained from devtree
pub struct BootRegionAllocator {}
impl BootRegionAllocator {
pub fn new(_boot_info: &BootInfo) -> Self {
Self {}
}
pub fn alloc_region(&mut self) {}
pub fn alloc_zeroed(&mut self) {}
}
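// One possible shape for this allocator (a sketch only; field and method
// signatures are illustrative, the real version will bump-allocate out of the
// BootInfo memory region list parsed from the device tree):
//
//     pub struct BootRegionAllocator {
//         regions: [BootInfoMemRegion; NUM_MEM_REGIONS], // free RAM regions
//     }
//
//     impl BootRegionAllocator {
//         /// Carve `size` bytes off the first region large enough to hold them.
//         pub fn alloc_region(&mut self, size: u64) -> Option<PhysAddr> {
//             for region in self.regions.iter_mut() {
//                 if region.size() >= size {
//                     let start = region.start;
//                     region.start = PhysAddr::new(start.as_u64() + size);
//                     return Some(start);
//                 }
//             }
//             None
//         }
//     }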


@ -0,0 +1,126 @@
//! Print MMU supported features for debugging.
use {
crate::println,
cortex_a::regs::{RegisterReadOnly, RegisterReadWrite, ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1},
};
/// Parse the ID_AA64MMFR0_EL1 register for runtime information about supported MMU features.
/// Print the current state of TCR register.
pub fn print_features() {
let sctlr = SCTLR_EL1.extract();
if let Some(SCTLR_EL1::M::Value::Enable) = sctlr.read_as_enum(SCTLR_EL1::M) {
println!("[i] MMU currently enabled");
}
if let Some(SCTLR_EL1::I::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::I) {
println!("[i] MMU I-cache enabled");
}
if let Some(SCTLR_EL1::C::Value::Cacheable) = sctlr.read_as_enum(SCTLR_EL1::C) {
println!("[i] MMU D-cache enabled");
}
let mmfr = ID_AA64MMFR0_EL1.extract();
if let Some(ID_AA64MMFR0_EL1::TGran4::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran4)
{
println!("[i] MMU: 4 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran16::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran16)
{
println!("[i] MMU: 16 KiB granule supported!");
}
if let Some(ID_AA64MMFR0_EL1::TGran64::Value::Supported) =
mmfr.read_as_enum(ID_AA64MMFR0_EL1::TGran64)
{
println!("[i] MMU: 64 KiB granule supported!");
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::ASIDBits) {
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_16) => {
println!("[i] MMU: 16 bit ASIDs supported!")
}
Some(ID_AA64MMFR0_EL1::ASIDBits::Value::Bits_8) => {
println!("[i] MMU: 8 bit ASIDs supported!")
}
_ => println!("[i] MMU: Invalid ASID bits specified!"),
}
match mmfr.read_as_enum(ID_AA64MMFR0_EL1::PARange) {
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_32) => {
println!("[i] MMU: Up to 32 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_36) => {
println!("[i] MMU: Up to 36 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_40) => {
println!("[i] MMU: Up to 40 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_42) => {
println!("[i] MMU: Up to 42 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_44) => {
println!("[i] MMU: Up to 44 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_48) => {
println!("[i] MMU: Up to 48 Bit physical address range supported!")
}
Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_52) => {
println!("[i] MMU: Up to 52 Bit physical address range supported!")
}
_ => println!("[i] MMU: Invalid PARange specified!"),
}
let tcr = TCR_EL1.extract();
match tcr.read_as_enum(TCR_EL1::IPS) {
Some(TCR_EL1::IPS::Value::Bits_32) => {
println!("[i] MMU: 32 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_36) => {
println!("[i] MMU: 36 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_40) => {
println!("[i] MMU: 40 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_42) => {
println!("[i] MMU: 42 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_44) => {
println!("[i] MMU: 44 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_48) => {
println!("[i] MMU: 48 Bit intermediate physical address size supported!")
}
Some(TCR_EL1::IPS::Value::Bits_52) => {
println!("[i] MMU: 52 Bit intermediate physical address size supported!")
}
_ => println!("[i] MMU: Invalid IPS specified!"),
}
match tcr.read_as_enum(TCR_EL1::TG0) {
Some(TCR_EL1::TG0::Value::KiB_4) => println!("[i] MMU: TTBR0 4 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_16) => println!("[i] MMU: TTBR0 16 KiB granule active!"),
Some(TCR_EL1::TG0::Value::KiB_64) => println!("[i] MMU: TTBR0 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR0 granule size specified!"),
}
let t0sz = tcr.read(TCR_EL1::T0SZ);
println!("[i] MMU: T0sz = 64-{} = {} bits", t0sz, 64 - t0sz);
match tcr.read_as_enum(TCR_EL1::TG1) {
Some(TCR_EL1::TG1::Value::KiB_4) => println!("[i] MMU: TTBR1 4 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_16) => println!("[i] MMU: TTBR1 16 KiB granule active!"),
Some(TCR_EL1::TG1::Value::KiB_64) => println!("[i] MMU: TTBR1 64 KiB granule active!"),
_ => println!("[i] MMU: Invalid TTBR1 granule size specified!"),
}
let t1sz = tcr.read(TCR_EL1::T1SZ);
println!("[i] MMU: T1sz = 64-{} = {} bits", t1sz, 64 - t1sz);
}


@ -0,0 +1,586 @@
use {
crate::{
arch::aarch64::memory::{
get_virt_addr_properties, AttributeFields, /*FrameAllocator, PhysAddr, VirtAddr,*/
},
println,
},
// bitflags::bitflags,
core::{
// convert::TryInto,
// fmt,
marker::PhantomData,
ops::{Index, IndexMut},
// ptr::Unique,
},
cortex_a::{
barrier,
regs::{ID_AA64MMFR0_EL1, SCTLR_EL1, TCR_EL1, TTBR0_EL1},
},
register::{
cpu::{RegisterReadOnly, RegisterReadWrite},
register_bitfields,
},
// ux::*,
};
mod mair {
use cortex_a::regs::MAIR_EL1;
/// Setup function for the MAIR_EL1 register.
pub fn set_up() {
use cortex_a::regs::RegisterReadWrite;
// Define the three memory types that we will map. Normal DRAM, Uncached and device.
MAIR_EL1.write(
// Attribute 2 -- Device Memory
MAIR_EL1::Attr2_Device::nonGathering_nonReordering_EarlyWriteAck
// Attribute 1 -- Non Cacheable DRAM
+ MAIR_EL1::Attr1_Normal_Outer::NonCacheable
+ MAIR_EL1::Attr1_Normal_Inner::NonCacheable
// Attribute 0 -- Regular Cacheable
+ MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc
+ MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc,
);
}
// Three descriptive consts for indexing into the correct MAIR_EL1 attributes.
pub mod attr {
pub const NORMAL: u64 = 0;
pub const NORMAL_NON_CACHEABLE: u64 = 1;
pub const DEVICE_NGNRE: u64 = 2;
// DEVICE_GRE
// DEVICE_NGNRNE
}
}
/// A function that maps the generic memory range attributes to HW-specific
/// attributes of the MMU.
fn into_mmu_attributes(
attribute_fields: AttributeFields,
) -> register::FieldValue<u64, STAGE1_DESCRIPTOR::Register> {
use super::{AccessPermissions, MemAttributes};
// Memory attributes
let mut desc = match attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => {
STAGE1_DESCRIPTOR::SH::InnerShareable
+ STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL)
}
MemAttributes::NonCacheableDRAM => {
STAGE1_DESCRIPTOR::SH::InnerShareable
+ STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::NORMAL_NON_CACHEABLE)
}
MemAttributes::Device => {
STAGE1_DESCRIPTOR::SH::OuterShareable
+ STAGE1_DESCRIPTOR::AttrIndx.val(mair::attr::DEVICE_NGNRE)
}
};
// Access Permissions
desc += match attribute_fields.acc_perms {
AccessPermissions::ReadOnly => STAGE1_DESCRIPTOR::AP::RO_EL1,
AccessPermissions::ReadWrite => STAGE1_DESCRIPTOR::AP::RW_EL1,
};
// Execute Never
desc += if attribute_fields.execute_never {
STAGE1_DESCRIPTOR::PXN::NeverExecute
} else {
STAGE1_DESCRIPTOR::PXN::Execute
};
desc
}
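// For example, a regular kernel data mapping with AttributeFields {
// mem_attributes: MemAttributes::CacheableDRAM, acc_perms: AccessPermissions::ReadWrite,
// execute_never: true } becomes SH::InnerShareable + AttrIndx(attr::NORMAL)
// + AP::RW_EL1 + PXN::NeverExecute.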
/// Type-safe enum wrapper covering Table<L>'s 64-bit entries.
#[derive(Clone)]
// #[repr(transparent)]
enum PageTableEntry {
/// Empty page table entry.
Invalid,
/// Table descriptor is a L0, L1 or L2 table pointing to another table.
/// L0 tables can only point to L1 tables.
/// A descriptor pointing to the next page table.
TableDescriptor(EntryFlags),
/// A Level2 block descriptor with 2 MiB aperture.
///
/// The output points to physical memory.
Lvl2BlockDescriptor(EntryFlags),
/// A page PageTableEntry::descriptor with 4 KiB aperture.
///
/// The output points to physical memory.
PageDescriptor(EntryFlags),
}
/// A descriptor pointing to the next page table. (within PageTableEntry enum)
// struct TableDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
impl PageTableEntry {
fn new_table_descriptor(next_lvl_table_addr: usize) -> Result<PageTableEntry, &'static str> {
if next_lvl_table_addr % Size4KiB::SIZE as usize != 0 {
// @todo SIZE must be usize
return Err("TableDescriptor: Address is not 4 KiB aligned.");
}
let shifted = next_lvl_table_addr >> Size4KiB::SHIFT;
Ok(PageTableEntry::TableDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::TYPE::Table
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
))
}
}
#[derive(Snafu, Debug)]
enum PageTableError {
#[snafu(display("BlockDescriptor: Address is not 2 MiB aligned."))]
//"PageDescriptor: Address is not 4 KiB aligned."
NotAligned(&'static str),
}
/// A Level2 block descriptor with 2 MiB aperture.
///
/// The output points to physical memory.
// struct Lvl2BlockDescriptor(register::FieldValue<u64, STAGE1_DESCRIPTOR::Register>);
impl PageTableEntry {
fn new_lvl2_block_descriptor(
output_addr: usize,
attribute_fields: AttributeFields,
) -> Result<PageTableEntry, PageTableError> {
if output_addr % Size2MiB::SIZE as usize != 0 {
return Err(PageTableError::NotAligned(Size2MiB::SIZE_AS_DEBUG_STR));
}
let shifted = output_addr >> Size2MiB::SHIFT;
Ok(PageTableEntry::Lvl2BlockDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::AF::True
+ into_mmu_attributes(attribute_fields)
+ STAGE1_DESCRIPTOR::TYPE::Block
+ STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(shifted as u64),
))
}
}
/// A page descriptor with 4 KiB aperture.
///
/// The output points to physical memory.
impl PageTableEntry {
fn new_page_descriptor(
output_addr: usize,
attribute_fields: AttributeFields,
) -> Result<PageTableEntry, PageTableError> {
if output_addr % Size4KiB::SIZE as usize != 0 {
return Err(PageTableError::NotAligned(Size4KiB::SIZE_AS_DEBUG_STR));
}
let shifted = output_addr >> Size4KiB::SHIFT;
Ok(PageTableEntry::PageDescriptor(
STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::AF::True
+ into_mmu_attributes(attribute_fields)
+ STAGE1_DESCRIPTOR::TYPE::Table
+ STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(shifted as u64),
))
}
}
impl From<u64> for PageTableEntry {
fn from(_val: u64) -> PageTableEntry {
// xxx0 -> Invalid
// xx11 -> TableDescriptor on L0, L1 and L2
// xx01 -> Block Entry L1 and L2
// xx11 -> PageDescriptor L3
PageTableEntry::Invalid
}
}
impl From<PageTableEntry> for u64 {
fn from(val: PageTableEntry) -> u64 {
match val {
PageTableEntry::Invalid => 0,
PageTableEntry::TableDescriptor(x)
| PageTableEntry::Lvl2BlockDescriptor(x)
| PageTableEntry::PageDescriptor(x) => x.value,
}
}
}
// to get L0 we must allocate a few frames from boot region allocator.
// So, first we init the dtb, parse mem-regions from there, then init boot_info page and start mmu,
// this part will be inited in mmu::init():
// // @todo do NOT keep these statically, always allocate from available bump memory
// static mut LVL2_TABLE: Table<PageDirectory> = Table::<PageDirectory> {
// entries: [0; NUM_ENTRIES_4KIB as usize],
// level: PhantomData,
// };
//
// // @todo do NOT keep these statically, always allocate from available bump memory
// static mut LVL3_TABLE: Table<PageTable> = Table::<PageTable> {
// entries: [0; NUM_ENTRIES_4KIB as usize],
// level: PhantomData,
// };
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
}
impl BaseAddr for [u64; 512] {
fn base_addr_u64(&self) -> u64 {
self as *const u64 as u64
}
fn base_addr_usize(&self) -> usize {
self as *const u64 as usize
}
}
/// Set up identity mapped page tables for the first 1 gigabyte of address space.
/// default: 880 MB ARM ram, 128MB VC
///
/// # Safety
///
/// Completely unsafe, we're in the hardware land! Incorrectly initialised tables will just
/// restart the CPU.
pub unsafe fn init() -> Result<(), &'static str> {
// Prepare the memory attribute indirection register.
mair::set_up();
// should receive in args an obtained memory map from DT
let memory_map = Regions {
start: 0x1000,
size: 0x10000,
};
// bump-allocate page tables for entire memory
// also allocate phys memory to kernel space!
//
// separate regions - regular memory, device mmaps,
// initial thread maps ALL the memory??
// instead
// init thread may map only necessary mem
// boot time only map kernel physmem space, and currently loaded kernel data
// PROBABLY only kernel mapping TTBR1 is needed, the rest is not very useful?
// take over protected memory space though anyway.
// Point the first 2 MiB of virtual addresses to the follow-up LVL3
// page-table.
// LVL2_TABLE.entries[0] =
// PageTableEntry::new_table_descriptor(LVL3_TABLE.entries.base_addr_usize())?.into();
// Fill the rest of the LVL2 (2 MiB) entries as block descriptors.
//
// Notice the skip(1) which makes the iteration start at the second 2 MiB
// block (0x20_0000).
for (block_descriptor_nr, entry) in LVL2_TABLE.entries.iter_mut().enumerate().skip(1) {
let virt_addr = block_descriptor_nr << Size2MiB::SHIFT;
let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
Err(s) => return Err(s),
Ok((a, b)) => (a, b),
};
let block_desc =
match PageTableEntry::new_lvl2_block_descriptor(output_addr, attribute_fields) {
Err(s) => return Err(s),
Ok(desc) => desc,
};
*entry = block_desc.into();
}
// Finally, fill the single LVL3 table (4 KiB granule).
for (page_descriptor_nr, entry) in LVL3_TABLE.entries.iter_mut().enumerate() {
let virt_addr = page_descriptor_nr << Size4KiB::SHIFT;
let (output_addr, attribute_fields) = match get_virt_addr_properties(virt_addr) {
Err(s) => return Err(s),
Ok((a, b)) => (a, b),
};
let page_desc = match PageTableEntry::new_page_descriptor(output_addr, attribute_fields) {
Err(s) => return Err(s),
Ok(desc) => desc,
};
*entry = page_desc.into();
}
}
// AArch64:
// Table D4-8-2021: check supported granule sizes, select alloc policy based on results.
// TTBR_ELx is the pdbr for specific page tables
// Page 2068 actual page descriptor formats
// Pointer to currently active page table
// Could be either user space (TTBR0) or kernel space (TTBR1) -- ??
pub struct ActivePageTable {
l0: Unique<Table<PageGlobalDirectory>>,
}
impl ActivePageTable {
pub unsafe fn new() -> ActivePageTable {
ActivePageTable {
l0: Unique::new_unchecked(0 as *mut _),
}
}
fn l0(&self) -> &Table<PageGlobalDirectory> {
unsafe { self.l0.as_ref() }
}
fn l0_mut(&mut self) -> &mut Table<PageGlobalDirectory> {
unsafe { self.l0.as_mut() }
}
// pub fn translate(&self, virtual_address: VirtAddr) -> Result<PhysAddr, TranslationError> {
// let offset = virtual_address % Size4KiB::SIZE as usize; // @todo use the size of the last page of course
// self.translate_page(Page::containing_address(virtual_address))?
// .map(|frame| frame.start_address() + offset)
// }
fn translate_page(&self, page: Page) -> Result<PhysFrame, TranslationError> {
// @todo translate only one level of hierarchy per impl function...
let l1 = self.l0().next_table(u64::from(page.l0_index()) as usize);
/*
let huge_page = || {
l1.and_then(|l1| {
let l1_entry = &l1[page.l1_index() as usize];
// 1GiB page?
if let Some(start_frame) = l1_entry.pointed_frame() {
if l1_entry.flags().read(STAGE1_DESCRIPTOR::TYPE)
!= STAGE1_DESCRIPTOR::TYPE::Table.value
{
// address must be 1GiB aligned
//start_frame.is_aligned()
assert!(start_frame.number % (NUM_ENTRIES_4KIB * NUM_ENTRIES_4KIB) == 0);
return Ok(PhysFrame::from_start_address(
start_frame.number
+ page.l2_index() * NUM_ENTRIES_4KIB
+ page.l3_index(),
));
}
}
if let Some(l2) = l1.next_table(page.l1_index()) {
let l2_entry = &l2[page.l2_index()];
// 2MiB page?
if let Some(start_frame) = l2_entry.pointed_frame() {
if l2_entry.flags().read(STAGE1_DESCRIPTOR::TYPE)
!= STAGE1_DESCRIPTOR::TYPE::Table
{
// address must be 2MiB aligned
assert!(start_frame.number % NUM_ENTRIES_4KIB == 0);
return Ok(PhysFrame::from_start_address(
start_frame.number + page.l3_index(),
));
}
}
}
Err(TranslationError::NoPage)
})
};
*/
let v = l1
.and_then(|l1| l1.next_table(u64::from(page.l1_index()) as usize))
.and_then(|l2| l2.next_table(u64::from(page.l2_index()) as usize))
.and_then(|l3| Some(l3[u64::from(page.l3_index()) as usize])); //.pointed_frame())
// .ok_or(TranslationError::NoPage)
// .or_else(huge_page)
Ok(v.unwrap().into())
}
pub fn map_to<A>(&mut self, page: Page, frame: PhysFrame, flags: EntryFlags, allocator: &mut A)
where
A: FrameAllocator,
{
let l0 = self.l0_mut();
let l1 = l0.next_table_create(u64::from(page.l0_index()) as usize, allocator);
let l2 = l1.next_table_create(u64::from(page.l1_index()) as usize, allocator);
let l3 = l2.next_table_create(u64::from(page.l2_index()) as usize, allocator);
assert_eq!(
l3[u64::from(page.l3_index()) as usize],
0 /*.is_unused()*/
);
l3[u64::from(page.l3_index()) as usize] = PageTableEntry::PageDescriptor(
STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(u64::from(frame))
+ flags // @todo properly extract flags
+ STAGE1_DESCRIPTOR::VALID::True,
)
.into();
}
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where
A: FrameAllocator,
{
// @todo fail mapping if table is not allocated, causing client to allocate and restart
// @todo problems described in preso - chicken&egg problem of allocating first allocations
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
}
pub fn identity_map<A>(&mut self, frame: PhysFrame, flags: EntryFlags, allocator: &mut A)
where
A: FrameAllocator,
{
let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
self.map_to(page, frame, flags, allocator)
}
fn unmap<A>(&mut self, page: Page, _allocator: &mut A)
where
A: FrameAllocator,
{
// use aarch64::instructions::tlb;
// use x86_64::VirtAddr;
assert!(self.translate(page.start_address()).is_ok());
let l3 = self
.l0_mut()
.next_table_mut(u64::from(page.l0_index()) as usize)
.and_then(|l1| l1.next_table_mut(u64::from(page.l1_index()) as usize))
.and_then(|l2| l2.next_table_mut(u64::from(page.l2_index()) as usize))
.expect("mapping code does not support huge pages");
let _frame = l3[u64::from(page.l3_index()) as usize];
// .pointed_frame()
// .unwrap();
l3[u64::from(page.l3_index()) as usize] = 0; /*.set_unused(); */
// tlb::flush(VirtAddr(page.start_address()));
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);
// @todo do NOT deallocate frames either, but need to signal client that it's unused
}
}
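// Illustrative use of the mapper above (a sketch; `DEVICE_FLAGS` and the frame
// allocator `alloc` are hypothetical and must be supplied by the caller):
//
//     let mut active = unsafe { ActivePageTable::new() };
//     let uart_frame = PhysFrame::containing_address(PhysAddr::new(0x3f20_1000));
//     // identity-map a single MMIO frame so the kernel keeps access to the UART
//     active.identity_map(uart_frame, DEVICE_FLAGS, &mut alloc);
//     // ...and tear the mapping down again once it is no longer needed
//     active.unmap(Page::containing_address(VirtAddr::new(0x3f20_1000)), &mut alloc);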
// Abstractions for page table entries.
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Snafu, Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
/// The entry has the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned. @todo
HugeFrame,
}
/// A 64-bit page table entry.
// pub struct PageTableEntry {
// entry: u64,
// }
const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
/*
impl PageTableEntry {
/// Creates an unused page table entry.
pub fn new() -> Self {
PageTableEntry::Invalid
}
/// Returns whether this entry is zero.
pub fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
pub fn flags(&self) -> EntryFlags {
EntryFlags::new(self.entry)
}
/// Returns the physical address mapped by this entry, might be zero.
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & ADDR_MASK)
}
/// Returns the physical frame mapped by this entry.
///
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used)
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if !self.flags().read(STAGE1_DESCRIPTOR::VALID) {
Err(FrameError::FrameNotPresent)
// } else if self.flags().contains(EntryFlags::HUGE_PAGE) {
// Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
/// Map the entry to the specified physical address with the specified flags.
pub fn set_addr(&mut self, addr: PhysAddr, flags: EntryFlags) {
assert!(addr.is_aligned(Size4KiB::SIZE));
self.entry = addr.as_u64() | flags.bits();
}
/// Map the entry to the specified physical frame with the specified flags.
pub fn set_frame(&mut self, frame: PhysFrame, flags: EntryFlags) {
// assert!(!flags.contains(EntryFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}
/// Sets the flags of this entry.
pub fn set_flags(&mut self, flags: EntryFlags) {
// Todo: extract ADDR from self and replace all flags completely (?)
self.entry = self.addr().as_u64() | flags.bits();
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("addr", &self.addr());
f.field("flags", &self.flags());
f.finish()
}
}*/
impl<Level> Table<Level>
where
Level: HierarchicalLevel,
{
pub fn next_table_create<Alloc>(
&mut self,
index: usize,
allocator: &mut Alloc,
) -> &mut Table<Level::NextLevel>
where
Alloc: FrameAllocator,
{
if self.next_table(index).is_none() {
assert!(
EntryRegister::new(self.entries[index]).read(STAGE1_DESCRIPTOR::TYPE)
== STAGE1_DESCRIPTOR::TYPE::Table.value,
"mapping code does not support huge pages"
);
let frame = allocator.allocate_frame().expect("no frames available");
self.entries[index] = PageTableEntry::TableDescriptor(
STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(u64::from(frame))
+ STAGE1_DESCRIPTOR::VALID::True,
)
.into();
// self.entries[index]
// .set_frame(frame, STAGE1_DESCRIPTOR::VALID::True /*| WRITABLE*/);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
}

File diff suppressed because it is too large.


@ -11,11 +11,34 @@ use {
};
mod addr;
pub mod features; // @todo make only pub re-export?
pub mod mmu;
mod page_size;
mod phys_frame;
mod virt_page;
pub use mmu::*;
// mod area_frame_allocator;
// pub use self::area_frame_allocator::AreaFrameAllocator;
// mod boot_allocator; // Hands out physical memory obtained from devtree
// use self::paging::PAGE_SIZE;
pub use addr::PhysAddr;
pub use addr::VirtAddr;
pub use page_size::PageSize;
pub use phys_frame::PhysFrame;
/// @todo ??
pub trait FrameAllocator {
/// Allocate a physical memory frame.
fn allocate_frame(&mut self) -> Option<PhysFrame>; // @todo Result<>
/// Deallocate a physical frame.
fn deallocate_frame(&mut self, frame: PhysFrame);
}
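// A minimal illustration of the trait (a sketch, not part of this changeset):
// a bump allocator handing out consecutive frames from one physical region and
// never reclaiming them. Field names are hypothetical.
//
//     pub struct BumpFrameAllocator {
//         next: PhysFrame,
//         end: PhysFrame, // exclusive
//     }
//
//     impl FrameAllocator for BumpFrameAllocator {
//         fn allocate_frame(&mut self) -> Option<PhysFrame> {
//             if self.next < self.end {
//                 let frame = self.next;
//                 self.next += 1; // PhysFrame implements AddAssign<u64>
//                 Some(frame)
//             } else {
//                 None
//             }
//         }
//
//         fn deallocate_frame(&mut self, _frame: PhysFrame) {
//             // a bump allocator never frees
//         }
//     }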
// Identity-map things for now.
//
// aarch64 granules and page sizes howto:
// https://stackoverflow.com/questions/34269185/simultaneous-existence-of-different-sized-pages-on-aarch64
@ -142,6 +165,8 @@ pub use kernel_mem_range::*;
/// Contains only special ranges, aka anything that is _not_ normal cacheable
/// DRAM.
static KERNEL_VIRTUAL_LAYOUT: [Descriptor; 6] = [
// These are part of a static linked image and used for proper kernel-space initialization.
// i.e. these data are subtracted from the dtb-provided memory map.
Descriptor {
name: "Kernel stack",
virtual_range: || {
@ -242,6 +267,7 @@ static KERNEL_VIRTUAL_LAYOUT: [Descriptor; 6] = [
execute_never: true,
},
},
// @todo these should come from DTB and mem-map?
Descriptor {
name: "DMA heap pool",
virtual_range: || RangeInclusive::new(map::virt::DMA_HEAP_START, map::virt::DMA_HEAP_END),
@ -252,6 +278,7 @@ static KERNEL_VIRTUAL_LAYOUT: [Descriptor; 6] = [
execute_never: true,
},
},
// @todo these should come from DTB and mem-map?
Descriptor {
name: "Device MMIO",
virtual_range: || RangeInclusive::new(map::phys::VIDEOMEM_BASE, map::phys::MMIO_END),


@ -0,0 +1,68 @@
/// Trait for abstracting over the possible page sizes, 4KiB, 16KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + PartialEq + Eq + PartialOrd + Ord {
/// The page size in bytes.
const SIZE: usize;
/// A string representation of the page size for debug output.
const SIZE_AS_DEBUG_STR: &'static str;
/// The page shift in bits.
const SHIFT: usize;
/// The page mask in bits.
const MASK: u64;
}
/// This trait is implemented for 4KiB, 16KiB, and 2MiB pages, but not for 1GiB pages.
pub trait NotGiantPageSize: PageSize {} // @todo doesn't have to be pub??
/// A standard 4KiB page.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}
impl PageSize for Size4KiB {
const SIZE: usize = 4 * 1024;
const SIZE_AS_DEBUG_STR: &'static str = "4KiB";
const SHIFT: usize = 12;
const MASK: u64 = 0xfff;
}
impl NotGiantPageSize for Size4KiB {}
/// A standard 16KiB page.
/// Currently unused.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size16KiB {}
impl PageSize for Size16KiB {
const SIZE: usize = 16 * 1024;
const SIZE_AS_DEBUG_STR: &'static str = "16KiB";
const SHIFT: usize = 14;
const MASK: u64 = 0x3fff;
}
impl NotGiantPageSize for Size16KiB {}
/// A “huge” 2MiB page.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}
impl PageSize for Size2MiB {
const SIZE: usize = 2 * 1024 * 1024;
const SIZE_AS_DEBUG_STR: &'static str = "2MiB";
const SHIFT: usize = 21;
const MASK: u64 = 0x1f_ffff;
}
impl NotGiantPageSize for Size2MiB {}
/// A “giant” 1GiB page.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size1GiB {}
impl PageSize for Size1GiB {
const SIZE: usize = 1024 * 1024 * 1024;
const SIZE_AS_DEBUG_STR: &'static str = "1GiB";
const SHIFT: usize = 30;
const MASK: u64 = 0x3fff_ffff;
}
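// Possible compile-time sanity checks (a sketch, using the static_assertions
// dependency added in this changeset): for every size, SIZE == 1 << SHIFT and
// MASK == SIZE - 1 must hold.
//
//     use static_assertions::const_assert_eq;
//     const_assert_eq!(Size4KiB::SIZE, 1 << Size4KiB::SHIFT);
//     const_assert_eq!(Size2MiB::SIZE, 1 << Size2MiB::SHIFT);
//     const_assert_eq!(Size1GiB::MASK as usize, Size1GiB::SIZE - 1);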


@ -0,0 +1,40 @@
@startuml
'https://plantuml.com/object-diagram
object GiantPage
object GiantPage_2
object LargePage
object Page
object Page_2
object Unmapped
map L1PageUpperDirectory_2 {
entry0 *--> GiantPage_2
entry1 *--> Unmapped
}
map L3PageTable {
entry0 *--> Page
entry1 *--> Page_2
}
map L2PageDirectory {
entry0 *-> L3PageTable
entry1 *--> LargePage
}
map L1PageUpperDirectory {
entry0 *-> L2PageDirectory
entry1 *--> GiantPage
}
map L0PageGlobalDirectory {
entry0 *-> L1PageUpperDirectory
entry1 *--> L1PageUpperDirectory_2
}
map VirtSpace {
root *-> L0PageGlobalDirectory
}
@enduml


@ -0,0 +1,202 @@
// Verbatim from https://github.com/rust-osdev/x86_64/blob/aa9ae54657beb87c2a491f2ab2140b2332afa6ba/src/structures/paging/frame.rs
// Abstractions for default-sized and huge physical memory frames.
use {
crate::memory::{
page_size::{PageSize, Size4KiB},
PhysAddr,
},
core::{
fmt,
marker::PhantomData,
ops::{Add, AddAssign, Sub, SubAssign},
},
};
/// A physical memory frame.
/// Frame is an addressable unit of the physical address space.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct PhysFrame<S: PageSize = Size4KiB> {
start_address: PhysAddr,
size: PhantomData<S>,
}
impl<S: PageSize> From<u64> for PhysFrame<S> {
fn from(address: u64) -> PhysFrame<S> {
PhysFrame::containing_address(PhysAddr::new(address))
}
}
impl<S: PageSize> From<PhysFrame<S>> for u64 {
fn from(frame: PhysFrame<S>) -> u64 {
frame.start_address.as_u64()
}
}
impl<S: PageSize> PhysFrame<S> {
/// Returns the frame that starts at the given virtual address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
pub fn from_start_address(address: PhysAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(PhysFrame::containing_address(address))
}
/// Returns the frame that contains the given physical address.
pub fn containing_address(address: PhysAddr) -> Self {
PhysFrame {
start_address: address.aligned_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the frame.
pub fn start_address(&self) -> PhysAddr {
self.start_address
}
/// Returns the size of the frame (4KiB, 2MiB or 1GiB).
pub fn size(&self) -> usize {
S::SIZE
}
/// Returns a range of frames, exclusive `end`.
pub fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
PhysFrameRange { start, end }
}
/// Returns a range of frames, inclusive `end`.
pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
}
impl<S: PageSize> fmt::Debug for PhysFrame<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"PhysFrame[{}]({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for PhysFrame<S> {
type Output = Self;
/// Adds `rhs` same-sized frames to the current address.
fn add(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() + rhs * S::SIZE as u64)
}
}
impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for PhysFrame<S> {
type Output = Self;
/// Subtracts `rhs` same-sized frames from the current address.
// @todo should I sub pages or just bytes here?
fn sub(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() - rhs * S::SIZE as u64)
}
}
impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
type Output = usize;
/// Return number of frames between start and end addresses.
fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
(self.start_address - rhs.start_address) as usize / S::SIZE
}
}
/// A range of physical memory frames, exclusive the upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, exclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRange<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
}
}
impl<S: PageSize> Iterator for PhysFrameRange<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of physical memory frames, inclusive the upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, inclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRangeInclusive<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start <= self.end)
}
}
impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
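// Example (hypothetical addresses): the four 4 KiB frames covering the first
// 16 KiB of physical memory, iterated as an exclusive range.
//
//     let start = PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x0));
//     let end = PhysFrame::containing_address(PhysAddr::new(0x4000));
//     assert_eq!(PhysFrame::range(start, end).count(), 4);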


@ -0,0 +1,324 @@
// Verbatim from https://github.com/rust-osdev/x86_64/blob/aa9ae54657beb87c2a491f2ab2140b2332afa6ba/src/structures/paging/page.rs
// Abstractions for default-sized and huge virtual memory pages.
// @fixme x86_64 page level numbering: P4 -> P3 -> P2 -> P1
// @fixme armv8a page level numbering: L0 -> L1 -> L2 -> L3
#![allow(dead_code)]
use {
crate::memory::{
page_size::{NotGiantPageSize, PageSize, Size1GiB, Size2MiB, Size4KiB},
VirtAddr,
},
core::{
fmt,
marker::PhantomData,
ops::{Add, AddAssign, Sub, SubAssign},
},
ux::u9,
};
/// A virtual memory page.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page<S: PageSize = Size4KiB> {
start_address: VirtAddr,
size: PhantomData<S>,
}
pub enum Error {
NotAligned,
}
impl<S: PageSize> Page<S> {
/// The page size in bytes.
pub const SIZE: usize = S::SIZE;
/// Returns the page that starts at the given virtual address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
pub fn from_start_address(address: VirtAddr) -> Result<Self, Error> {
if !address.is_aligned(S::SIZE) {
Err(Error::NotAligned)
} else {
Ok(Page::containing_address(address))
}
}
/// Returns the page that contains the given virtual address.
pub fn containing_address(address: VirtAddr) -> Self {
Page {
start_address: address.aligned_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the page.
pub fn start_address(&self) -> VirtAddr {
self.start_address
}
/// Returns the size of the page (4KiB, 2MiB or 1GiB).
pub const fn size(&self) -> usize {
S::SIZE
}
/// Returns the level 0 page table index of this page.
pub fn l0_index(&self) -> u9 {
self.start_address().l0_index()
}
/// Returns the level 1 page table index of this page.
pub fn l1_index(&self) -> u9 {
self.start_address().l1_index()
}
/// Returns a range of pages, exclusive `end`.
pub fn range(start: Self, end: Self) -> PageRange<S> {
PageRange { start, end }
}
/// Returns a range of pages, inclusive `end`.
pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
PageRangeInclusive { start, end }
}
}
impl<S: NotGiantPageSize> Page<S> {
/// Returns the level 2 page table index of this page.
pub fn l2_index(&self) -> u9 {
self.start_address().l2_index()
}
}
impl Page<Size1GiB> {
/// Returns the 1GiB memory page with the specified page table indices.
pub fn from_page_table_indices_1gib(l0_index: u9, l1_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(l0_index));
addr.set_bits(30..39, u64::from(l1_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size2MiB> {
/// Returns the 2MiB memory page with the specified page table indices.
pub fn from_page_table_indices_2mib(l0_index: u9, l1_index: u9, l2_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(l0_index));
addr.set_bits(30..39, u64::from(l1_index));
addr.set_bits(21..30, u64::from(l2_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size4KiB> {
/// Returns the 4KiB memory page with the specified page table indices.
pub fn from_page_table_indices(l0_index: u9, l1_index: u9, l2_index: u9, l3_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(l0_index));
addr.set_bits(30..39, u64::from(l1_index));
addr.set_bits(21..30, u64::from(l2_index));
addr.set_bits(12..21, u64::from(l3_index));
Page::containing_address(VirtAddr::new(addr))
}
/// Returns the level 3 page table index of this page.
pub fn l3_index(&self) -> u9 {
self.start_address().l3_index()
}
}
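// Worked example: from_page_table_indices(u9::new(1), u9::new(0), u9::new(1), u9::new(1))
// sets bit 39 (l0 = 1), leaves bits 30..39 clear (l1 = 0), sets bit 21 (l2 = 1) and
// bit 12 (l3 = 1), producing the virtual address 0x0000_0080_0020_1000; the
// l0_index()..l3_index() accessors above recover (1, 0, 1, 1) from that address.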
impl<S: PageSize> fmt::Debug for Page<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"Page<{}>({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for Page<S> {
type Output = Self;
// @todo should I add pages or just bytes here?
fn add(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() + rhs * S::SIZE as u64)
}
}
impl<S: PageSize> AddAssign<u64> for Page<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for Page<S> {
type Output = Self;
/// Subtracts `rhs` same-sized pages from the current address.
// @todo should I sub pages or just bytes here?
fn sub(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() - rhs * S::SIZE as u64)
}
}
impl<S: PageSize> SubAssign<u64> for Page<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
impl<S: PageSize> Sub<Self> for Page<S> {
type Output = usize;
fn sub(self, rhs: Self) -> Self::Output {
(self.start_address - rhs.start_address) as usize / S::SIZE
}
}
/// A range of pages with exclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct PageRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, exclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRange<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
self.start >= self.end
}
/// Returns the number of pages in the range.
pub fn num_pages(&self) -> usize {
// `end - start` already yields a page count (see the `Sub<Self>` impl above).
self.end - self.start
}
}
impl<S: PageSize> Iterator for PageRange<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl PageRange<Size2MiB> {
/// Converts the range of 2MiB pages to a range of 4KiB pages.
// @todo what about range of 1GiB pages?
pub fn as_4kib_page_range(&self) -> PageRange<Size4KiB> {
PageRange {
start: Page::containing_address(self.start.start_address()),
// @fixme end is calculated incorrectly, add test
end: Page::containing_address(self.end.start_address()),
}
}
}
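// Rough expectation for the conversion above: a range of N 2 MiB pages covers N * 512
// 4 KiB pages (2 MiB / 4 KiB = 512); see `test_page_range_conversion` below.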
impl<S: PageSize> fmt::Debug for PageRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of pages with inclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, inclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRangeInclusive<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
self.start > self.end
}
}
impl<S: PageSize> Iterator for PageRangeInclusive<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if !self.is_empty() {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PageRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
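// Sketch of the difference from the exclusive `PageRange`: the inclusive variant also yields
// its `end` page, so the same bounds produce one extra item, e.g.
//
// let start: Page = Page::containing_address(VirtAddr::new(0));
// assert_eq!(Page::range(start.clone(), start.clone() + 2).count(), 2);
// assert_eq!(Page::range_inclusive(start.clone(), start.clone() + 2).count(), 3);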
#[cfg(test)]
mod tests {
use super::*;
#[test_case]
pub fn test_page_ranges() {
let page_size = Size4KiB::SIZE as u64;
let number = 1000;
let start_addr = VirtAddr::new(0xdeadbeaf);
let start: Page = Page::containing_address(start_addr);
let end = start.clone() + number;
let mut range = Page::range(start.clone(), end.clone());
for i in 0..number {
assert_eq!(
range.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range.next(), None);
let mut range_inclusive = Page::range_inclusive(start, end);
for i in 0..=number {
assert_eq!(
range_inclusive.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range_inclusive.next(), None);
}
#[test_case]
fn test_page_range_conversion() {
let number = 10;
let start_addr = VirtAddr::new(0xdeadbeaf);
let start: Page<Size2MiB> = Page::containing_address(start_addr);
let end = start.clone() + number;
let range = Page::range(start.clone(), end.clone()).as_4kib_page_range();
// 10 2MiB pages is 5120 4KiB pages
assert_eq!(range.num_pages(), 5120);
}
}

nucleus/src/boot_info.rs Normal file
View File

@@ -0,0 +1,132 @@
#![allow(dead_code)]
use crate::{memory::PhysAddr, println, sync};
#[derive(Default, Copy, Clone)]
struct BootInfoMemRegion {
pub start: PhysAddr,
pub end: PhysAddr,
}
impl BootInfoMemRegion {
pub const fn new() -> BootInfoMemRegion {
BootInfoMemRegion {
start: PhysAddr::zero(),
end: PhysAddr::zero(),
}
}
pub fn size(&self) -> u64 {
self.end - self.start
}
pub fn is_empty(&self) -> bool {
self.start == self.end
}
}
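// e.g. a region with start 0x8_0000 and end 0x10_0000 reports size() == 0x8_0000 bytes and
// is_empty() == false; the end address is treated as exclusive throughout this module.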
const NUM_MEM_REGIONS: usize = 16;
/// Errors that may occur while building the boot information.
pub enum BootInfoError {
/// All `NUM_MEM_REGIONS` slots in the region table are already occupied.
NoFreeMemRegions,
}
#[derive(Default)]
struct BootInfo {
pub regions: [BootInfoMemRegion; NUM_MEM_REGIONS],
pub max_slot_pos: usize,
}
impl BootInfo {
pub const fn new() -> BootInfo {
BootInfo {
regions: [BootInfoMemRegion::new(); NUM_MEM_REGIONS],
max_slot_pos: 0,
}
}
/// Record a free physical memory region. Empty regions are accepted and silently skipped,
/// so callers may insert unconditionally.
pub fn insert_region(&mut self, reg: BootInfoMemRegion) -> Result<(), BootInfoError> {
if reg.is_empty() {
return Ok(());
}
assert!(reg.start <= reg.end);
for region in self.regions.iter_mut() {
if region.is_empty() {
*region = reg;
return Ok(());
}
}
Err(BootInfoError::NoFreeMemRegions)
}
/// Carve a naturally aligned block of `2^size_bits` bytes out of the free regions and
/// return its physical start address.
pub fn alloc_region(&mut self, size_bits: usize) -> Result<PhysAddr, BootInfoError> {
let mut reg_index: usize = 0;
let mut reg: BootInfoMemRegion = BootInfoMemRegion::new();
let mut rem_small: BootInfoMemRegion = BootInfoMemRegion::new();
let mut rem_large: BootInfoMemRegion = BootInfoMemRegion::new();
/*
* Search for a free mem region that will be the best fit for an allocation. We favour allocations
* that are aligned to either end of the region. If an allocation must split a region we favour
* an unbalanced split. In both cases we attempt to use the smallest region possible. In general
* this means we aim to make the size of the smallest remaining region smaller (ideally zero)
* followed by making the size of the largest remaining region smaller.
*/
for (i, reg_iter) in self.regions.iter().enumerate() {
let mut new_reg: BootInfoMemRegion = BootInfoMemRegion::new();
/* Determine whether placing the region at the start or the end will create a bigger left over region */
if reg_iter.start.aligned_up(1usize << size_bits) - reg_iter.start
< reg_iter.end - reg_iter.end.aligned_down(1usize << size_bits)
{
new_reg.start = reg_iter.start.aligned_up(1usize << size_bits);
new_reg.end = new_reg.start + (1u64 << size_bits);
} else {
new_reg.end = reg_iter.end.aligned_down(1usize << size_bits);
new_reg.start = new_reg.end - (1u64 << size_bits);
}
if new_reg.end > new_reg.start
&& new_reg.start >= reg_iter.start
&& new_reg.end <= reg_iter.end
{
let mut new_rem_small: BootInfoMemRegion = BootInfoMemRegion::new();
let mut new_rem_large: BootInfoMemRegion = BootInfoMemRegion::new();
if new_reg.start - reg_iter.start < reg_iter.end - new_reg.end {
new_rem_small.start = reg_iter.start;
new_rem_small.end = new_reg.start;
new_rem_large.start = new_reg.end;
new_rem_large.end = reg_iter.end;
} else {
new_rem_large.start = reg_iter.start;
new_rem_large.end = new_reg.start;
new_rem_small.start = new_reg.end;
new_rem_small.end = reg_iter.end;
}
if reg.is_empty()
|| (new_rem_small.size() < rem_small.size())
|| (new_rem_small.size() == rem_small.size()
&& new_rem_large.size() < rem_large.size())
{
reg = new_reg;
rem_small = new_rem_small;
rem_large = new_rem_large;
reg_index = i;
}
}
}
if reg.is_empty() {
panic!("Kernel init failed: not enough memory\n");
}
/* Remove the region in question */
self.regions[reg_index] = BootInfoMemRegion::new();
/* Add the remaining regions in largest to smallest order */
self.insert_region(rem_large)?;
if self.insert_region(rem_small).is_err() {
println!("BootInfo::alloc_region(): wasted {} bytes due to alignment, try to increase NUM_MEM_REGIONS", rem_small.size());
}
Ok(reg.start)
}
}
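// Worked example of the best-fit search above (illustrative numbers): given a single free
// region [0x1234, 0x9000) and a request with size_bits = 12 (one 4 KiB frame), the end-aligned
// candidate [0x8000, 0x9000) is chosen because it leaves an empty small remainder; the table
// then keeps only the large leftover [0x1234, 0x8000) and the call returns PhysAddr 0x8000.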
#[link_section = ".data.boot"] // @todo put zero-initialized stuff to .bss.boot!
static BOOT_INFO: sync::NullLock<BootInfo> = sync::NullLock::new(BootInfo::new());

View File

@@ -12,6 +12,7 @@
#![feature(asm)]
#![feature(global_asm)]
#![feature(decl_macro)]
#![feature(const_fn)]
#![feature(allocator_api)]
#![feature(ptr_internals)]
#![feature(format_args_nl)]
@@ -32,6 +33,7 @@ use architecture_not_supported_sorry;
#[macro_use]
pub mod arch;
pub use arch::*;
mod boot_info;
mod devices;
mod macros;
mod mm;
@@ -78,13 +80,13 @@ static DMA_ALLOCATOR: sync::NullLock<mm::BumpAllocator> =
));
fn print_mmu_state_and_features() {
memory::mmu::print_features();
memory::features::print_features();
}
fn init_mmu() {
unsafe {
memory::mmu::init().unwrap();
}
// unsafe {
// memory::mmu::init().unwrap();
// }
println!("[!] MMU initialised");
print_mmu_state_and_features();
}

View File

@@ -10,18 +10,18 @@ pub use bump_allocator::BumpAllocator;
///
/// Returns the greatest x with alignment `align` so that x <= addr.
/// The alignment must be a power of 2.
pub fn align_down(addr: u64, align: u64) -> u64 {
pub fn align_down(addr: u64, align: usize) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
addr & !(align - 1)
addr & !(align as u64 - 1)
}
/// Align address upwards.
///
/// Returns the smallest x with alignment `align` so that x >= addr.
/// The alignment must be a power of 2.
pub fn align_up(addr: u64, align: u64) -> u64 {
pub fn align_up(addr: u64, align: usize) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
let align_mask = align - 1;
let align_mask = align as u64 - 1;
if addr & align_mask == 0 {
addr // already aligned
} else {
@@ -31,6 +31,7 @@ pub fn align_up(addr: u64, align: u64) -> u64 {
/// Calculate the next possible aligned address without sanity checking the
/// input parameters.
// u64 for return and addr?
#[inline]
fn aligned_addr_unchecked(addr: usize, alignment: usize) -> usize {
(addr + (alignment - 1)) & !(alignment - 1)
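// For example, aligned_addr_unchecked(0x1234, 0x1000) == 0x2000, while an already aligned
// address is returned unchanged: aligned_addr_unchecked(0x2000, 0x1000) == 0x2000.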

View File

@@ -1,32 +1,32 @@
# Broadcom 2837 on Raspberry Pi 3 as JTAG target
# Broadcom bcm2837 on Raspberry Pi 3 as JTAG target
# From https://www.suse.com/c/debugging-raspberry-pi-3-with-jtag/
telnet_port 4444
gdb_port 5555
transport select jtag
# we need to enable srst even though we don't connect it
reset_config trst_and_srst
adapter_khz 4000
jtag_ntrst_delay 500
if { [info exists CHIPNAME] } {
set _CHIPNAME $CHIPNAME
} else {
set _CHIPNAME rpi3
set _CHIPNAME bcm2837
}
#
# Main DAP
#
if { [info exists DAP_TAPID] } {
set _DAP_TAPID $DAP_TAPID
} else {
set _DAP_TAPID 0x4ba00477
}
adapter speed 4000
transport select jtag
# we need to enable srst even though we don't connect it
reset_config trst_and_srst
jtag_ntrst_delay 500
telnet_port 4444
gdb_port 5555
#
# Main DAP
#
jtag newtap $_CHIPNAME tap -irlen 4 -ircapture 0x1 -irmask 0xf -expected-id $_DAP_TAPID -enable
dap create $_CHIPNAME.dap -chain-position $_CHIPNAME.tap
@@ -35,17 +35,28 @@ set _CTINAME $_CHIPNAME.cti
set DBGBASE {0x80010000 0x80012000 0x80014000 0x80016000}
set CTIBASE {0x80018000 0x80019000 0x8001a000 0x8001b000}
set _cores 4
set _smp_command ""
for { set _core 0 } { $_core < $_cores } { incr _core } {
cti create $_CTINAME.$_core -dap $_CHIPNAME.dap -ap-num 0 \
-ctibase [lindex $CTIBASE $_core]
-baseaddr [lindex $CTIBASE $_core]
target create $_TARGETNAME.$_core aarch64 \
-dap $_CHIPNAME.dap -coreid $_core \
-dbgbase [lindex $DBGBASE $_core] -cti $_CTINAME.$_core
if {$_core != 0} {
set _smp_command "$_smp_command ${_TARGETNAME}.${_core}"
} else {
set _smp_command "target smp ${_TARGETNAME}.${_core}"
}
$_TARGETNAME.$_core configure -event reset-assert-post "aarch64 dbginit"
$_TARGETNAME.$_core configure -event gdb-attach { halt }
}
eval $_smp_command
targets $_TARGETNAME.0

ocd/rpi4_target.cfg Normal file
View File

@@ -0,0 +1,62 @@
# Broadcom bcm2711 on Raspberry Pi 4 as JTAG target
# From https://gist.github.com/tnishinaga/46a3380e1f47f5e892bbb74e55b3cf3e
# See also https://xihan94.gitbook.io/raspberry-pi/raspberry-pi-4-bringup
if { [info exists CHIPNAME] } {
set _CHIPNAME $CHIPNAME
} else {
set _CHIPNAME bcm2711
}
if { [info exists DAP_TAPID] } {
set _DAP_TAPID $DAP_TAPID
} else {
set _DAP_TAPID 0x4ba00477
}
adapter speed 4000
transport select jtag
# we need to enable srst even though we don't connect it
reset_config trst_and_srst
jtag_ntrst_delay 500
telnet_port 4444
gdb_port 5555
#
# Main DAP
#
jtag newtap $_CHIPNAME tap -irlen 4 -expected-id $_DAP_TAPID
dap create $_CHIPNAME.dap -chain-position $_CHIPNAME.tap
set _TARGETNAME $_CHIPNAME.a72
set _CTINAME $_CHIPNAME.cti
set DBGBASE {0x80410000 0x80510000 0x80610000 0x80710000}
set CTIBASE {0x80420000 0x80520000 0x80620000 0x80720000}
set _cores 4
set _smp_command ""
for { set _core 0 } { $_core < $_cores } { incr _core } {
cti create $_CTINAME.$_core -dap $_CHIPNAME.dap -ap-num 0 \
-baseaddr [lindex $CTIBASE $_core]
target create ${_TARGETNAME}.${_core} aarch64 \
-dap ${_CHIPNAME}.dap -coreid $_core \
-dbgbase [lindex $DBGBASE $_core] -cti ${_CTINAME}.${_core}
if {$_core != 0} {
set _smp_command "$_smp_command ${_TARGETNAME}.${_core}"
} else {
set _smp_command "target smp ${_TARGETNAME}.${_core}"
}
$_TARGETNAME.$_core configure -event reset-assert-post "aarch64 dbginit"
$_TARGETNAME.$_core configure -event gdb-attach { halt }
}
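# With the default CHIPNAME above, _smp_command now reads:
#   target smp bcm2711.a72.0 bcm2711.a72.1 bcm2711.a72.2 bcm2711.a72.3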
eval $_smp_command
targets $_TARGETNAME.0