diff --git a/src/arch/aarch64/memory/paging/mod.rs b/src/arch/aarch64/memory/paging/mod.rs
index 0c4566e..883fed8 100644
--- a/src/arch/aarch64/memory/paging/mod.rs
+++ b/src/arch/aarch64/memory/paging/mod.rs
@@ -80,3 +80,130 @@ impl Page {
 pub struct ActivePageTable {
     l0: Unique<Table<Level0>>,
 }
+
+impl ActivePageTable {
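+    /// Unsafe: the caller must guarantee that `table::L0` points at the
+    /// currently active level 0 table and that only one `ActivePageTable` exists.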
+    pub unsafe fn new() -> ActivePageTable {
+        ActivePageTable {
+            l0: Unique::new_unchecked(table::L0),
+        }
+    }
+
+    fn l0(&self) -> &Table<Level0> {
+        unsafe { self.l0.as_ref() }
+    }
+
+    fn l0_mut(&mut self) -> &mut Table<Level0> {
+        unsafe { self.l0.as_mut() }
+    }
+
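+    /// Translates a virtual address to the corresponding physical address,
+    /// or `None` if the address is not mapped.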
+    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
+        let offset = virtual_address % PAGE_SIZE;
+        self.translate_page(Page::containing_address(virtual_address))
+            .map(|frame| frame.number * PAGE_SIZE + offset)
+    }
+
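+    /// Returns the physical frame that `page` maps to, if any.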
+    fn translate_page(&self, page: Page) -> Option<Frame> {
+        use self::entry::EntryFlags;
+
+        let l1 = self.l0().next_table(page.l0_index());
+
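+        // Fallback for block entries: a level 1 entry can map a 1GiB block
+        // and a level 2 entry a 2MiB block (4KiB granule).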
+        let huge_page = || {
+            l1.and_then(|l1| {
+                let l1_entry = &l1[page.l1_index()];
+                // 1GiB page?
+                if let Some(start_frame) = l1_entry.pointed_frame() {
+                    if !l1_entry.flags().contains(EntryFlags::TABLE) {
+                        // address must be 1GiB aligned
+                        assert!(start_frame.number % (ENTRY_COUNT * ENTRY_COUNT) == 0);
+                        return Some(Frame {
+                            number: start_frame.number + page.l2_index() * ENTRY_COUNT
+                                + page.l3_index(),
+                        });
+                    }
+                }
+                if let Some(l2) = l1.next_table(page.l1_index()) {
+                    let l2_entry = &l2[page.l2_index()];
+                    // 2MiB page?
+                    if let Some(start_frame) = l2_entry.pointed_frame() {
+                        if !l2_entry.flags().contains(EntryFlags::TABLE) {
+                            // address must be 2MiB aligned
+                            assert!(start_frame.number % ENTRY_COUNT == 0);
+                            return Some(Frame {
+                                number: start_frame.number + page.l3_index(),
+                            });
+                        }
+                    }
+                }
+                None
+            })
+        };
+
+        l1.and_then(|l1| l1.next_table(page.l1_index()))
+            .and_then(|l2| l2.next_table(page.l2_index()))
+            .and_then(|l3| l3[page.l3_index()].pointed_frame())
+            .or_else(huge_page)
+    }
+
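+    /// Maps `page` to `frame` with the given flags, allocating any missing
+    /// intermediate tables from `allocator`.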
+    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
+    where
+        A: FrameAllocator,
+    {
+        let l0 = self.l0_mut();
+        let l1 = l0.next_table_create(page.l0_index(), allocator);
+        let l2 = l1.next_table_create(page.l1_index(), allocator);
+        let l3 = l2.next_table_create(page.l2_index(), allocator);
+
+        assert!(l3[page.l3_index()].is_unused());
+        l3[page.l3_index()].set(frame, flags | EntryFlags::VALID);
+    }
+
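+    /// Maps `page` to a freshly allocated frame.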
+    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+    where
+        A: FrameAllocator,
+    {
+        let frame = allocator.allocate_frame().expect("out of memory");
+        self.map_to(page, frame, flags, allocator)
+    }
+
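+    /// Identity-maps `frame`, so that the virtual address equals the physical one.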
+    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
+    where
+        A: FrameAllocator,
+    {
+        let page = Page::containing_address(frame.start_address());
+        self.map_to(page, frame, flags, allocator)
+    }
+
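+    /// Removes the mapping for `page`. Panics if the page is not mapped or
+    /// is part of a huge page.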
+    fn unmap<A>(&mut self, page: Page, allocator: &mut A)
+    where
+        A: FrameAllocator,
+    {
+        // use aarch64::instructions::tlb;
+        // use x86_64::VirtualAddress;
+
+        assert!(self.translate(page.start_address()).is_some());
+
+        let l3 = self.l0_mut()
+            .next_table_mut(page.l0_index())
+            .and_then(|l1| l1.next_table_mut(page.l1_index()))
+            .and_then(|l2| l2.next_table_mut(page.l2_index()))
+            .expect("mapping code does not support huge pages");
+        let frame = l3[page.l3_index()].pointed_frame().unwrap();
+        l3[page.l3_index()].set_unused();
+        // tlb::flush(VirtualAddress(page.start_address()));
+        // TODO free l1/l2/l3 tables if empty
+        //allocator.deallocate_frame(frame);
+    }
+}