diff --git a/nucleus/src/arch/aarch64/memory/addr/phys_addr.rs b/nucleus/src/arch/aarch64/memory/addr/phys_addr.rs
index fd9afd7..20776bd 100644
--- a/nucleus/src/arch/aarch64/memory/addr/phys_addr.rs
+++ b/nucleus/src/arch/aarch64/memory/addr/phys_addr.rs
@@ -78,7 +78,7 @@ impl PhysAddr {
     /// See the `align_up` function for more information.
     pub fn aligned_up<U>(self, align: U) -> Self
     where
-        U: Into<u64>,
+        U: Into<usize>,
     {
         PhysAddr(align_up(self.0, align.into()))
     }
@@ -88,7 +88,7 @@ impl PhysAddr {
     /// See the `align_down` function for more information.
     pub fn aligned_down<U>(self, align: U) -> Self
     where
-        U: Into<u64>,
+        U: Into<usize>,
     {
         PhysAddr(align_down(self.0, align.into()))
     }
@@ -96,7 +96,7 @@ impl PhysAddr {
     /// Checks whether the physical address has the demanded alignment.
     pub fn is_aligned<U>(self, align: U) -> bool
     where
-        U: Into<u64>,
+        U: Into<usize>,
     {
         self.aligned_down(align) == self
     }
diff --git a/nucleus/src/arch/aarch64/memory/addr/virt_addr.rs b/nucleus/src/arch/aarch64/memory/addr/virt_addr.rs
index c07850a..0f180fc 100644
--- a/nucleus/src/arch/aarch64/memory/addr/virt_addr.rs
+++ b/nucleus/src/arch/aarch64/memory/addr/virt_addr.rs
@@ -113,7 +113,7 @@ impl VirtAddr {
     /// See the `align_up` free function for more information.
     pub fn aligned_up<U>(self, align: U) -> Self
     where
-        U: Into<u64>,
+        U: Into<usize>,
     {
         VirtAddr(align_up(self.0, align.into()))
     }
@@ -123,7 +123,7 @@ impl VirtAddr {
     /// See the `align_down` free function for more information.
     pub fn aligned_down<U>(self, align: U) -> Self
     where
-        U: Into<u64>,
+        U: Into<usize>,
     {
         VirtAddr(align_down(self.0, align.into()))
     }
@@ -131,7 +131,7 @@ impl VirtAddr {
     /// Checks whether the virtual address has the demanded alignment.
     pub fn is_aligned<U>(self, align: U) -> bool
     where
-        U: Into<u64>,
+        U: Into<usize>,
     {
         self.aligned_down(align) == self
     }
diff --git a/nucleus/src/arch/aarch64/memory/phys_frame.rs b/nucleus/src/arch/aarch64/memory/phys_frame.rs
index b04e13a..f030cc9 100644
--- a/nucleus/src/arch/aarch64/memory/phys_frame.rs
+++ b/nucleus/src/arch/aarch64/memory/phys_frame.rs
@@ -48,7 +48,7 @@ impl<S: PageSize> PhysFrame<S> {
     /// Returns the frame that contains the given physical address.
     pub fn containing_address(address: PhysAddr) -> Self {
         PhysFrame {
-            start_address: address.align_down(S::SIZE),
+            start_address: address.aligned_down(S::SIZE),
             size: PhantomData,
         }
     }
@@ -86,8 +86,9 @@ impl<S: PageSize> fmt::Debug for PhysFrame<S> {

 impl<S: PageSize> Add<u64> for PhysFrame<S> {
     type Output = Self;
+    /// Adds `rhs` same-sized frames to the current address.
     fn add(self, rhs: u64) -> Self::Output {
-        PhysFrame::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
+        PhysFrame::containing_address(self.start_address() + rhs * S::SIZE as u64)
     }
 }

@@ -99,8 +100,10 @@ impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {

 impl<S: PageSize> Sub<u64> for PhysFrame<S> {
     type Output = Self;
+    /// Subtracts `rhs` same-sized frames from the current address.
+    // @todo should I sub pages or just bytes here?
     fn sub(self, rhs: u64) -> Self::Output {
-        PhysFrame::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
+        PhysFrame::containing_address(self.start_address() - rhs * S::SIZE as u64)
     }
 }

@@ -111,13 +114,14 @@ impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
 }

 impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
-    type Output = u64;
+    type Output = usize;
+    /// Return number of frames between start and end addresses.
     fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
-        (self.start_address - rhs.start_address) / S::SIZE
+        (self.start_address - rhs.start_address) as usize / S::SIZE
     }
 }

-/// An range of physical memory frames, exclusive the upper bound.
+/// A range of physical memory frames, exclusive of the upper bound.
 #[derive(Clone, Copy, PartialEq, Eq)]
 #[repr(C)]
 pub struct PhysFrameRange<S: PageSize> {
diff --git a/nucleus/src/arch/aarch64/memory/virt_page.rs b/nucleus/src/arch/aarch64/memory/virt_page.rs
index 8cfc148..a8ad211 100644
--- a/nucleus/src/arch/aarch64/memory/virt_page.rs
+++ b/nucleus/src/arch/aarch64/memory/virt_page.rs
@@ -46,7 +46,7 @@ impl<S: PageSize> Page<S> {
     /// Returns the page that contains the given virtual address.
     pub fn containing_address(address: VirtAddr) -> Self {
         Page {
-            start_address: address.align_down(S::SIZE),
+            start_address: address.aligned_down(S::SIZE),
             size: PhantomData,
         }
     }
@@ -147,7 +147,7 @@ impl<S: PageSize> Add<u64> for Page<S> {
     type Output = Self;
     // @todo should I add pages or just bytes here?
     fn add(self, rhs: u64) -> Self::Output {
-        Page::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
+        Page::containing_address(self.start_address() + rhs * S::SIZE as u64)
     }
 }

@@ -159,9 +159,10 @@ impl<S: PageSize> AddAssign<u64> for Page<S> {

 impl<S: PageSize> Sub<u64> for Page<S> {
     type Output = Self;
+    /// Subtracts `rhs` same-sized pages from the current address.
     // @todo should I sub pages or just bytes here?
     fn sub(self, rhs: u64) -> Self::Output {
-        Page::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
+        Page::containing_address(self.start_address() - rhs * S::SIZE as u64)
     }
 }

@@ -172,9 +173,9 @@ impl<S: PageSize> SubAssign<u64> for Page<S> {
 }

 impl<S: PageSize> Sub<Self> for Page<S> {
-    type Output = u64;
+    type Output = usize;
     fn sub(self, rhs: Self) -> Self::Output {
-        (self.start_address - rhs.start_address) / S::SIZE
+        (self.start_address - rhs.start_address) as usize / S::SIZE
     }
 }

diff --git a/nucleus/src/boot_info.rs b/nucleus/src/boot_info.rs
index 40b99f8..05e6594 100644
--- a/nucleus/src/boot_info.rs
+++ b/nucleus/src/boot_info.rs
@@ -73,13 +73,13 @@ impl BootInfo {
         let mut new_reg: BootInfoMemRegion = BootInfoMemRegion::new();
         /* Determine whether placing the region at the start or the end
          * will create a bigger left over region */
-        if reg_iter.start.aligned_up(1u64 << size_bits) - reg_iter.start
-            < reg_iter.end - reg_iter.end.aligned_down(1u64 << size_bits)
+        if reg_iter.start.aligned_up(1usize << size_bits) - reg_iter.start
+            < reg_iter.end - reg_iter.end.aligned_down(1usize << size_bits)
         {
-            new_reg.start = reg_iter.start.aligned_up(1u64 << size_bits);
+            new_reg.start = reg_iter.start.aligned_up(1usize << size_bits);
             new_reg.end = new_reg.start + (1u64 << size_bits);
         } else {
-            new_reg.end = reg_iter.end.aligned_down(1u64 << size_bits);
+            new_reg.end = reg_iter.end.aligned_down(1usize << size_bits);
             new_reg.start = new_reg.end - (1u64 << size_bits);
         }
         if new_reg.end > new_reg.start
diff --git a/nucleus/src/main.rs b/nucleus/src/main.rs
index bee43b7..b463127 100644
--- a/nucleus/src/main.rs
+++ b/nucleus/src/main.rs
@@ -12,6 +12,7 @@
 #![feature(asm)]
 #![feature(global_asm)]
 #![feature(decl_macro)]
+#![feature(const_fn)]
 #![feature(allocator_api)]
 #![feature(ptr_internals)]
 #![feature(format_args_nl)]
diff --git a/nucleus/src/mm/mod.rs b/nucleus/src/mm/mod.rs
index c6899a5..4bc765a 100644
--- a/nucleus/src/mm/mod.rs
+++ b/nucleus/src/mm/mod.rs
@@ -10,18 +10,18 @@ pub use bump_allocator::BumpAllocator;
 ///
 /// Returns the greatest x with alignment `align` so that x <= addr.
 /// The alignment must be a power of 2.
-pub fn align_down(addr: u64, align: u64) -> u64 {
+pub fn align_down(addr: u64, align: usize) -> u64 {
     assert!(align.is_power_of_two(), "`align` must be a power of two");
-    addr & !(align - 1)
+    addr & !(align as u64 - 1)
 }

 /// Align address upwards.
 ///
 /// Returns the smallest x with alignment `align` so that x >= addr.
 /// The alignment must be a power of 2.
-pub fn align_up(addr: u64, align: u64) -> u64 {
+pub fn align_up(addr: u64, align: usize) -> u64 {
     assert!(align.is_power_of_two(), "`align` must be a power of two");
-    let align_mask = align - 1;
+    let align_mask = align as u64 - 1;
     if addr & align_mask == 0 {
         addr // already aligned
     } else {
@@ -31,6 +31,7 @@ pub fn align_up(addr: u64, align: u64) -> u64 {

 /// Calculate the next possible aligned address without sanity checking the
 /// input parameters.
+// u64 for return and addr?
 #[inline]
 fn aligned_addr_unchecked(addr: usize, alignment: usize) -> usize {
     (addr + (alignment - 1)) & !(alignment - 1)
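
For reviewers: a standalone sketch of the reworked alignment helpers from nucleus/src/mm/mod.rs after this change. The function bodies are copied from the + lines above; the align_up else-branch and the main() driver with its example addresses are illustrative assumptions, not part of the diff.

    /// Align address downwards: greatest x with alignment `align` such that x <= addr.
    pub fn align_down(addr: u64, align: usize) -> u64 {
        assert!(align.is_power_of_two(), "`align` must be a power of two");
        addr & !(align as u64 - 1)
    }

    /// Align address upwards: smallest x with alignment `align` such that x >= addr.
    pub fn align_up(addr: u64, align: usize) -> u64 {
        assert!(align.is_power_of_two(), "`align` must be a power of two");
        let align_mask = align as u64 - 1;
        if addr & align_mask == 0 {
            addr // already aligned
        } else {
            // Assumed completion: round up to the next multiple of `align`
            // (the else-branch body lies outside the hunk shown above).
            (addr | align_mask) + 1
        }
    }

    fn main() {
        // Hypothetical addresses; alignments are now `usize`, addresses stay `u64`.
        assert_eq!(align_down(0x1234, 1usize << 12), 0x1000);
        assert_eq!(align_up(0x1234, 1usize << 12), 0x2000);
        assert_eq!(align_up(0x2000, 1usize << 12), 0x2000); // already aligned
    }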