Commit 88e516d5 authored by Martin Kröning

Move map_heap to arch

parent a455cbfa
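This commit moves map_heap from the generic mm module into each architecture's paging module: the first two hunks add identical copies next to the per-architecture map implementations, and the remaining hunks remove the generic version, whose body was little more than arch::mm:: calls, and rewrite the init() call sites to paging::map_heap.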
@@ -592,6 +592,21 @@ pub fn map<S: PageSize>(
 	root_pagetable.map_pages(range, physical_address, flags);
 }
 
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
+	let flags = {
+		let mut flags = PageTableEntryFlags::empty();
+		flags.normal().writable().execute_disable();
+		flags
+	};
+
+	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
+
+	for virt_addr in virt_addrs {
+		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+		map::<S>(virt_addr, phys_addr, 1, flags);
+	}
+}
+
 pub fn unmap<S: PageSize>(virtual_address: VirtAddr, count: usize) {
 	trace!(
 		"Unmapping virtual address {:#X} ({} pages)",
@@ -190,6 +190,21 @@ pub fn map<S: PageSize>(
 	apic::ipi_tlb_flush();
 }
 
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
+	let flags = {
+		let mut flags = PageTableEntryFlags::empty();
+		flags.normal().writable().execute_disable();
+		flags
+	};
+
+	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
+
+	for virt_addr in virt_addrs {
+		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+		map::<S>(virt_addr, phys_addr, 1, flags);
+	}
+}
+
 unsafe fn recursive_page_table() -> RecursivePageTable<'static> {
 	let level_4_table_addr = 0xFFFF_FFFF_FFFF_F000;
 	let level_4_table_ptr = ptr::from_exposed_addr_mut(level_4_table_addr);
@@ -51,24 +51,10 @@ pub fn task_heap_end() -> VirtAddr {
 	unsafe { HEAP_END_ADDRESS }
 }
 
-fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
-	let flags = {
-		let mut flags = PageTableEntryFlags::empty();
-		flags.normal().writable().execute_disable();
-		flags
-	};
-
-	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
-
-	for virt_addr in virt_addrs {
-		let phys_addr =
-			arch::mm::physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
-		arch::mm::paging::map::<S>(virt_addr, phys_addr, 1, flags);
-	}
-}
-
 #[cfg(target_os = "none")]
 pub fn init() {
+	use crate::arch::mm::paging;
+
 	// Calculate the start and end addresses of the 2 MiB page(s) that map the kernel.
 	unsafe {
 		KERNEL_START_ADDRESS = env::get_base_address().align_down_to_large_page();
@@ -182,7 +168,7 @@ pub fn init() {
 
 	// try to map a huge page
 	let mut counter = if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
-		map_heap::<HugePageSize>(virt_addr, 1);
+		paging::map_heap::<HugePageSize>(virt_addr, 1);
 		HugePageSize::SIZE as usize
 	} else {
 		0
@@ -190,13 +176,13 @@ pub fn init() {
 
 	if counter == 0 && has_2mib_pages {
 		// fall back to large pages
-		map_heap::<LargePageSize>(virt_addr, 1);
+		paging::map_heap::<LargePageSize>(virt_addr, 1);
 		counter = LargePageSize::SIZE as usize;
 	}
 
 	if counter == 0 {
 		// fall back to normal pages, but map at least the size of a large page
-		map_heap::<BasePageSize>(
+		paging::map_heap::<BasePageSize>(
 			virt_addr,
 			LargePageSize::SIZE as usize / BasePageSize::SIZE as usize,
 		);
@@ -219,21 +205,21 @@ pub fn init() {
 		&& align_down!(map_addr.as_usize(), HugePageSize::SIZE as usize) == 0
 	{
 		let size = align_down!(map_size, HugePageSize::SIZE as usize);
-		map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize);
+		paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize);
 		map_size -= size;
 		map_addr += size;
 	}
 
 	if has_2mib_pages && map_size > LargePageSize::SIZE as usize {
 		let size = align_down!(map_size, LargePageSize::SIZE as usize);
-		map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize);
+		paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize);
 		map_size -= size;
 		map_addr += size;
 	}
 
 	if map_size > BasePageSize::SIZE as usize {
 		let size = align_down!(map_size, BasePageSize::SIZE as usize);
-		map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize);
+		paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize);
 		map_size -= size;
 		map_addr += size;
 	}
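For reference, an annotated copy of the moved function (same logic as the added hunks above; the comments are explanatory and not part of the commit):

	pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
		// Heap pages: normal memory, writable, no-execute.
		let flags = {
			let mut flags = PageTableEntryFlags::empty();
			flags.normal().writable().execute_disable();
			flags
		};

		// One virtual address per page: virt_addr, virt_addr + S::SIZE, ...
		let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);

		for virt_addr in virt_addrs {
			// Back each page with its own size-aligned physical frame;
			// unwrap panics if physical memory is exhausted.
			let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
			// Map exactly one page of size S at this address.
			map::<S>(virt_addr, phys_addr, 1, flags);
		}
	}

Allocating each frame with allocate_aligned(S::SIZE, S::SIZE) keeps the physical backing aligned to the page size, which the large- and huge-page paths in init() rely on: a 2 MiB or 1 GiB mapping must point at a correspondingly aligned physical frame.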