Commit 94f1a6f7 authored by Stefan Lankes's avatar Stefan Lankes
Browse files

revise bootloader, copy temporary kernel to the end of the module

- afterwards, we copy it back to the suggested virtual address
- unneeded, because we have already position independent code
parent e54742a0
......@@ -49,12 +49,12 @@ pub fn output_message_byte(byte: u8) {
COM1.write_byte(byte);
}
pub unsafe fn find_kernel() -> usize {
pub unsafe fn find_kernel() -> (usize, usize) {
// Identity-map the Multiboot information.
assert!(mb_info > 0, "Could not find Multiboot information");
loaderlog!("Found Multiboot information at 0x{:x}", mb_info);
let page_address = align_down!(mb_info, BasePageSize::SIZE);
paging::map::<BasePageSize>(page_address, page_address, 1, PageTableEntryFlags::empty());
paging::map::<BasePageSize>(page_address, page_address, 1, PageTableEntryFlags::WRITABLE);
// Load the Multiboot information and identity-map the modules information.
let multiboot = Multiboot::new(mb_info as u64, paddr_to_slice).unwrap();
......@@ -106,66 +106,37 @@ pub unsafe fn find_kernel() -> usize {
assert!(start_address > 0);
loaderlog!("Found an ELF module at 0x{:x}", start_address);
let page_address = align_down!(start_address, BasePageSize::SIZE);
paging::map::<BasePageSize>(page_address, page_address, 1, PageTableEntryFlags::empty());
start_address
}
pub unsafe fn move_kernel(
physical_address: usize,
virtual_address: usize,
mem_size: usize,
file_size: usize,
) -> usize {
// We want to move the application to realize a identify mapping
let page_count = align_up!(mem_size, LargePageSize::SIZE) / LargePageSize::SIZE;
loaderlog!("Use {} large pages for the application.", page_count);
paging::map::<LargePageSize>(
virtual_address,
virtual_address,
page_count,
PageTableEntryFlags::WRITABLE,
let counter =
(align_up!(start_address, LargePageSize::SIZE) - page_address) / BasePageSize::SIZE;
paging::map::<BasePageSize>(
page_address,
page_address,
counter,
PageTableEntryFlags::empty(),
);
for i in (0..align_up!(file_size, BasePageSize::SIZE) / BasePageSize::SIZE).rev() {
let tmp = 0x2000;
paging::map::<BasePageSize>(
tmp,
align_down!(physical_address, BasePageSize::SIZE) + i * BasePageSize::SIZE,
1,
PageTableEntryFlags::WRITABLE,
);
for j in 0..BasePageSize::SIZE {
*((virtual_address + i * BasePageSize::SIZE + j) as *mut u8) =
*((tmp + j) as *const u8);
}
// map also the rest of the module
let address = align_up!(start_address, LargePageSize::SIZE);
let counter = (align_up!(end_address, LargePageSize::SIZE) - address) / LargePageSize::SIZE;
if counter > 0 {
paging::map::<LargePageSize>(address, address, counter, PageTableEntryFlags::WRITABLE);
}
// clear rest of the kernel
let start = file_size;
let end = mem_size;
loaderlog!(
"Clear BSS from 0x{:x} to 0x{:x}",
virtual_address + start,
virtual_address + end
);
for i in start..end {
*((virtual_address + i) as *mut u8) = 0;
}
(start_address, end_address)
}
virtual_address
/// Identity-maps `memory_size` bytes starting at `address` using large pages.
///
/// `address` is first rounded up to a large-page boundary and the mapping is
/// created writable. Returns the aligned start address of the mapped region.
///
/// # Safety
/// Manipulates the page tables directly; the caller must ensure the region
/// may be identity-mapped without clobbering existing mappings.
pub unsafe fn map_memory(address: usize, memory_size: usize) -> usize {
    let start = align_up!(address, LargePageSize::SIZE);
    let pages = align_up!(memory_size, LargePageSize::SIZE) / LargePageSize::SIZE;

    paging::map::<LargePageSize>(start, start, pages, PageTableEntryFlags::WRITABLE);

    start
}
pub unsafe fn boot_kernel(
new_physical_address: usize,
virtual_address: usize,
mem_size: usize,
entry_point: usize,
) {
pub unsafe fn boot_kernel(virtual_address: usize, mem_size: usize, entry_point: usize) -> ! {
// Supply the parameters to the HermitCore application.
BOOT_INFO.base = new_physical_address as u64;
BOOT_INFO.base = virtual_address as u64;
BOOT_INFO.image_size = mem_size as u64;
BOOT_INFO.mb_info = mb_info as u64;
BOOT_INFO.current_stack_address = (virtual_address - KERNEL_STACK_SIZE) as u64;
......@@ -199,5 +170,10 @@ pub unsafe fn boot_kernel(
"Jumping to HermitCore Application Entry Point at 0x{:x}",
entry_point
);
llvm_asm!("jmp *$0" :: "r"(entry_point), "{rdi}"(&BOOT_INFO as *const _ as u64) : "memory" : "volatile");
let func: extern "C" fn(boot_info: &'static mut BootInfo) -> ! =
core::mem::transmute(entry_point);
func(&mut BOOT_INFO);
// we never reach this point
}
......@@ -38,8 +38,9 @@ mod runtime_glue;
// IMPORTS
use crate::arch::paging::{BasePageSize, LargePageSize, PageSize};
use crate::arch::{BOOT_INFO, ELF_ARCH};
use crate::arch::{map_memory, BOOT_INFO, ELF_ARCH};
use crate::elf::*;
use core::intrinsics::{copy_nonoverlapping, write_bytes};
use core::ptr;
extern "C" {
......@@ -57,6 +58,46 @@ pub unsafe fn sections_init() {
);
}
/// Verifies that the module at `header_start` is a HermitCore ELF executable
/// and copies its loadable segments into freshly mapped memory at
/// `start_address` (rounded up to a large-page boundary by `map_memory`).
///
/// Returns the address the kernel image was actually loaded to.
///
/// # Safety
/// Dereferences raw pointers derived from `header_start` and writes through
/// raw pointers into the newly mapped region; the caller must pass the start
/// of a valid, readable ELF image and a `mem_size` large enough for all
/// `PT_LOAD` segments.
pub unsafe fn load_kernel(header_start: usize, start_address: usize, mem_size: usize) -> usize {
    let header = &*(header_start as *const ElfHeader);

    // Accept only 64-bit little-endian HermitCore executables built for this
    // architecture.
    assert!(header.ident.magic == ELF_MAGIC);
    assert!(header.ident._class == ELF_CLASS_64);
    assert!(header.ident.data == ELF_DATA_2LSB);
    assert!(header.ident.pad[0] == ELF_PAD_HERMIT);
    assert!(header.ty == ELF_ET_EXEC);
    assert!(header.machine == ELF_ARCH);

    // Identity-map a writable target region for the kernel image.
    let address = map_memory(start_address, mem_size);
    // Fixed log-message typo: "as" -> "at".
    loaderlog!("Load HermitCore Application at 0x{:x}", address);

    // Virtual base of the first PT_LOAD segment; later segments are placed
    // at their offset relative to this base.
    let mut virtual_address = 0;

    for i in 0..header.ph_entry_count {
        let program_header = &*((header_start
            + header.ph_offset
            + (i * header.ph_entry_size) as usize) as *const ElfProgramHeader);
        if program_header.ty == ELF_PT_LOAD {
            if virtual_address == 0 {
                virtual_address = program_header.virt_addr;
            }

            // Offset of this segment within the loaded image.
            let pos = program_header.virt_addr - virtual_address;

            // Copy the file-backed part of the segment ...
            copy_nonoverlapping(
                (header_start + program_header.offset) as *const u8,
                (address + pos) as *mut u8,
                program_header.file_size,
            );
            // ... and zero the remainder (.bss-like tail of the segment).
            write_bytes(
                (address + pos + program_header.file_size) as *mut u8,
                0,
                program_header.mem_size - program_header.file_size,
            );
        }
    }

    address
}
pub unsafe fn check_kernel_elf_file(start_address: usize) -> (usize, usize, usize, usize, usize) {
// Verify that this module is a HermitCore ELF executable.
let header = &*(start_address as *const ElfHeader);
......
......@@ -12,22 +12,36 @@
extern crate rusty_loader;
use core::intrinsics::copy_nonoverlapping;
use rusty_loader::arch;
use rusty_loader::*;
/// Entry Point of the HermitCore Loader
/// (called from entry.asm or entry.S)
#[no_mangle]
pub unsafe extern "C" fn loader_main() {
pub unsafe extern "C" fn loader_main() -> ! {
sections_init();
arch::message_output_init();
loaderlog!("Started");
let start_address = arch::find_kernel();
let (physical_address, virtual_address, file_size, mem_size, entry_point) =
let (start_address, end_address) = arch::find_kernel();
let (_physical_address, virtual_address, _file_size, mem_size, entry_point) =
check_kernel_elf_file(start_address);
let new_physical_address =
arch::move_kernel(physical_address, virtual_address, mem_size, file_size);
arch::boot_kernel(new_physical_address, virtual_address, mem_size, entry_point);
let kernel_location = load_kernel(start_address, end_address, mem_size);
// move kernel to the virtual address
// TODO: if we have position independent code => moving isn't required
loaderlog!(
"Move kernel form 0x{:x} to 0x{:x}",
kernel_location,
virtual_address
);
copy_nonoverlapping(
kernel_location as *const u8,
virtual_address as *mut u8,
mem_size,
);
arch::boot_kernel(virtual_address, mem_size, entry_point);
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment