Unverified commit 353427d5, authored by jschwe and committed by GitHub

Cleanup (#66)

* Fix some typos

* Fix linter warning: Function returns () instead of !

* Replace assert!(x == y) with assert_eq!(x,y)
parent 39310f71
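The second and third bullets of the commit message are mechanical, but a short illustration may help while reading the diff. The sketch below is an editorial aside, not part of the commit: `check_alignment` and `leave_task_like` are made-up names, `page_size` is a made-up parameter, and `std::process::exit` merely stands in for the kernel's `core_scheduler().exit(0)` (both return `!`).

```rust
// Editorial sketch (plain Rust, no kernel types) illustrating the two
// patterns the commit applies throughout the diff below.

/// (a) `assert!(x == y)` vs. `assert_eq!(x, y)`: both panic when the values
/// differ, but `assert_eq!` also prints the left and right operands in the
/// panic message, which makes alignment failures easier to debug.
fn check_alignment(physical_address: usize, page_size: usize) {
    // Old style: panics with only the custom message.
    assert!(
        physical_address % page_size == 0,
        "Physical address is not on a page boundary (physical_address = {:#X})",
        physical_address
    );
    // New style: additionally reports the `left` and `right` values on failure.
    assert_eq!(
        physical_address % page_size,
        0,
        "Physical address is not on a page boundary (physical_address = {:#X})",
        physical_address
    );
}

/// (b) A function declared `-> !` must diverge. Writing `exit(0);` as a
/// statement leaves the (unreachable) unit value `()` as the body's result,
/// which is presumably what the "returns () instead of !" warning pointed at;
/// making the diverging call the tail expression (no trailing semicolon)
/// removes it.
#[allow(dead_code)]
fn leave_task_like() -> ! {
    std::process::exit(0)
}

fn main() {
    // Both asserts pass for an address aligned to the page size.
    check_alignment(0x4000, 0x1000);
}
```

The remaining hunks regroup `use` statements and fix typos and need no illustration.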
@@ -24,14 +24,15 @@
//! Architecture dependent interface to initialize a task
include!(concat!(env!("CARGO_TARGET_DIR"), "/config.rs"));
use alloc::rc::Rc;
use core::cell::RefCell;
use core::{mem, ptr};
use crate::arch::aarch64::kernel::percore::*;
use crate::arch::aarch64::kernel::processor;
use crate::scheduler::task::{Task, TaskFrame, TaskTLS};
use alloc::rc::Rc;
use core::cell::RefCell;
use core::{mem, ptr};
include!(concat!(env!("CARGO_TARGET_DIR"), "/config.rs"));
extern "C" {
static tls_start: u8;
@@ -70,7 +71,7 @@ impl Drop for TaskStacks {
}
extern "C" fn leave_task() -> ! {
-core_scheduler().exit(0);
+core_scheduler().exit(0)
}
extern "C" fn task_entry(func: extern "C" fn(usize), arg: usize) {
......
@@ -5,14 +5,15 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::marker::PhantomData;
use core::{fmt, ptr, usize};
use crate::arch::aarch64::kernel::percore::*;
use crate::arch::aarch64::kernel::processor;
use crate::arch::aarch64::mm::physicalmem;
use crate::arch::aarch64::mm::virtualmem;
use crate::mm;
use crate::scheduler;
use core::marker::PhantomData;
use core::{fmt, ptr, usize};
extern "C" {
#[linkage = "extern_weak"]
@@ -135,8 +136,9 @@ impl PageTableEntry {
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT, INNER_SHAREABLE, and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
// Verify that the offset bits for a 4 KiB page are zero.
-assert!(
-physical_address % BasePageSize::SIZE == 0,
+assert_eq!(
+physical_address % BasePageSize::SIZE,
+0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
@@ -371,7 +373,7 @@ impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
physical_address: usize,
flags: PageTableEntryFlags,
) {
-assert!(L::LEVEL == S::MAP_LEVEL);
+assert_eq!(L::LEVEL, S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
@@ -387,7 +389,7 @@ impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// This is the default implementation called only for L3Table.
/// It is overridden by a specialized implementation for all tables with sub tables (all except L3Table).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
-assert!(L::LEVEL == S::MAP_LEVEL);
+assert_eq!(L::LEVEL, S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
......
@@ -41,8 +41,9 @@ pub fn init_page_tables() {}
pub fn allocate(size: usize) -> usize {
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
@@ -61,14 +62,16 @@ pub fn allocate(size: usize) -> usize {
pub fn allocate_aligned(size: usize, alignment: usize) -> usize {
assert!(size > 0);
assert!(alignment > 0);
-assert!(
-size % alignment == 0,
+assert_eq!(
+size % alignment,
+0,
"Size {:#X} is not a multiple of the given alignment {:#X}",
size,
alignment
);
-assert!(
-alignment % BasePageSize::SIZE == 0,
+assert_eq!(
+alignment % BasePageSize::SIZE,
+0,
"Alignment {:#X} is not a multiple of {:#X}",
alignment,
BasePageSize::SIZE
@@ -97,8 +100,9 @@ pub fn deallocate(physical_address: usize, size: usize) {
physical_address
);
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
......
@@ -34,8 +34,9 @@ pub fn init() {
pub fn allocate(size: usize) -> usize {
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
@@ -62,15 +63,17 @@ pub fn deallocate(virtual_address: usize, size: usize) {
"Virtual address {:#X} is not < KERNEL_VIRTUAL_MEMORY_END",
virtual_address
);
-assert!(
-virtual_address % BasePageSize::SIZE == 0,
+assert_eq!(
+virtual_address % BasePageSize::SIZE,
+0,
"Virtual address {:#X} is not a multiple of {:#X}",
virtual_address,
BasePageSize::SIZE
);
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
@@ -94,15 +97,17 @@ pub fn reserve(virtual_address: usize, size: usize) {
"Virtual address {:#X} is not < KERNEL_VIRTUAL_MEMORY_END",
virtual_address
);
-assert!(
-virtual_address % BasePageSize::SIZE == 0,
+assert_eq!(
+virtual_address % BasePageSize::SIZE,
+0,
"Virtual address {:#X} is not a multiple of {:#X}",
virtual_address,
BasePageSize::SIZE
);
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
......
@@ -8,6 +8,8 @@
//! Architecture dependent interface to initialize a task
use core::{mem, ptr};
use crate::arch::x86_64::kernel::apic;
use crate::arch::x86_64::kernel::idt;
use crate::arch::x86_64::kernel::irq;
@@ -17,7 +19,6 @@ use crate::config::*;
use crate::environment;
use crate::mm;
use crate::scheduler::task::{Task, TaskFrame};
use core::{mem, ptr};
#[repr(C, packed)]
struct State {
@@ -331,7 +332,7 @@ extern "C" fn task_entry(func: extern "C" fn(usize), arg: usize) -> ! {
switch_to_kernel!();
// Exit task
-core_scheduler().exit(0);
+core_scheduler().exit(0)
}
impl TaskFrame for Task {
......
@@ -41,8 +41,8 @@ unsafe fn pre_init(boot_info: &'static mut BootInfo) -> ! {
BOOT_INFO = boot_info as *mut BootInfo;
if boot_info.cpu_online == 0 {
-boot_processor_main();
+boot_processor_main()
} else {
-application_processor_main();
+application_processor_main()
}
}
@@ -7,6 +7,13 @@
#![allow(dead_code)]
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use multiboot::Multiboot;
use x86::controlregs;
use x86::irq::PageFaultError;
use crate::arch::x86_64::kernel::apic;
use crate::arch::x86_64::kernel::get_mbinfo;
use crate::arch::x86_64::kernel::irq;
@@ -17,12 +24,6 @@ use crate::arch::x86_64::mm::physicalmem;
use crate::environment;
use crate::mm;
use crate::scheduler;
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use multiboot::Multiboot;
use x86::controlregs;
use x86::irq::PageFaultError;
/// Uhyve's address of the initial GDT
const BOOT_GDT: usize = 0x1000;
@@ -409,7 +410,7 @@ impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
-assert!(L::LEVEL == S::MAP_LEVEL);
+assert_eq!(L::LEVEL, S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
@@ -435,7 +436,7 @@ impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
-assert!(L::LEVEL == S::MAP_LEVEL);
+assert_eq!(L::LEVEL, S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
......
@@ -5,6 +5,9 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::sync::atomic::{AtomicUsize, Ordering};
use multiboot::{MemoryType, Multiboot};
use crate::arch::x86_64::kernel::{get_limit, get_mbinfo};
use crate::arch::x86_64::mm::paddr_to_slice;
use crate::arch::x86_64::mm::paging::{BasePageSize, PageSize};
@@ -12,8 +15,6 @@ use crate::collections::Node;
use crate::mm;
use crate::mm::freelist::{FreeList, FreeListEntry};
use crate::synch::spinlock::*;
use core::sync::atomic::{AtomicUsize, Ordering};
use multiboot::{MemoryType, Multiboot};
static PHYSICAL_FREE_LIST: SpinlockIrqSave<FreeList> = SpinlockIrqSave::new(FreeList::new());
static TOTAL_MEMORY: AtomicUsize = AtomicUsize::new(0);
@@ -87,8 +88,9 @@ pub fn total_memory_size() -> usize {
pub fn allocate(size: usize) -> Result<usize, ()> {
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
@@ -100,14 +102,16 @@ pub fn allocate_aligned(size: usize, alignment: usize) -> Result<usize, ()> {
pub fn allocate_aligned(size: usize, alignment: usize) -> Result<usize, ()> {
assert!(size > 0);
assert!(alignment > 0);
-assert!(
-size % alignment == 0,
+assert_eq!(
+size % alignment,
+0,
"Size {:#X} is not a multiple of the given alignment {:#X}",
size,
alignment
);
-assert!(
-alignment % BasePageSize::SIZE == 0,
+assert_eq!(
+alignment % BasePageSize::SIZE,
+0,
"Alignment {:#X} is not a multiple of {:#X}",
alignment,
BasePageSize::SIZE
@@ -125,8 +129,9 @@ pub fn deallocate(physical_address: usize, size: usize) {
physical_address
);
assert!(size > 0);
-assert!(
-size % BasePageSize::SIZE == 0,
+assert_eq!(
+size % BasePageSize::SIZE,
+0,
"Size {:#X} is not a multiple of {:#X}",
size,
BasePageSize::SIZE
......
@@ -130,7 +130,7 @@ mod test {
fn memcpy_and_memcmp_arrays() {
let (src, mut dst) = ([b'X'; 100], [b'Y'; 100]);
unsafe {
-assert!(memcmp(src.as_ptr(), dst.as_ptr(), 100) != 0);
+assert_ne!(memcmp(src.as_ptr(), dst.as_ptr(), 100), 0);
let _ = memcpy(dst.as_mut_ptr(), src.as_ptr(), 100);
assert_eq!(memcmp(src.as_ptr(), dst.as_ptr(), 100), 0);
}
......
@@ -6,7 +6,11 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
pub mod task;
use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use core::cell::RefCell;
use core::sync::atomic::{AtomicU32, Ordering};
use crate::arch;
use crate::arch::irq;
@@ -16,11 +20,8 @@ use crate::collections::AvoidInterrupts;
use crate::config::*;
use crate::scheduler::task::*;
use crate::synch::spinlock::*;
use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use core::cell::RefCell;
use core::sync::atomic::{AtomicU32, Ordering};
pub mod task;
/// Time slice of a task in microseconds.
/// When this time has elapsed and the scheduler is called, it may switch to another ready task.
@@ -373,7 +374,7 @@ impl PerCoreScheduler {
}
/// Only the idle task should call this function to
-/// reschdule the system. Set the idle task in halt
+/// reschedule the system. Set the idle task in halt
/// state by leaving this function.
pub fn reschedule_and_wait(&mut self) {
irq::disable();
......
@@ -8,11 +8,12 @@
// The implementation is inspired by Andrew D. Birrell's paper
// "Implementing Condition Variables with Semaphores"
use crate::synch::semaphore::Semaphore;
use alloc::boxed::Box;
use core::mem;
use core::sync::atomic::{AtomicIsize, Ordering};
use crate::synch::semaphore::Semaphore;
struct CondQueue {
counter: AtomicIsize,
sem1: Semaphore,
@@ -32,7 +33,7 @@ impl CondQueue {
unsafe fn __sys_destroy_queue(ptr: usize) -> i32 {
let id = ptr as *mut usize;
if id.is_null() {
debug!("sys_wait: ivalid address to condition variable");
debug!("sys_wait: invalid address to condition variable");
return -1;
}
@@ -91,7 +92,7 @@ pub unsafe fn sys_notify(ptr: usize, count: i32) -> i32 {
unsafe fn __sys_init_queue(ptr: usize) -> i32 {
let id = ptr as *mut usize;
if id.is_null() {
debug!("sys_init_queue: ivalid address to condition variable");
debug!("sys_init_queue: invalid address to condition variable");
return -1;
}
......
@@ -7,24 +7,25 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
mod generic;
mod uhyve;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::{TryFrom, TryInto};
use core::fmt::Write;
use core::{isize, ptr, slice, str};
pub use self::generic::*;
pub use self::uhyve::*;
use crate::arch;
use crate::console;
use crate::environment;
use crate::errno::*;
use crate::synch::spinlock::SpinlockIrqSave;
use crate::syscalls::fs::{self, FilePerms, PosixFile, SeekWhence};
use crate::util;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::{TryFrom, TryInto};
use core::fmt::Write;
use core::{isize, ptr, slice, str};
use crate::syscalls::fs::{self, FilePerms, PosixFile, SeekWhence};
pub use self::generic::*;
pub use self::uhyve::*;
mod generic;
mod uhyve;
static DRIVER_LOCK: SpinlockIrqSave<()> = SpinlockIrqSave::new(());
@@ -114,7 +115,7 @@ pub trait SyscallInterface: Send + Sync {
}
fn shutdown(&self, _arg: i32) -> ! {
-arch::processor::shutdown();
+arch::processor::shutdown()
}
fn get_mac_address(&self) -> Result<[u8; 6], ()> {
......
use core::convert::TryInto;
use core::isize;
#[cfg(feature = "newlib")]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::{AtomicU32, Ordering};
// Copyright (c) 2018 Colin Finck, RWTH Aachen University
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
@@ -15,11 +21,6 @@ use crate::scheduler;
use crate::scheduler::task::{Priority, TaskId};
use crate::syscalls;
use crate::syscalls::timer::timespec;
use core::convert::TryInto;
use core::isize;
#[cfg(feature = "newlib")]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::{AtomicU32, Ordering};
#[cfg(feature = "newlib")]
pub type SignalHandler = extern "C" fn(i32);
@@ -56,7 +57,7 @@ pub extern "C" fn sys_setprio(_id: *const Tid, _prio: i32) -> i32 {
fn __sys_exit(arg: i32) -> ! {
debug!("Exit program with error code {}!", arg);
-syscalls::__sys_shutdown(arg);
+syscalls::__sys_shutdown(arg)
}
#[no_mangle]
......