Skip to content
Snippets Groups Projects
Commit 20c9d8e8 authored by Stefan Lankes's avatar Stefan Lankes
Browse files

add system calls to change the priority of a task

parent f71fe60b
No related branches found
No related tags found
No related merge requests found
......@@ -20,8 +20,8 @@ pub mod task;
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
/// Map between Core ID and per-core scheduler
static mut SCHEDULERS: Vec<&PerCoreScheduler> = Vec::new();
/// Map between Task ID and Task Control Block
static TASKS: SpinlockIrqSave<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
/// Map between Task ID and TaskHandle + Queue of waiting tasks
static TASKS: SpinlockIrqSave<BTreeMap<TaskId, (TaskHandle, VecDeque<TaskHandle>)>> =
SpinlockIrqSave::new(BTreeMap::new());
/// Unique identifier for a core.
......@@ -95,7 +95,13 @@ impl PerCoreScheduler {
let wakeup = {
#[cfg(feature = "smp")]
let mut input_locked = get_scheduler(core_id).input.lock();
TASKS.lock().insert(tid, VecDeque::with_capacity(1));
TASKS.lock().insert(
tid,
(
TaskHandle::new(tid, prio, core_id),
VecDeque::with_capacity(1),
),
);
NO_TASKS.fetch_add(1, Ordering::SeqCst);
#[cfg(feature = "smp")]
......@@ -189,7 +195,13 @@ impl PerCoreScheduler {
let wakeup = {
#[cfg(feature = "smp")]
let mut input_locked = get_scheduler(core_id).input.lock();
TASKS.lock().insert(tid, VecDeque::with_capacity(1));
TASKS.lock().insert(
tid,
(
TaskHandle::new(tid, current_task_borrowed.prio, core_id),
VecDeque::with_capacity(1),
),
);
NO_TASKS.fetch_add(1, Ordering::SeqCst);
#[cfg(feature = "smp")]
if core_id != core_scheduler().core_id {
......@@ -313,6 +325,45 @@ impl PerCoreScheduler {
.as_u64();
}
/// Change the priority of the task that is currently running on this core.
///
/// Interrupts are disabled for the duration of the update so the
/// scheduler cannot preempt us while the TCB is being modified.
pub fn set_current_task_priority(&mut self, prio: Priority) {
	irqsave(|| {
		info!("Change priority of the current task");
		let mut current = self.current_task.borrow_mut();
		current.prio = prio;
	});
}
/// Change the priority of the task identified by `id` to `prio`.
///
/// Returns `Err(())` if no task with this identifier is registered.
/// With the "smp" feature enabled, the priority is only changed when
/// the task lives on this core; for a task on another core a warning
/// is logged and the priority is left untouched.
pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
	// Fixed typo in the log message ("Chabge" -> "Change").
	info!("Change priority of task {} to priority {}", id, prio);

	irqsave(|| {
		let task = get_task_handle(id).ok_or(())?;

		#[cfg(feature = "smp")]
		if task.get_core_id() == self.core_id {
			if self.current_task.borrow().id == task.get_id() {
				// The task is running right now; patch its TCB directly.
				self.current_task.borrow_mut().prio = prio;
			} else {
				self.ready_queue
					.set_priority(task, prio)
					.expect("Unable to find task in ready queue");
			}
		} else {
			// NOTE(review): cross-core priority changes are not implemented.
			warn!("Have to change the priority on another core");
		}

		#[cfg(not(feature = "smp"))]
		if self.current_task.borrow().id == task.get_id() {
			self.current_task.borrow_mut().prio = prio;
		} else {
			self.ready_queue
				.set_priority(task, prio)
				.expect("Unable to find task in ready queue");
		}

		Ok(())
	})
}
/// Save the FPU context for the current FPU owner and restore it for the current task,
/// which wants to use the FPU now.
pub fn fpu_switch(&mut self) {
......@@ -340,7 +391,7 @@ impl PerCoreScheduler {
debug!("Cleaning up task {}", borrowed.id);
// wakeup tasks, which are waiting for task with the identifier id
if let Some(mut queue) = TASKS.lock().remove(&borrowed.id) {
if let Some((_, mut queue)) = TASKS.lock().remove(&borrowed.id) {
while let Some(task) = queue.pop_front() {
result = true;
self.custom_wakeup(task);
......@@ -528,7 +579,13 @@ pub fn add_current_core() {
let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));
// Add the ID -> Task mapping.
TASKS.lock().insert(tid, VecDeque::with_capacity(1));
TASKS.lock().insert(
tid,
(
TaskHandle::new(tid, IDLE_PRIO, core_id),
VecDeque::with_capacity(1),
),
);
// Initialize a scheduler for this core.
debug!(
"Initializing scheduler for core {} with idle task {}",
......@@ -580,7 +637,7 @@ pub fn join(id: TaskId) -> Result<(), ()> {
{
let mut guard = TASKS.lock();
match guard.get_mut(&id) {
Some(queue) => {
Some((_, queue)) => {
queue.push_back(core_scheduler.get_current_task_handle());
core_scheduler.block_current_task(None);
}
......@@ -595,3 +652,12 @@ pub fn join(id: TaskId) -> Result<(), ()> {
Ok(())
}
/// Look up the `TaskHandle` registered in `TASKS` for the given task id.
fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
	TASKS.lock().get(&id).map(|(handle, _)| *handle)
}
......@@ -331,6 +331,55 @@ impl PriorityTaskQueue {
IDLE_PRIO
}
}
/// Change the priority of a specific task.
///
/// Walks the queue that corresponds to the task's *current* priority,
/// unlinks the matching node from that doubly linked list, stores the
/// new priority, and re-inserts the node via `push` (which places it in
/// the queue for the new priority). Returns `Err(())` if the task is
/// not found in the expected queue.
pub fn set_priority(&mut self, handle: TaskHandle, prio: Priority) -> Result<(), ()> {
	// Index of the queue the task is currently stored in.
	let i = handle.get_priority().into() as usize;

	// BUGFIX: the previous implementation shadowed its cursor inside the
	// loop body, so the cursor was dropped at the end of every iteration
	// and the loop re-examined the head node forever whenever the target
	// task was not at the head of the list. Use an owned cursor instead.
	let mut cursor = self.queues[i].head.as_ref().cloned();

	while let Some(node) = cursor {
		if handle.id == node.borrow().id {
			let task = node.clone();
			{
				let mut borrow = task.borrow_mut();

				// Splice the neighbours around the node.
				let next = borrow.next.as_ref().cloned();
				if let Some(prev) = borrow.prev.as_mut() {
					prev.borrow_mut().next = next;
				}

				let prev = borrow.prev.as_ref().cloned();
				if let Some(next) = borrow.next.as_mut() {
					next.borrow_mut().prev = prev;
				}

				if borrow.prev.is_none() {
					// Ok, the task is head of the list
					self.queues[i].head = borrow.next.as_ref().cloned();
				}

				if borrow.next.is_none() {
					// Ok, the task is tail of the list
					self.queues[i].tail = borrow.prev.as_ref().cloned();
				}

				// Fully detach the node and record its new priority.
				borrow.prio = prio;
				borrow.next = None;
				borrow.prev = None;
			}

			// Re-insert into the queue matching the new priority.
			self.push(task);

			return Ok(());
		}

		// Advance to the next node in the list.
		cursor = node.borrow().next.as_ref().cloned();
	}

	Err(())
}
}
/// A task control block, which identifies either a process or a thread
......
......@@ -31,6 +31,7 @@ pub extern "C" fn sys_getpid() -> Tid {
kernel_function!(__sys_getpid())
}
#[cfg(feature = "newlib")]
extern "C" fn __sys_getprio(id: *const Tid) -> i32 {
let task = core_scheduler().get_current_task_handle();
......@@ -41,11 +42,13 @@ extern "C" fn __sys_getprio(id: *const Tid) -> i32 {
}
}
#[cfg(feature = "newlib")]
#[no_mangle]
pub extern "C" fn sys_getprio(id: *const Tid) -> i32 {
kernel_function!(__sys_getprio(id))
}
#[cfg(feature = "newlib")]
#[no_mangle]
pub extern "C" fn sys_setprio(_id: *const Tid, _prio: i32) -> i32 {
-ENOSYS
......@@ -281,8 +284,9 @@ pub extern "C" fn sys_join(id: Tid) -> i32 {
kernel_function!(__sys_join(id))
}
/// Mapping between TaskID and TaskHandle
static TASKS: SpinlockIrqSave<BTreeMap<TaskId, TaskHandle>> = SpinlockIrqSave::new(BTreeMap::new());
/// Mapping between blocked tasks and their TaskHandle
static BLOCKED_TASKS: SpinlockIrqSave<BTreeMap<TaskId, TaskHandle>> =
SpinlockIrqSave::new(BTreeMap::new());
extern "C" fn __sys_block_current_task(timeout: &Option<u64>) {
let wakeup_time = timeout.map(|t| arch::processor::get_timer_ticks() + t * 1000);
......@@ -290,7 +294,7 @@ extern "C" fn __sys_block_current_task(timeout: &Option<u64>) {
let handle = core_scheduler.get_current_task_handle();
let tid = core_scheduler.get_current_task_id();
TASKS.lock().insert(tid, handle);
BLOCKED_TASKS.lock().insert(tid, handle);
core_scheduler.block_current_task(wakeup_time);
}
......@@ -309,7 +313,7 @@ pub extern "C" fn sys_block_current_task_with_timeout(timeout: u64) {
extern "C" fn __sys_wakeup_task(id: Tid) {
let task_id = TaskId::from(id);
if let Some(handle) = TASKS.lock().remove(&task_id) {
if let Some(handle) = BLOCKED_TASKS.lock().remove(&task_id) {
core_scheduler().custom_wakeup(handle);
}
}
......@@ -329,3 +333,33 @@ extern "C" fn __sys_get_priority() -> u8 {
pub extern "C" fn sys_get_priority() -> u8 {
kernel_function!(__sys_get_priority())
}
/// Kernel-side implementation of `sys_set_priority`.
///
/// Panics on a zero priority; otherwise delegates to the scheduler.
extern "C" fn __sys_set_priority(id: Tid, prio: u8) {
	// Guard clause: `prio == 0` is the exact complement of `prio > 0` for u8.
	if prio == 0 {
		panic!("Invalid priority {}", prio);
	}

	core_scheduler()
		.set_priority(TaskId::from(id), Priority::from(prio))
		.expect("Unable to set priority");
}
/// Set priority of the thread with the identifier `id`
///
/// C ABI entry point; forwards to `__sys_set_priority` through the
/// `kernel_function!` wrapper (presumably switches to kernel context —
/// see the macro definition).
#[no_mangle]
pub extern "C" fn sys_set_priority(id: Tid, prio: u8) {
	kernel_function!(__sys_set_priority(id, prio))
}
/// Kernel-side implementation of `sys_set_current_task_priority`.
///
/// Panics on a zero priority; otherwise updates the running task.
extern "C" fn __sys_set_current_task_priority(prio: u8) {
	// Guard clause: `prio == 0` is the exact complement of `prio > 0` for u8.
	if prio == 0 {
		panic!("Invalid priority {}", prio);
	}

	core_scheduler().set_current_task_priority(Priority::from(prio));
}
/// Set priority of the current thread
///
/// C ABI entry point; forwards to `__sys_set_current_task_priority`
/// through the `kernel_function!` wrapper (presumably switches to
/// kernel context — see the macro definition).
#[no_mangle]
pub extern "C" fn sys_set_current_task_priority(prio: u8) {
	kernel_function!(__sys_set_current_task_priority(prio))
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment