Commit 7ce75981 authored by Martin Kröning

Reimplement synch::spinlock with lock_api

parent b4e94471
@@ -212,6 +212,7 @@ dependencies = [
"hashbrown",
"hermit-entry",
"include-transformed",
"lock_api",
"log",
"multiboot",
"num",
@@ -233,6 +234,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "955be5d0ca0465caf127165acb47964f911e2bc26073e865deb8be7189302faf"
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
......
@@ -73,6 +73,7 @@ crossbeam-utils = { version = "0.8", default-features = false }
hashbrown = { version = "0.12", default-features = false }
hermit-entry = { version = "0.9", features = ["kernel"] }
include-transformed = { version = "0.2", optional = true }
lock_api = "0.4"
log = { version = "0.4", default-features = false }
num = { version = "0.4", default-features = false }
num-derive = { version = "0.3", optional = true }
......
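The new `lock_api` dependency supplies the generic `Mutex`/`MutexGuard` types and the `RawMutex` trait used by the rest of this commit: a raw lock implements only the locking protocol, and `lock_api::Mutex` layers the protected data and RAII guards on top. Below is a minimal sketch of that contract using a plain test-and-set flag; it is only an illustration, not the ticket lock introduced later in this commit, and the `RawSpinlock`/`ExampleSpinlock` names are made up here.

```
use core::sync::atomic::{AtomicBool, Ordering};

use lock_api::{GuardSend, RawMutex};

// Illustrative raw lock: a simple test-and-set flag. It only encodes the
// locking protocol; the data and guards come from `lock_api::Mutex`.
pub struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: Self = Self(AtomicBool::new(false));

    // The guard may be sent to another thread.
    type GuardMarker = GuardSend;

    fn lock(&self) {
        // Spin until the flag flips from `false` to `true`.
        while self
            .0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
    }

    fn try_lock(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

// `lock_api::Mutex` turns the raw lock into a data-carrying mutex with guards.
pub type ExampleSpinlock<T> = lock_api::Mutex<RawSpinlock, T>;
pub type ExampleSpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
```

The commit's `RawTicketMutex` follows the same pattern, but with FIFO tickets and crossbeam's `Backoff` instead of a bare test-and-set loop.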
@@ -87,7 +87,7 @@ async fn network_run() {
#[inline]
pub(crate) fn network_poll() {
if let Ok(mut guard) = NIC.try_lock() {
if let Some(mut guard) = NIC.try_lock() {
if let NetworkState::Initialized(nic) = guard.deref_mut() {
let time = now();
nic.poll_common(time);
......
@@ -710,7 +710,7 @@ impl BlockedTaskQueue {
let mut cursor = self.list.cursor_front_mut();
#[cfg(feature = "tcp")]
if let Ok(mut guard) = crate::net::NIC.try_lock() {
if let Some(mut guard) = crate::net::NIC.try_lock() {
if let crate::net::NetworkState::Initialized(nic) = guard.deref_mut() {
let time = crate::net::now();
nic.poll_common(time);
......
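Both call-site hunks above change for the same reason: the hand-rolled `try_lock` returned `Result<SpinlockGuard<'_, T>, ()>`, while `lock_api::Mutex::try_lock` returns `Option<MutexGuard<'_, R, T>>`, so the patterns switch from `Ok(..)` to `Some(..)`. A small sketch of the new call-site shape, reusing the illustrative `ExampleSpinlock` alias from above (the function and counter are hypothetical):

```
// Non-blocking acquisition with lock_api: `None` means the lock is currently
// held elsewhere, so the caller simply skips the protected work.
fn poll_once(counter: &ExampleSpinlock<u64>) {
    if let Some(mut guard) = counter.try_lock() {
        *guard += 1;
    }
}
```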
#![allow(dead_code)]
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::arch::irq;
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::Sync;
use core::ops::{Deref, DerefMut, Drop};
use core::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_utils::{Backoff, CachePadded};
use crossbeam_utils::Backoff;
use lock_api::{GuardSend, Mutex, MutexGuard, RawMutex};
/// This type provides a lock based on busy waiting to realize mutual exclusion of tasks.
///
/// # Description
///
/// This structure behaves a lot like a normal Mutex. There are some differences:
///
/// - By using busy waiting, it can be used outside the runtime.
/// - It is a so-called ticket lock (<https://en.wikipedia.org/wiki/Ticket_lock>)
/// and completely fair.
///
/// The interface is derived from <https://mvdnes.github.io/rust-docs/spin-rs/spin/index.html>.
///
/// # Simple examples
///
/// ```
/// let spinlock = synch::Spinlock::new(0);
///
/// // Modify the data
/// {
/// let mut data = spinlock.lock();
/// *data = 2;
/// }
///
/// // Read the data
/// let answer =
/// {
/// let data = spinlock.lock();
/// *data
/// };
///
/// assert_eq!(answer, 2);
/// ```
pub struct Spinlock<T: ?Sized> {
queue: CachePadded<AtomicUsize>,
dequeue: CachePadded<AtomicUsize>,
data: UnsafeCell<T>,
}
use crate::arch::irq;
/// A guard through which the protected data can be accessed
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinlockGuard<'a, T: ?Sized> {
dequeue: &'a CachePadded<AtomicUsize>,
ticket: usize,
data: &'a mut T,
/// Based on `spin::mutex::TicketMutex`, but with backoff.
pub struct RawTicketMutex {
next_ticket: AtomicUsize,
next_serving: AtomicUsize,
}
// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}
unsafe impl RawMutex for RawTicketMutex {
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self = Self {
next_ticket: AtomicUsize::new(0),
next_serving: AtomicUsize::new(0),
};
impl<T> Spinlock<T> {
pub const fn new(user_data: T) -> Spinlock<T> {
Spinlock {
queue: CachePadded::new(AtomicUsize::new(0)),
dequeue: CachePadded::new(AtomicUsize::new(1)),
data: UnsafeCell::new(user_data),
}
}
type GuardMarker = GuardSend;
/// Consumes this mutex, returning the underlying data.
#[allow(dead_code)]
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let Spinlock { data, .. } = self;
data.into_inner()
}
}
#[inline]
fn lock(&self) {
let ticket = self.next_ticket.fetch_add(1, Ordering::Relaxed);
impl<T: ?Sized> Spinlock<T> {
pub fn lock(&self) -> SpinlockGuard<'_, T> {
let backoff = Backoff::new();
let ticket = self.queue.fetch_add(1, Ordering::Relaxed) + 1;
while self.dequeue.load(Ordering::Acquire) != ticket {
while self.next_serving.load(Ordering::Acquire) != ticket {
backoff.spin();
}
SpinlockGuard {
dequeue: &self.dequeue,
ticket,
data: unsafe { &mut *self.data.get() },
}
}
pub fn try_lock(&self) -> Result<SpinlockGuard<'_, T>, ()> {
self.queue
#[inline]
fn try_lock(&self) -> bool {
let ticket = self
.next_ticket
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
if self.dequeue.load(Ordering::Acquire) == ticket + 1 {
if self.next_serving.load(Ordering::Acquire) == ticket {
Some(ticket + 1)
} else {
None
}
})
.map(|ticket| SpinlockGuard {
dequeue: &self.dequeue,
ticket: ticket + 1,
data: unsafe { &mut *self.data.get() },
})
.map_err(|_| {})
}
}
});
impl<T: ?Sized + fmt::Debug> fmt::Debug for Spinlock<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "queue: {} ", self.queue.load(Ordering::Relaxed))?;
write!(f, "dequeue: {}", self.dequeue.load(Ordering::Relaxed))
ticket.is_ok()
}
}
impl<T: ?Sized + Default> Default for Spinlock<T> {
fn default() -> Spinlock<T> {
Spinlock::new(Default::default())
}
}
impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
&*self.data
}
}
impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
&mut *self.data
#[inline]
unsafe fn unlock(&self) {
self.next_serving.fetch_add(1, Ordering::Release);
}
}
impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
/// The dropping of the SpinlockGuard will release the lock it was created from.
fn drop(&mut self) {
self.dequeue.store(self.ticket + 1, Ordering::Release);
#[inline]
fn is_locked(&self) -> bool {
let ticket = self.next_ticket.load(Ordering::Relaxed);
self.next_serving.load(Ordering::Relaxed) != ticket
}
}
/// This type provides a lock based on busy waiting to realize mutual exclusion of tasks.
///
/// # Description
///
/// This structure behaves a lot like a normal Mutex. There are some differences:
///
/// - Interrupt-saving lock => interrupts are disabled while the lock is held
/// - By using busy waiting, it can be used outside the runtime.
/// - It is a so-called ticket lock (<https://en.wikipedia.org/wiki/Ticket_lock>)
/// and completely fair.
///
/// The interface is derived from <https://mvdnes.github.io/rust-docs/spin-rs/spin/index.html>.
///
/// # Simple examples
///
/// ```
/// let spinlock = synch::SpinlockIrqSave::new(0);
///
/// // Modify the data
/// {
/// let mut data = spinlock.lock();
/// *data = 2;
/// }
///
/// // Read the data
/// let answer =
/// {
/// let data = spinlock.lock();
/// *data
/// };
///
/// assert_eq!(answer, 2);
/// ```
pub struct SpinlockIrqSave<T: ?Sized> {
queue: CachePadded<AtomicUsize>,
dequeue: CachePadded<AtomicUsize>,
data: UnsafeCell<T>,
}
pub type Spinlock<T> = Mutex<RawTicketMutex, T>;
pub type SpinlockGuard<'a, T> = MutexGuard<'a, RawTicketMutex, T>;
/// A guard through which the protected data can be accessed
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinlockIrqSaveGuard<'a, T: ?Sized> {
dequeue: &'a CachePadded<AtomicUsize>,
ticket: usize,
irq: bool,
data: &'a mut T,
/// An interrupt-safe mutex.
pub struct RawInterruptMutex<M> {
inner: M,
interrupts: AtomicBool,
}
// Same unsafe impls as `Spinlock`
unsafe impl<T: ?Sized + Send> Sync for SpinlockIrqSave<T> {}
unsafe impl<T: ?Sized + Send> Send for SpinlockIrqSave<T> {}
unsafe impl<M: RawMutex> RawMutex for RawInterruptMutex<M> {
const INIT: Self = Self {
inner: M::INIT,
interrupts: AtomicBool::new(false),
};
impl<T> SpinlockIrqSave<T> {
pub const fn new(user_data: T) -> SpinlockIrqSave<T> {
SpinlockIrqSave {
queue: CachePadded::new(AtomicUsize::new(0)),
dequeue: CachePadded::new(AtomicUsize::new(1)),
data: UnsafeCell::new(user_data),
}
}
type GuardMarker = M::GuardMarker;
/// Consumes this mutex, returning the underlying data.
#[allow(dead_code)]
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let SpinlockIrqSave { data, .. } = self;
data.into_inner()
#[inline]
fn lock(&self) {
let interrupts = irq::nested_disable();
self.inner.lock();
self.interrupts.store(interrupts, Ordering::Relaxed);
}
}
impl<T: ?Sized> SpinlockIrqSave<T> {
pub fn try_lock(&self) -> Result<SpinlockIrqSaveGuard<'_, T>, ()> {
let irq = irq::nested_disable();
self.queue
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
if self.dequeue.load(Ordering::Acquire) == ticket + 1 {
Some(ticket + 1)
} else {
None
}
})
.map(|ticket| SpinlockIrqSaveGuard {
dequeue: &self.dequeue,
ticket: ticket + 1,
irq,
data: unsafe { &mut *self.data.get() },
})
.map_err(|_| irq::nested_enable(irq))
}
pub fn lock(&self) -> SpinlockIrqSaveGuard<'_, T> {
let irq = irq::nested_disable();
let backoff = Backoff::new();
let ticket = self.queue.fetch_add(1, Ordering::Relaxed) + 1;
while self.dequeue.load(Ordering::Acquire) != ticket {
backoff.spin();
}
SpinlockIrqSaveGuard {
dequeue: &self.dequeue,
ticket,
irq,
data: unsafe { &mut *self.data.get() },
#[inline]
fn try_lock(&self) -> bool {
let interrupts = irq::nested_disable();
let ok = self.inner.try_lock();
if !ok {
irq::nested_enable(interrupts);
}
ok
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for SpinlockIrqSave<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "queue: {} ", self.queue.load(Ordering::Relaxed))?;
write!(f, "dequeue: {}", self.dequeue.load(Ordering::Relaxed))
}
}
impl<T: ?Sized + Default> Default for SpinlockIrqSave<T> {
fn default() -> SpinlockIrqSave<T> {
SpinlockIrqSave::new(Default::default())
}
}
impl<'a, T: ?Sized> Deref for SpinlockIrqSaveGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
&*self.data
#[inline]
unsafe fn unlock(&self) {
let interrupts = self.interrupts.swap(false, Ordering::Relaxed);
unsafe {
self.inner.unlock();
}
irq::nested_enable(interrupts);
}
}
impl<'a, T: ?Sized> DerefMut for SpinlockIrqSaveGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
&mut *self.data
#[inline]
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}
impl<'a, T: ?Sized> Drop for SpinlockIrqSaveGuard<'a, T> {
/// The dropping of the SpinlockIrqSaveGuard will release the lock it was created from.
fn drop(&mut self) {
self.dequeue.store(self.ticket + 1, Ordering::Release);
irq::nested_enable(self.irq);
}
}
type RawInterruptTicketMutex = RawInterruptMutex<RawTicketMutex>;
pub type SpinlockIrqSave<T> = Mutex<RawInterruptTicketMutex, T>;
pub type SpinlockIrqSaveGuard<'a, T> = MutexGuard<'a, RawInterruptTicketMutex, T>;
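After this change, `Spinlock` and `SpinlockIrqSave` are plain type aliases over `lock_api::Mutex`, so callers keep the familiar `lock`/`try_lock` surface while the locking logic lives in `RawTicketMutex` and the composable `RawInterruptMutex` wrapper. A usage sketch of the new aliases (the function and its parameters are hypothetical):

```
fn example(data: &Spinlock<u64>, irq_data: &SpinlockIrqSave<u64>) {
    // Blocking acquisition: the ticket lock takes a ticket via fetch_add and
    // spins (with crossbeam's Backoff) until `next_serving` reaches it.
    {
        let mut guard: SpinlockGuard<'_, u64> = data.lock();
        *guard += 1;
    } // Dropping the guard calls RawTicketMutex::unlock (next_serving += 1).

    // Non-blocking acquisition now yields an Option instead of a Result.
    if let Some(guard) = data.try_lock() {
        let _ = *guard;
    }

    // The IrqSave variant wraps the same ticket lock in RawInterruptMutex:
    // lock()/try_lock() disable interrupts first, and unlock() restores the
    // saved interrupt state after releasing the inner lock.
    {
        let mut guard = irq_data.lock();
        *guard += 1;
    }
}
```

Composing `RawInterruptMutex<RawTicketMutex>` keeps the interrupt handling independent of the spin strategy, which is why the public aliases at the end are built from the `RawInterruptTicketMutex` shorthand.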