//! Synchronous spinning-based synchronization primitives.
//!
//! The synchronization primitives in `maitake-sync` are _asynchronous_. They
//! are designed to be used with [`core::task`] and [`core::future`], and when
//! it is necessary to wait for another task to complete some work for the
//! current task to proceed, `maitake`'s synchronization primitives wait by
//! *yielding* to the asynchronous task scheduler to allow another task to
//! proceed.
//!
//! This module, on the other hand, provides _synchronous_ (or _blocking_)
//! synchronization primitives. Rather than yielding to the runtime, these
//! synchronization primitives will block the current CPU core (or thread, if
//! running in an environment with threads) until they are woken by other cores.
//! This is performed by *spinning*: issuing yield or pause instructions in a
//! loop until some value changes. These synchronization primitives are, in some
//! cases, necessary to implement the async synchronization primitives that form
//! `maitake-sync`'s core APIs. They are also exposed publicly so they can be
//! used in other projects, when a spinlock-based synchronization primitive is
//! needed.
//!
//! This module provides the following APIs:
//!
//! - [`Spinlock`]: a synchronous [mutual exclusion] spinlock, which implements
//!   the [`blocking::RawMutex`] and [`blocking::ScopedRawMutex`] traits.
//! - [`RwSpinlock`]: a synchronous [reader-writer] spinlock, which implements
//!   the [`blocking::RawRwLock`] trait.
//! - [`InitOnce`]: a cell storing a [`MaybeUninit`](core::mem::MaybeUninit)
//!   value which must be manually initialized prior to use.
//! - [`Lazy`]: an [`InitOnce`] cell coupled with an initializer function. The
//!   [`Lazy`] cell ensures the initializer is called only once, the first
//!   time the value is accessed.
//!
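//! # Examples
//!
//! Driving a [`Spinlock`] by hand through the [`blocking::RawMutex`] trait.
//! This is a minimal sketch: real code would usually wrap the lock in a
//! [`blocking::Mutex`] so that data is guarded and unlocking is handled by a
//! RAII guard.
//!
//! ```
//! use maitake_sync::{blocking::RawMutex, spin::Spinlock};
//!
//! let lock = Spinlock::new();
//!
//! // Acquire the lock, spinning until it becomes available.
//! lock.lock();
//! assert!(lock.is_locked());
//!
//! // Safety: we hold the lock, so it is sound to release it.
//! unsafe { lock.unlock() };
//! assert!(!lock.is_locked());
//! ```
//!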
//! [mutual exclusion]: https://en.wikipedia.org/wiki/Mutual_exclusion
//! [reader-writer]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
pub mod once;

pub use self::once::{InitOnce, Lazy};

use crate::{
    blocking,
    loom::sync::atomic::{AtomicBool, AtomicUsize, Ordering::*},
    util::{fmt, Backoff},
};
// This import is pulled out because we want to reference it in docs, and
// importing `use crate::blocking; use blocking::{RawMutex, RawRwLock};` makes
// `blocking` appear used even if it's not referenced directly, while
// `use crate::blocking::{self, RawMutex, RawRwLock};` causes the `self` to
// appear "unused" to rustc. Yes, this is stupid, but this workaround felt
// better than `allow(unused_imports)`.
use blocking::{RawMutex, RawRwLock};

/// A spinlock-based [`RawMutex`] implementation.
///
/// This mutex will spin with an exponential backoff while waiting for the lock
/// to become available.
///
/// This type implements the [`RawMutex`] and
/// [`ScopedRawMutex`](mutex_traits::ScopedRawMutex) traits from the
/// [`mutex-traits`] crate. This allows it to be used with the
/// [`blocking::Mutex`] type when a spinlock-based mutex is needed.
///
/// [`mutex-traits`]: https://crates.io/crates/mutex-traits
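///
/// # Examples
///
/// A sketch of `try_lock` semantics through the [`RawMutex`] methods:
/// `try_lock` makes a single attempt and fails immediately if the lock is
/// held, rather than spinning.
///
/// ```
/// use maitake_sync::{blocking::RawMutex, spin::Spinlock};
///
/// let lock = Spinlock::new();
/// assert!(lock.try_lock());  // uncontended: acquires the lock
/// assert!(!lock.try_lock()); // already held: fails without spinning
///
/// // Safety: we hold the lock.
/// unsafe { lock.unlock() };
/// assert!(lock.try_lock());  // released: can be acquired again
/// ```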
#[derive(Debug)]
pub struct Spinlock {
    locked: AtomicBool,
}

/// A spinlock-based [`RawRwLock`] implementation.
///
/// This type implements the [`blocking::RawRwLock`] trait. This allows it to be
/// used with the [`blocking::RwLock`] type when a spinlock-based reader-writer
/// lock is needed.
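///
/// # Examples
///
/// A sketch of reader-writer semantics through the [`blocking::RawRwLock`]
/// methods. Since this type's `new` constructor is crate-private, the example
/// assumes the [`blocking::ConstInit`] trait is publicly re-exported, and uses
/// its `INIT` constant to construct the lock:
///
/// ```
/// use maitake_sync::{
///     blocking::{ConstInit, RawRwLock},
///     spin::RwSpinlock,
/// };
///
/// let lock = RwSpinlock::INIT;
///
/// // Any number of readers may hold the lock concurrently...
/// lock.lock_shared();
/// lock.lock_shared();
/// // ...but a writer cannot acquire it while readers hold it.
/// assert!(!lock.try_lock_exclusive());
///
/// // Safety: we hold both read locks.
/// unsafe {
///     lock.unlock_shared();
///     lock.unlock_shared();
/// }
///
/// // Once all read locks are released, a writer may acquire the lock.
/// assert!(lock.try_lock_exclusive());
/// ```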
pub struct RwSpinlock {
    state: AtomicUsize,
}

// === impl Spinlock ===

impl Spinlock {
    loom_const_fn! {
        /// Returns a new `Spinlock`, in the unlocked state.
        pub fn new() -> Self {
            Self { locked: AtomicBool::new(false) }
        }
    }

    #[inline]
    #[must_use]
    fn is_locked(&self) -> bool {
        self.locked.load(Relaxed)
    }
}

impl Default for Spinlock {
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl RawMutex for Spinlock {
    type GuardMarker = ();

    #[cfg_attr(test, track_caller)]
    fn lock(&self) {
        let mut boff = Backoff::default();
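        // This is a "test-and-test-and-set" loop: try the compare-exchange
        // once, and if the lock is contended, spin on a cheaper relaxed load
        // (with backoff) until the lock appears free before attempting the
        // compare-exchange again. This avoids hammering the cache line with
        // writes while another core holds the lock.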
        while test_dbg!(self
            .locked
            .compare_exchange(false, true, Acquire, Acquire)
            .is_err())
        {
            while test_dbg!(self.is_locked()) {
                boff.spin();
            }
        }
    }

    #[cfg_attr(test, track_caller)]
    #[inline]
    fn try_lock(&self) -> bool {
        test_dbg!(self
            .locked
            .compare_exchange(false, true, Acquire, Acquire)
            .is_ok())
    }

    #[cfg_attr(test, track_caller)]
    #[inline]
    unsafe fn unlock(&self) {
        test_dbg!(self.locked.store(false, Release));
    }

    #[inline]
    fn is_locked(&self) -> bool {
        Spinlock::is_locked(self)
    }
}

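/// # Examples
///
/// `ConstInit` (and the fact that `Spinlock::new` is a `const fn` outside of
/// loom builds) allows a spinlock to be constructed in `const` contexts, such
/// as a `static`. A minimal sketch:
///
/// ```
/// use maitake_sync::spin::Spinlock;
///
/// // A lock shared by all callers, initialized at compile time.
/// static LOCK: Spinlock = Spinlock::new();
/// ```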
#[cfg(not(loom))]
impl blocking::ConstInit for Spinlock {
    // As usual, clippy is totally wrong about this --- the whole point of this
    // constant is to create a *new* spinlock every time.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Spinlock::new();
}

// Lock state constants: bit 0 is the writer bit, and the remaining bits hold
// the reader count, so each reader adds `READER` (`1 << 1`) to the state.
const UNLOCKED: usize = 0;
const WRITER: usize = 1 << 0;
const READER: usize = 1 << 1;

impl RwSpinlock {
    loom_const_fn! {
        pub(crate) fn new() -> Self {
            Self {
                state: AtomicUsize::new(UNLOCKED),
            }
        }
    }

    pub(crate) fn reader_count(&self) -> usize {
        self.state.load(Relaxed) >> 1
    }
}

unsafe impl RawRwLock for RwSpinlock {
    type GuardMarker = ();

    #[cfg_attr(test, track_caller)]
    fn lock_shared(&self) {
        let mut boff = Backoff::new();
        while !self.try_lock_shared() {
            boff.spin();
        }
    }

    #[cfg_attr(test, track_caller)]
    fn try_lock_shared(&self) -> bool {
        // Add a reader.
        let state = test_dbg!(self.state.fetch_add(READER, Acquire));

        // Ensure we don't overflow the reader count and clobber the lock's
        // state.
        assert!(
            state < usize::MAX - (READER * 2),
            "read lock counter overflow! this is very bad"
        );

        // Is the write lock held? If so, undo the increment and bail.
        if state & WRITER != 0 {
            test_dbg!(self.state.fetch_sub(READER, Release));
            false
        } else {
            true
        }
    }

    #[cfg_attr(test, track_caller)]
    #[inline]
    unsafe fn unlock_shared(&self) {
        let _val = test_dbg!(self.state.fetch_sub(READER, Release));
        debug_assert_eq!(
            _val & WRITER,
            0,
            "tried to drop a read guard while write locked, something is Very Wrong!"
        )
    }

    #[cfg_attr(test, track_caller)]
    fn lock_exclusive(&self) {
        let mut backoff = Backoff::new();

        // Wait for the lock to become available and set the `WRITER` bit.
        //
        // Note that, unlike the `lock_shared` method, we don't use the
        // `try_lock_exclusive` method here, as we would like to use
        // `compare_exchange_weak` to allow spurious failures for improved
        // performance. The `try_lock_exclusive` method cannot use
        // `compare_exchange_weak`, because it will never retry; a spurious
        // failure would mean incorrectly failing to lock the RwLock when we
        // should have successfully locked it.
        while test_dbg!(self
            .state
            .compare_exchange_weak(UNLOCKED, WRITER, Acquire, Relaxed))
        .is_err()
        {
            test_dbg!(backoff.spin());
        }
    }

    #[cfg_attr(test, track_caller)]
    #[inline]
    fn try_lock_exclusive(&self) -> bool {
        test_dbg!(self
            .state
            .compare_exchange(UNLOCKED, WRITER, Acquire, Relaxed))
        .is_ok()
    }

    #[cfg_attr(test, track_caller)]
    #[inline]
    unsafe fn unlock_exclusive(&self) {
        let _val = test_dbg!(self.state.swap(UNLOCKED, Release));
    }

    #[inline]
    fn is_locked(&self) -> bool {
        // Any non-`UNLOCKED` state means the lock is held, whether by a
        // writer (bit 0) or by one or more readers (the remaining bits).
        self.state.load(Relaxed) != UNLOCKED
    }

    #[inline]
    fn is_locked_exclusive(&self) -> bool {
        self.state.load(Relaxed) & WRITER != 0
    }
}

#[cfg(not(loom))]
impl blocking::ConstInit for RwSpinlock {
    // As usual, clippy is totally wrong about this --- the whole point of this
    // constant is to create a *new* spinlock every time.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = RwSpinlock::new();
}

impl fmt::Debug for RwSpinlock {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let state = self.state.load(Relaxed);
        f.debug_struct("RwSpinlock")
            // N.B.: this does *not* use the `reader_count` and
            // `is_locked_exclusive` methods *intentionally*, because those two
            // methods perform independent reads of the lock's state, and may
            // race with other lock operations that occur concurrently. If, for
            // example, we read a non-zero reader count, and then read the
            // state again to check for a writer, the reader may have been
            // released and a write lock acquired between the two reads,
            // resulting in the `Debug` impl displaying an invalid state when
            // the lock was not actually *in* such a state!
            //
            // Therefore, we instead perform a single load to snapshot the
            // state, and unpack both the reader count and the writer bit from
            // that snapshot.
            .field("readers", &(state >> 1))
            .field("writer", &(state & WRITER))
            .finish()
    }
}