maitake_sync/blocking/rwlock.rs

//! A spinlock-based [readers-writer lock].
//!
//! See the documentation for the [`RwLock`] type for more information.
//!
//! [readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
use crate::{
    loom::cell::{ConstPtr, MutPtr, UnsafeCell},
    spin::RwSpinlock,
    util::fmt,
};
use core::{
    marker::PhantomData,
    ops::{Deref, DerefMut},
};

/// A spinlock-based [readers-writer lock].
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// In comparison, a [`blocking::Mutex`] does not distinguish between readers or writers
/// that acquire the lock, therefore blocking any threads waiting for the lock to
/// become available. An `RwLock` will allow any number of readers to acquire the
/// lock as long as a writer is not holding the lock.
///
/// # Fairness
///
/// This is *not* a fair reader-writer lock.
///
/// # Loom-specific behavior
///
/// When `cfg(loom)` is enabled, this lock will use Loom's simulated atomics,
/// checked `UnsafeCell`, and simulated spin loop hints.
///
/// [`blocking::Mutex`]: crate::blocking::Mutex
/// [readers-writer lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
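///
/// # Examples
///
/// A short usage sketch, exercising only the methods defined in this module:
///
/// ```
/// use maitake_sync::blocking::RwLock;
///
/// let lock = RwLock::new(5);
///
/// // Any number of read guards may be held at the same time.
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // both read guards are dropped here
///
/// // Only one write guard may be held at a time, however.
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // the write guard is dropped here
///
/// assert_eq!(*lock.read(), 6);
/// ```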
pub struct RwLock<T: ?Sized, Lock = RwSpinlock> {
    lock: Lock,
    data: UnsafeCell<T>,
}

/// An RAII implementation of a "scoped read lock" of a [`RwLock`]. When this
/// structure is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the [`RwLock`] can be immutably accessed through this
/// guard via its [`Deref`] implementation.
///
/// This structure is created by the [`read`] and [`try_read`] methods on
/// [`RwLock`].
///
/// [`read`]: RwLock::read
/// [`try_read`]: RwLock::try_read
#[must_use = "if unused, the `RwLock` will immediately unlock"]
pub struct RwLockReadGuard<'lock, T: ?Sized, Lock: RawRwLock = RwSpinlock> {
    ptr: ConstPtr<T>,
    lock: &'lock Lock,
    _marker: PhantomData<Lock::GuardMarker>,
}

/// An RAII implementation of a "scoped write lock" of a [`RwLock`]. When this
/// structure is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the [`RwLock`] can be mutably accessed through this
/// guard via its [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`write`] and [`try_write`] methods on
/// [`RwLock`].
///
/// [`write`]: RwLock::write
/// [`try_write`]: RwLock::try_write
#[must_use = "if unused, the `RwLock` will immediately unlock"]
pub struct RwLockWriteGuard<'lock, T: ?Sized, Lock: RawRwLock = RwSpinlock> {
    ptr: MutPtr<T>,
    lock: &'lock Lock,
    _marker: PhantomData<Lock::GuardMarker>,
}

/// A trait abstracting over blocking [`RwLock`] implementations
/// (`maitake-sync`'s version of such a trait).
///
/// # Safety
///
/// Implementations of this trait must ensure that the `RwLock` is actually
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
/// lock exists, and a shared lock can't be acquired while an exclusive lock
/// exists.
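///
/// # Examples
///
/// A minimal sketch of a custom raw lock: a naive spinning implementation
/// backed by a single `AtomicUsize`, shown only to illustrate the required
/// methods. (The `NaiveRwLock` type and the `WRITER` constant are illustrative
/// and not part of this crate, and the example assumes this trait is
/// importable as `maitake_sync::blocking::RawRwLock`.)
///
/// ```
/// use core::sync::atomic::{AtomicUsize, Ordering};
/// use maitake_sync::blocking::RawRwLock;
///
/// // `usize::MAX` marks the "locked exclusively" state; any other non-zero
/// // value is a count of shared lock holders.
/// const WRITER: usize = usize::MAX;
///
/// struct NaiveRwLock(AtomicUsize);
///
/// unsafe impl RawRwLock for NaiveRwLock {
///     // `()` is `Send`, so guards for this lock may be sent across threads.
///     type GuardMarker = ();
///
///     fn lock_shared(&self) {
///         while !self.try_lock_shared() {
///             core::hint::spin_loop();
///         }
///     }
///
///     fn try_lock_shared(&self) -> bool {
///         // Increment the reader count unless a writer holds the lock
///         // (this sketch ignores reader-count overflow).
///         let readers = self.0.load(Ordering::Relaxed);
///         if readers == WRITER {
///             return false;
///         }
///         self.0
///             .compare_exchange(readers, readers + 1, Ordering::Acquire, Ordering::Relaxed)
///             .is_ok()
///     }
///
///     unsafe fn unlock_shared(&self) {
///         self.0.fetch_sub(1, Ordering::Release);
///     }
///
///     fn lock_exclusive(&self) {
///         while !self.try_lock_exclusive() {
///             core::hint::spin_loop();
///         }
///     }
///
///     fn try_lock_exclusive(&self) -> bool {
///         // A writer may only lock when no readers or writers hold the lock.
///         self.0
///             .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
///             .is_ok()
///     }
///
///     unsafe fn unlock_exclusive(&self) {
///         self.0.store(0, Ordering::Release);
///     }
///
///     fn is_locked(&self) -> bool {
///         self.0.load(Ordering::Relaxed) != 0
///     }
///
///     fn is_locked_exclusive(&self) -> bool {
///         self.0.load(Ordering::Relaxed) == WRITER
///     }
/// }
/// ```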
pub unsafe trait RawRwLock {
    /// Marker type which determines whether a lock guard should be [`Send`].
    type GuardMarker;

    /// Acquires a shared lock, blocking the current thread/CPU core until it is
    /// able to do so.
    fn lock_shared(&self);

    /// Attempts to acquire a shared lock without blocking.
    fn try_lock_shared(&self) -> bool;

    /// Releases a shared lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if a shared lock is held in the current context.
    unsafe fn unlock_shared(&self);

    /// Acquires an exclusive lock, blocking the current thread/CPU core until
    /// it is able to do so.
    fn lock_exclusive(&self);

    /// Attempts to acquire an exclusive lock without blocking.
    fn try_lock_exclusive(&self) -> bool;

    /// Releases an exclusive lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the
    /// current context.
    unsafe fn unlock_exclusive(&self);

    /// Returns `true` if this `RwLock` is currently locked in any way.
    fn is_locked(&self) -> bool;

    /// Returns `true` if this `RwLock` is currently locked exclusively.
    fn is_locked_exclusive(&self) -> bool;
}

impl<T> RwLock<T> {
    loom_const_fn! {
        /// Creates a new, unlocked `RwLock<T>` protecting the provided `data`.
        ///
        /// # Examples
        ///
        /// ```
        /// use maitake_sync::blocking::RwLock;
        ///
        /// let lock = RwLock::new(5);
        /// # drop(lock);
        /// ```
        #[must_use]
        pub fn new(data: T) -> Self {
            Self {
                lock: RwSpinlock::new(),
                data: UnsafeCell::new(data),
            }
        }
    }

    /// Returns the current number of readers holding a read lock.
    ///
    /// # Note
    ///
    /// This method is not synchronized with attempts to increment the reader
    /// count, and its value may become out of date as soon as it is read. This
    /// is **not** intended to be used for synchronization purposes! It is
    /// intended only for debugging purposes or for use as a heuristic.
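    ///
    /// # Examples
    ///
    /// A minimal sketch; the exact counts shown here assume that no other
    /// thread is concurrently locking or unlocking this `RwLock`:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(0);
    ///
    /// let r1 = lock.read();
    /// let r2 = lock.read();
    /// assert_eq!(lock.reader_count(), 2);
    ///
    /// drop((r1, r2));
    /// assert_eq!(lock.reader_count(), 0);
    /// ```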
    #[inline]
    #[must_use]
    pub fn reader_count(&self) -> usize {
        self.lock.reader_count()
    }
}

impl<T: ?Sized, Lock: RawRwLock> RwLock<T, Lock> {
    fn read_guard(&self) -> RwLockReadGuard<'_, T, Lock> {
        RwLockReadGuard {
            ptr: self.data.get(),
            lock: &self.lock,
            _marker: PhantomData,
        }
    }

    fn write_guard(&self) -> RwLockWriteGuard<'_, T, Lock> {
        RwLockWriteGuard {
            ptr: self.data.get_mut(),
            lock: &self.lock,
            _marker: PhantomData,
        }
    }

    /// Locks this `RwLock` for shared read access, spinning until it can be
    /// acquired.
    ///
    /// The calling CPU core will spin (with an exponential backoff) until there
    /// are no more writers which hold the lock. There may be other readers
    /// currently inside the lock when this method returns. This method does not
    /// provide any guarantees with respect to the ordering of whether
    /// contentious readers or writers will acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
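    ///
    /// # Examples
    ///
    /// A minimal sketch of shared access:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let guard = lock.read();
    /// assert_eq!(*guard, 1);
    ///
    /// // Additional read guards may be acquired while `guard` is still held.
    /// let guard2 = lock.read();
    /// assert_eq!(*guard2, 1);
    /// ```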
    #[cfg_attr(test, track_caller)]
    pub fn read(&self) -> RwLockReadGuard<'_, T, Lock> {
        self.lock.lock_shared();
        self.read_guard()
    }

    /// Attempts to acquire this `RwLock` for shared read access.
    ///
    /// If the access could not be granted at this time, this method returns
    /// [`None`]. Otherwise, [`Some`]`(`[`RwLockReadGuard`]`)` containing an RAII
    /// guard is returned. The shared access is released when it is dropped.
    ///
    /// This function does not spin.
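    ///
    /// # Examples
    ///
    /// A minimal sketch; in this single-threaded example, `try_read` fails
    /// only while a write guard is held:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let writer = lock.write();
    /// // While the lock is held exclusively, a read lock cannot be acquired.
    /// assert!(lock.try_read().is_none());
    /// drop(writer);
    ///
    /// // Once the write guard is dropped, `try_read` succeeds.
    /// let guard = lock.try_read().expect("lock is no longer write-locked");
    /// assert_eq!(*guard, 1);
    /// ```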
    #[cfg_attr(test, track_caller)]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T, Lock>> {
        if self.lock.try_lock_shared() {
            Some(self.read_guard())
        } else {
            None
        }
    }

    /// Locks this `RwLock` for exclusive write access, spinning until write
    /// access can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
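    ///
    /// # Examples
    ///
    /// A minimal sketch of exclusive access:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(0);
    ///
    /// // The write guard is a temporary here, so it is released at the end of
    /// // the statement, before the shared lock is acquired below.
    /// *lock.write() += 1;
    /// assert_eq!(*lock.read(), 1);
    /// ```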
    #[cfg_attr(test, track_caller)]
    pub fn write(&self) -> RwLockWriteGuard<'_, T, Lock> {
        self.lock.lock_exclusive();
        self.write_guard()
    }

    /// Returns `true` if there is currently a writer holding a write lock.
    ///
    /// # Note
    ///
    /// This method is not synchronized, and its value may become out of date as
    /// soon as it is read. This is **not** intended to be used for
    /// synchronization purposes! It is intended only for debugging purposes or
    /// for use as a heuristic.
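    ///
    /// # Examples
    ///
    /// A minimal sketch; as noted above, the returned value is only a
    /// point-in-time snapshot:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(0);
    /// assert!(!lock.has_writer());
    ///
    /// let guard = lock.write();
    /// assert!(lock.has_writer());
    ///
    /// drop(guard);
    /// assert!(!lock.has_writer());
    /// ```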
    #[inline]
    #[must_use]
    pub fn has_writer(&self) -> bool {
        self.lock.is_locked_exclusive()
    }

    /// Attempts to acquire this `RwLock` for exclusive write access.
    ///
    /// If the access could not be granted at this time, this method returns
    /// [`None`]. Otherwise, [`Some`]`(`[`RwLockWriteGuard`]`)` containing an
    /// RAII guard is returned. The write access is released when it is dropped.
    ///
    /// This function does not spin.
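    ///
    /// # Examples
    ///
    /// A minimal sketch; in this single-threaded example, `try_write` fails
    /// only while another guard is held:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(0);
    ///
    /// let reader = lock.read();
    /// // While a shared lock is held, exclusive access cannot be acquired.
    /// assert!(lock.try_write().is_none());
    /// drop(reader);
    ///
    /// // Once the read guard is dropped, `try_write` succeeds.
    /// *lock.try_write().expect("no guards are held") += 1;
    /// assert_eq!(*lock.read(), 1);
    /// ```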
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T, Lock>> {
        if self.lock.try_lock_exclusive() {
            Some(self.write_guard())
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place -- the mutable borrow statically guarantees no locks exist.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut lock = maitake_sync::blocking::RwLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.read(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        unsafe {
            // Safety: since this call borrows the `RwLock` mutably, no actual
            // locking needs to take place -- the mutable borrow statically
            // guarantees no locks exist.
            self.data.with_mut(|data| &mut *data)
        }
    }
}

impl<T, Lock: RawRwLock> RwLock<T, Lock> {
    /// Consumes this `RwLock`, returning the guarded data.
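    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use maitake_sync::blocking::RwLock;
    ///
    /// let lock = RwLock::new(42);
    /// assert_eq!(lock.into_inner(), 42);
    /// ```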
    #[inline]
    #[must_use]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}

impl<T: Default, Lock: Default> Default for RwLock<T, Lock> {
    /// Creates a new `RwLock<T>`, with the `Default` value for T.
    fn default() -> RwLock<T, Lock> {
        RwLock {
            data: UnsafeCell::new(Default::default()),
            lock: Default::default(),
        }
    }
}

impl<T> From<T> for RwLock<T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    /// This is equivalent to [`RwLock::new`].
    fn from(t: T) -> Self {
        RwLock::new(t)
    }
}

impl<T, Lock> fmt::Debug for RwLock<T, Lock>
where
    T: fmt::Debug,
    Lock: fmt::Debug + RawRwLock,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RwLock")
            .field(
                "data",
                &fmt::opt(&self.try_read()).or_else("<write locked>"),
            )
            .field("lock", &self.lock)
            .finish()
    }
}

unsafe impl<T: ?Sized + Send, Lock: Send> Send for RwLock<T, Lock> {}
unsafe impl<T: ?Sized + Send + Sync, Lock: Sync> Sync for RwLock<T, Lock> {}

// === impl RwLockReadGuard ===

impl<T: ?Sized, Lock: RawRwLock> Deref for RwLockReadGuard<'_, T, Lock> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe {
            // Safety: we are holding a read lock, so it is okay to dereference
            // the const pointer immutably.
            self.ptr.deref()
        }
    }
}

impl<T: ?Sized, R: ?Sized, Lock> AsRef<R> for RwLockReadGuard<'_, T, Lock>
where
    T: AsRef<R>,
    Lock: RawRwLock,
{
    #[inline]
    fn as_ref(&self) -> &R {
        self.deref().as_ref()
    }
}

impl<T: ?Sized, Lock: RawRwLock> Drop for RwLockReadGuard<'_, T, Lock> {
    #[inline]
    #[cfg_attr(test, track_caller)]
    fn drop(&mut self) {
        unsafe { self.lock.unlock_shared() }
    }
}

impl<T, Lock> fmt::Debug for RwLockReadGuard<'_, T, Lock>
where
    T: ?Sized + fmt::Debug,
    Lock: RawRwLock,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.deref().fmt(f)
    }
}

impl<T, Lock> fmt::Display for RwLockReadGuard<'_, T, Lock>
where
    T: ?Sized + fmt::Display,
    Lock: RawRwLock,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.deref().fmt(f)
    }
}

/// A [`RwLockReadGuard`] is [`Sync`] if both `T` and the `Lock` type parameter
/// are [`Sync`].
unsafe impl<T, Lock> Sync for RwLockReadGuard<'_, T, Lock>
where
    T: ?Sized + Sync,
    Lock: RawRwLock + Sync,
{
}
/// A [`RwLockReadGuard`] is [`Send`] if both `T` and the `Lock` type parameter
/// are [`Sync`], because sending a `RwLockReadGuard` is equivalent to sending a
/// `&(T, Lock)`.
///
/// Additionally, the `Lock` type's [`RawRwLock::GuardMarker`] must indicate
/// that the guard is [`Send`].
unsafe impl<T, Lock> Send for RwLockReadGuard<'_, T, Lock>
where
    T: ?Sized + Sync,
    Lock: RawRwLock + Sync,
    Lock::GuardMarker: Send,
{
}

// === impl RwLockWriteGuard ===

impl<T: ?Sized, Lock: RawRwLock> Deref for RwLockWriteGuard<'_, T, Lock> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe {
            // Safety: we are holding the lock, so it is okay to dereference the
            // mut pointer.
            &*self.ptr.deref()
        }
    }
}

impl<T: ?Sized, Lock: RawRwLock> DerefMut for RwLockWriteGuard<'_, T, Lock> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe {
            // Safety: we are holding the lock, so it is okay to dereference the
            // mut pointer.
            self.ptr.deref()
        }
    }
}

impl<T: ?Sized, R: ?Sized, Lock> AsRef<R> for RwLockWriteGuard<'_, T, Lock>
where
    T: AsRef<R>,
    Lock: RawRwLock,
{
    #[inline]
    fn as_ref(&self) -> &R {
        self.deref().as_ref()
    }
}

impl<T: ?Sized, Lock: RawRwLock> Drop for RwLockWriteGuard<'_, T, Lock> {
    #[inline]
    #[cfg_attr(test, track_caller)]
    fn drop(&mut self) {
        unsafe { self.lock.unlock_exclusive() }
    }
}

impl<T, Lock> fmt::Debug for RwLockWriteGuard<'_, T, Lock>
where
    T: ?Sized + fmt::Debug,
    Lock: RawRwLock,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.deref().fmt(f)
    }
}

impl<T, Lock> fmt::Display for RwLockWriteGuard<'_, T, Lock>
where
    T: ?Sized + fmt::Display,
    Lock: RawRwLock,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.deref().fmt(f)
    }
}

/// A [`RwLockWriteGuard`] is only [`Send`] if `T` is [`Send`] and [`Sync`],
/// because it can be used to *move* a `T` across thread boundaries, as it
/// allows mutable access to the `T` that can be used with
/// [`core::mem::replace`] or [`core::mem::swap`].
unsafe impl<T, Lock> Send for RwLockWriteGuard<'_, T, Lock>
where
    T: ?Sized + Send + Sync,
    Lock: RawRwLock,
    Lock::GuardMarker: Send,
{
}

/// A [`RwLockWriteGuard`] is only [`Sync`] if `T` is [`Send`] and [`Sync`],
/// because it can be used to *move* a `T` across thread boundaries, as it
/// allows mutable access to the `T` that can be used with
/// [`core::mem::replace`] or [`core::mem::swap`].
unsafe impl<T, Lock> Sync for RwLockWriteGuard<'_, T, Lock>
where
    T: ?Sized + Send + Sync,
    Lock: RawRwLock,
{
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::loom::{self, sync::Arc, thread};

    #[test]
    fn write() {
        const WRITERS: usize = 2;

        loom::model(|| {
            let lock = Arc::new(RwLock::<usize>::new(0));
            let threads = (0..WRITERS)
                .map(|_| {
                    let lock = lock.clone();
                    thread::spawn(writer(lock))
                })
                .collect::<Vec<_>>();

            for thread in threads {
                thread.join().expect("writer thread mustn't panic");
            }

            let guard = lock.read();
            assert_eq!(*guard, WRITERS, "final state must equal number of writers");
        });
    }

    #[test]
    fn read_write() {
        // this hits loom's preemption bound with 2 writer threads.
        const WRITERS: usize = if cfg!(loom) { 1 } else { 2 };

        loom::model(|| {
            let lock = Arc::new(RwLock::<usize>::new(0));
            let w_threads = (0..WRITERS)
                .map(|_| {
                    let lock = lock.clone();
                    thread::spawn(writer(lock))
                })
                .collect::<Vec<_>>();

            {
                let guard = lock.read();
                assert!(*guard == 0 || *guard == 1 || *guard == 2);
            }

            for thread in w_threads {
                thread.join().expect("writer thread mustn't panic")
            }

            let guard = lock.read();
            assert_eq!(*guard, WRITERS, "final state must equal number of writers");
        });
    }

    fn writer(lock: Arc<RwLock<usize>>) -> impl FnOnce() {
        move || {
            test_debug!("trying to acquire write lock...");
            let mut guard = lock.write();
            test_debug!("got write lock!");
            *guard += 1;
        }
    }
}