shared_rwlock/lib.rs

// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// # Forked from Rust std::sync::poison (ver. 1.84.0)
// See: https://github.com/rust-lang/rust/blob/1.84.0/library/std/src/sync/poison.rs
// See Rust license details: https://github.com/rust-lang/rust/pull/43498

// TODO: Remove unnecessary example docs
mod errors;
mod poison;
mod sys;

pub use self::errors::LockError;
pub use self::poison::{LockResult, PoisonError, TryLockError, TryLockResult};
use self::sys::shared_memory;
use core::cell::UnsafeCell;
use core::ffi::c_void;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{ManuallyDrop, size_of};
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;

#[repr(C)]
struct SharedCell<T: ?Sized> {
    // Shared memory lock state: 64 bytes (kept on its own cache line to avoid false sharing)
    inner: sys::RwLock, // size: 56 bytes
    poison: poison::Flag,
    _pad39: u8,  // 0x39
    _pad3a: u32, // 0x3a
    // <------- 64 bytes

    // Shared memory data array start (same as the `MEMORY_MAPPED_VIEW_ADDRESS` ptr)
    // offset: 0x40
    data: UnsafeCell<T>,
    // Shared memory data array continues ...
    // - an element of the array
    // - an element of the array
    // - an element of the array
}
const _: () = assert!(core::mem::size_of::<SharedCell<u64>>() == 64 + 8);

const RWLOCK_LOCK_STATE_SIZE: usize = 64;
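// Layout sketch, written as a compile-time check: the data array is expected to begin exactly
// one cache line (64 bytes) after the lock state. Assumes `core::mem::offset_of!` is available
// (Rust 1.77+); remove this assertion if that assumption does not hold.
const _: () = assert!(core::mem::offset_of!(SharedCell<u64>, data) == RWLOCK_LOCK_STATE_SIZE);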

unsafe impl<T: ?Sized + Send> Send for SharedCell<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for SharedCell<T> {}

/// A lock that lives in shared memory and lets the data it protects be edited atomically, even across processes.
///
/// # Safety
/// Behavior is undefined if other threads or processes tamper with this memory directly, bypassing the lock.
///
/// # False sharing
/// The protected data holds an address database that, once initialized, serves a large number of
/// read requests but is rarely written.
///
/// The lock state, on the other hand, changes frequently; if it shared a cache line with the data,
/// every lock operation would force CPU cache synchronization of that line.
///
/// To avoid this, the lock state occupies its own 64-byte cache line, separate from the data.
///
/// # Std description
///
/// A reader-writer lock.
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// In comparison, a [`Mutex`] does not distinguish between readers or writers
/// that acquire the lock, therefore blocking any threads waiting for the lock to
/// become available. An `RwLock` will allow any number of readers to acquire the
/// lock as long as a writer is not holding the lock.
///
/// The priority policy of the lock is dependent on the underlying operating
/// system's implementation, and this type does not guarantee that any
/// particular policy will be used. In particular, a writer which is waiting to
/// acquire the lock in `write` might or might not block concurrent calls to
/// `read`, e.g.:
///
/// <details><summary>Potential deadlock example</summary>
///
/// ```text
/// // Thread 1              |  // Thread 2
/// let _rg1 = lock.read();  |
///                          |  // will block
///                          |  let _wg = lock.write();
/// // may deadlock          |
/// let _rg2 = lock.read();  |
/// ```
///
/// </details>
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies [`Send`] to be shared across threads and
/// [`Sync`] to allow concurrent access through readers. The RAII guards
/// returned from the locking methods implement [`Deref`] (and [`DerefMut`]
/// for the `write` methods) to allow access to the content of the lock.
///
/// # Poisoning
///
/// An `RwLock`, like [`Mutex`], will become poisoned on a panic. Note, however,
/// that an `RwLock` may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned.
pub struct SharedRwLock<T: ?Sized> {
    // Handle ptr (returned by `open`/`create`)
    handle: NonNull<c_void>,
    // Number of `T` elements in the shared data array
    len: usize,

    // Shared memory lock: pointer-cast target (relies on the `SharedCell` memory layout above)
    shared: NonNull<SharedCell<T>>,
}

impl<T: ?Sized> Drop for SharedRwLock<T> {
    fn drop(&mut self) {
        let ptr = self.shared.as_ptr().cast::<c_void>();
        let _ = shared_memory::close(unsafe { self.handle.as_mut() }, ptr);
    }
}

unsafe impl<T: ?Sized + Send> Send for SharedRwLock<T> {}
unsafe impl<T: ?Sized + Sync> Sync for SharedRwLock<T> {}

impl<T> SharedRwLock<T> {
    /// Allocates shared memory for an array of `len` elements of `T`, preceded by the 64-byte lock state.
    ///
    /// The handle is protected by kernel-level locking, but verification showed that plain reads and
    /// writes to the mapped shared memory are not thread-safe; that is why this `RwLock` is layered on top.
    ///
    /// The lock state itself is allocated inside the shared memory with a C-compatible layout, and it is
    /// read and written with atomic operations.
    ///
    /// # Errors
    /// The shared memory is opened if it already exists, otherwise it is created; an error is returned
    /// only if both attempts fail.
    ///
    /// # Note: initial value on creation
    /// Newly created memory is zero-filled, which is identical to the freshly initialized lock state.
    ///
    /// # Panics
    /// Panics if the handle or mapped-view pointer returned by the OS is null.
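    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the mapping name and element count are
    /// illustrative, and a Windows target is assumed):
    ///
    /// ```ignore
    /// use windows::core::h;
    ///
    /// // Open (or create) shared memory holding 16 `u64` elements.
    /// let (lock, created) = SharedRwLock::<u64>::new(h!("shared_rwlock_example"), 16).unwrap();
    /// if created {
    ///     // Freshly created memory is zero-filled.
    ///     assert_eq!(lock.read().unwrap()[0], 0);
    /// }
    /// assert_eq!(lock.read().unwrap().len(), 16);
    /// ```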
    #[cfg(target_os = "windows")]
    #[allow(clippy::unwrap_in_result)]
    pub fn new(shared_id: &windows::core::HSTRING, len: usize) -> Result<(Self, bool), LockError> {
        let size = RWLOCK_LOCK_STATE_SIZE + size_of::<T>() * len;
        let ((handle, view), is_created) = shared_memory::open(shared_id, size)
            .map(|pair| (pair, false))
            .or_else(|_| shared_memory::create(shared_id, size).map(|pair| (pair, true)))?;

        Ok((
            Self {
                handle: NonNull::new(handle.0).unwrap(),
                len,
                shared: NonNull::new(view.Value.cast::<SharedCell<T>>()).unwrap(),
            },
            is_created,
        ))
    }
}

impl<T: ?Sized> SharedRwLock<T> {
    #[inline]
    const fn shared(&self) -> &SharedCell<T> {
        unsafe { self.shared.as_ref() }
    }
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// This structure is created by the [`read`] and [`try_read`] methods on
/// [`SharedRwLock`].
///
/// [`read`]: SharedRwLock::read
/// [`try_read`]: SharedRwLock::try_read
#[must_use = "if unused the RwLock will immediately unlock"]
#[clippy::has_significant_drop]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
    // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
    // `RwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops.
    // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
    // is preferable over `*const T` to allow for niche optimization.
    data: NonNull<T>,
    inner_lock: &'a sys::RwLock,
    len: usize,
}

// impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write`] and [`try_write`] methods
/// on [`SharedRwLock`].
///
/// [`write`]: SharedRwLock::write
/// [`try_write`]: SharedRwLock::try_write
#[must_use = "if unused the RwLock will immediately unlock"]
#[clippy::has_significant_drop]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
    lock: &'a SharedRwLock<T>,
    poison: poison::Guard,
}

// impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}

unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}

/// RAII structure used to release the shared read access of a lock when
/// dropped, which can point to a subfield of the protected data.
///
/// This structure is created by the [`map`] and [`try_map`] methods
/// on [`RwLockReadGuard`].
///
/// [`map`]: RwLockReadGuard::map
/// [`try_map`]: RwLockReadGuard::try_map
#[must_use = "if unused the RwLock will immediately unlock"]
#[clippy::has_significant_drop]
pub struct MappedRwLockReadGuard<'a, T: ?Sized + 'a> {
    // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
    // `MappedRwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops.
    // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
    // is preferable over `*const T` to allow for niche optimization.
    data: NonNull<T>,
    inner_lock: &'a sys::RwLock,
    len: usize,
}

// impl<T: ?Sized> !Send for MappedRwLockReadGuard<'_, T> {}

unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}

impl<T> SharedCell<T> {
    // #[inline]
    // pub const fn new() -> Self {
    //     Self {
    //         inner: sys::RwLock::new(),
    //         poison: poison::Flag::new(),
    //         _pad39: 0,
    //         _pad3a: 0,
    //         data: UnsafeCell::new(),
    //     }
    // }
}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped, which can point to a subfield of the protected data.
///
/// This structure is created by the [`map`] and [`try_map`] methods
/// on [`RwLockWriteGuard`].
///
/// [`map`]: RwLockWriteGuard::map
/// [`try_map`]: RwLockWriteGuard::try_map
#[must_use = "if unused the RwLock will immediately unlock"]
#[clippy::has_significant_drop]
pub struct MappedRwLockWriteGuard<'a, T: ?Sized + 'a> {
    // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
    // `MappedRwLockWriteGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
    // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
    // below for the correct variance over `T` (invariance).
    data: NonNull<T>,
    inner_lock: &'a sys::RwLock,
    poison_flag: &'a poison::Flag,
    poison: poison::Guard,
    _variance: PhantomData<&'a mut T>,
    len: usize,
}

// impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}

unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}

// impl<T> RwLock<T> {
//     /// Creates a new instance of an `RwLock<T>` which is unlocked.
//     ///
//     /// # Examples
//     ///
//     /// ```
//     /// use std::sync::RwLock;
//     ///
//     /// let lock = RwLock::new(5);
//     /// ```

//     #[inline]
//     pub const fn new(t: T) -> RwLock<T> {
//         RwLock {
//             inner: sys::RwLock::new(),
//             poison: poison::Flag::new(),
//             data: UnsafeCell::new(t),
//         }
//     }
// }

impl<T: ?Sized> SharedRwLock<T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// # Errors
    ///
    /// This function will return an error if the `RwLock` is poisoned. An
    /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
    /// lock. The failure will occur immediately after the lock has been
    /// acquired.
    ///
    /// # Panics
    ///
    /// This function might panic when called if the lock is already held by the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(1));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// thread::spawn(move || {
    ///     let r = c_lock.read();
    ///     assert!(r.is_ok());
    /// }).join().unwrap();
    /// ```
    #[inline]
    pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
        unsafe {
            self.shared().inner.read();
            RwLockReadGuard::new(self)
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function does not provide any guarantees with respect to the ordering
    /// of whether contentious readers or writers will acquire the lock first.
    ///
    /// # Errors
    ///
    /// This function will return the [`Poisoned`] error if the `RwLock` is
    /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding
    /// an exclusive lock. `Poisoned` will only be returned if the lock would
    /// have otherwise been acquired.
    ///
    /// This function will return the [`WouldBlock`] error if the `RwLock` could
    /// not be acquired because it was already locked exclusively.
    ///
    /// [`Poisoned`]: TryLockError::Poisoned
    /// [`WouldBlock`]: TryLockError::WouldBlock
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// match lock.try_read() {
    ///     Ok(n) => assert_eq!(*n, 1),
    ///     Err(_) => unreachable!(),
    /// };
    /// ```
    #[inline]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
        unsafe {
            if self.shared().inner.try_read() {
                Ok(RwLockReadGuard::new(self)?)
            } else {
                Err(TryLockError::WouldBlock)
            }
        }
    }

    /// Locks this `RwLock` with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    ///
    /// # Errors
    ///
    /// This function will return an error if the `RwLock` is poisoned. An
    /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
    /// lock. An error will be returned when the lock is acquired.
    ///
    /// # Panics
    ///
    /// This function might panic when called if the lock is already held by the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let mut n = lock.write().unwrap();
    /// *n = 2;
    ///
    /// assert!(lock.try_read().is_err());
    /// ```
    #[inline]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
        unsafe {
            self.shared().inner.write();
            RwLockWriteGuard::new(self)
        }
    }

    /// Attempts to lock this `RwLock` with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    ///
    /// This function does not provide any guarantees with respect to the ordering
    /// of whether contentious readers or writers will acquire the lock first.
    ///
    /// # Errors
    ///
    /// This function will return the [`Poisoned`] error if the `RwLock` is
    /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding
    /// an exclusive lock. `Poisoned` will only be returned if the lock would
    /// have otherwise been acquired.
    ///
    /// This function will return the [`WouldBlock`] error if the `RwLock` could
    /// not be acquired because it was already locked exclusively.
    ///
    /// [`Poisoned`]: TryLockError::Poisoned
    /// [`WouldBlock`]: TryLockError::WouldBlock
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::RwLock;
    ///
    /// let lock = RwLock::new(1);
    ///
    /// let n = lock.read().unwrap();
    /// assert_eq!(*n, 1);
    ///
    /// assert!(lock.try_write().is_err());
    /// ```
    #[inline]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
        unsafe {
            if self.shared().inner.try_write() {
                Ok(RwLockWriteGuard::new(self)?)
            } else {
                Err(TryLockError::WouldBlock)
            }
        }
    }

    /// Determines whether the lock is poisoned.
    ///
    /// If another thread is active, the lock can still become poisoned at any
    /// time. You should not trust a `false` value for program correctness
    /// without additional synchronization.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(0));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// let _ = thread::spawn(move || {
    ///     let _lock = c_lock.write().unwrap();
    ///     panic!(); // the lock gets poisoned
    /// }).join();
    /// assert_eq!(lock.is_poisoned(), true);
    /// ```
    #[inline]
    pub fn is_poisoned(&self) -> bool {
        self.shared().poison.get()
    }

    /// Clear the poisoned state from a lock.
    ///
    /// If the lock is poisoned, it will remain poisoned until this function is called. This allows
    /// recovering from a poisoned state and marking that it has recovered. For example, if the
    /// value is overwritten by a known-good value, then the lock can be marked as un-poisoned. Or
    /// possibly, the value could be inspected to determine if it is in a consistent state, and if
    /// so the poison is removed.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, RwLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(RwLock::new(0));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// let _ = thread::spawn(move || {
    ///     let _lock = c_lock.write().unwrap();
    ///     panic!(); // the lock gets poisoned
    /// }).join();
    ///
    /// assert_eq!(lock.is_poisoned(), true);
    /// let guard = lock.write().unwrap_or_else(|mut e| {
    ///     **e.get_mut() = 1;
    ///     lock.clear_poison();
    ///     e.into_inner()
    /// });
    /// assert_eq!(lock.is_poisoned(), false);
    /// assert_eq!(*guard, 1);
    /// ```
    #[inline]
    pub fn clear_poison(&self) {
        self.shared().poison.clear();
    }
}

impl<T: fmt::Debug> fmt::Debug for SharedRwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("SharedRwLock");
        d.field("handle", &self.handle);
        d.field("shared_address", &(self.shared.as_ptr() as usize));
        d.field("len", &self.len);

        match self.try_read() {
            Ok(guard) => {
                d.field("data", &&*guard);
            }
            Err(TryLockError::Poisoned(err)) => {
                d.field("data", &&**err.get_ref());
            }
            Err(TryLockError::WouldBlock) => {
                d.field("data", &format_args!("<locked>"));
            }
        }
        d.field("poisoned", &self.shared().poison.get());
        d.finish_non_exhaustive()
    }
}

// impl<T: Default> Default for RwLock<T> {
//     /// Creates a new `RwLock<T>`, with the `Default` value for T.
//     fn default() -> RwLock<T> {
//         RwLock::new(Default::default())
//     }
// }

// impl<T> From<T> for RwLock<T> {
//     /// Creates a new instance of an `RwLock<T>` which is unlocked.
//     /// This is equivalent to [`RwLock::new`].
//     fn from(t: T) -> Self {
//         RwLock::new(t)
//     }
// }

impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    /// Creates a new instance of `RwLockReadGuard<T>` from a `SharedRwLock<T>`.
    ///
    /// # Safety
    ///
    /// This function is safe if and only if the same thread has successfully and safely called
    /// `lock.shared().inner.read()`, `lock.shared().inner.try_read()`, or `lock.shared().inner.downgrade()`
    /// before instantiating this object.
    unsafe fn new(lock: &'rwlock SharedRwLock<T>) -> LockResult<Self> {
        poison::map_result(lock.shared().poison.borrow(), |()| RwLockReadGuard {
            data: unsafe { NonNull::new_unchecked(lock.shared().data.get()) },
            inner_lock: &lock.shared().inner,
            len: lock.len,
        })
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    /// Creates a new instance of `RwLockWriteGuard<T>` from a `SharedRwLock<T>`.
    // SAFETY: if and only if `lock.shared().inner.write()` (or `lock.shared().inner.try_write()`) has been
    // successfully called from the same thread before instantiating this object.
    unsafe fn new(lock: &'rwlock SharedRwLock<T>) -> LockResult<Self> {
        poison::map_result(lock.shared().poison.guard(), |guard| RwLockWriteGuard {
            lock,
            poison: guard,
        })
    }
}

impl<T> Deref for RwLockReadGuard<'_, T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
        unsafe { core::slice::from_raw_parts(self.data.as_ptr(), self.len) }
    }
}

impl<T> Deref for RwLockWriteGuard<'_, T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
        unsafe { core::slice::from_raw_parts(self.lock.shared().data.get(), self.lock.len) }
    }
}

impl<T> DerefMut for RwLockWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut [T] {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
        unsafe { core::slice::from_raw_parts_mut(self.lock.shared().data.get(), self.lock.len) }
    }
}

impl<T> Deref for MappedRwLockReadGuard<'_, T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        unsafe { core::slice::from_raw_parts(self.data.as_ref(), self.len) }
    }
}

impl<T> Deref for MappedRwLockWriteGuard<'_, T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        unsafe { core::slice::from_raw_parts(self.data.as_ref(), self.len) }
    }
}

impl<T> DerefMut for MappedRwLockWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut [T] {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        unsafe { core::slice::from_raw_parts_mut(self.data.as_mut(), self.len) }
    }
}

impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
        unsafe {
            self.inner_lock.read_unlock();
        }
    }
}

impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
    fn drop(&mut self) {
        self.lock.shared().poison.done(&self.poison);
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
        unsafe {
            self.lock.shared().inner.write_unlock();
        }
    }
}

impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        unsafe {
            self.inner_lock.read_unlock();
        }
    }
}

impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
    fn drop(&mut self) {
        self.poison_flag.done(&self.poison);
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        unsafe {
            self.inner_lock.write_unlock();
        }
    }
}

impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `RwLock` is already locked for reading, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the `RwLockReadGuard` used through
    /// `Deref`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
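    ///
    /// # Example
    ///
    /// A sketch of the intended usage (not compiled as a doctest; `Entry` and `lock` are
    /// hypothetical, with `lock: SharedRwLock<Entry>`):
    ///
    /// ```ignore
    /// let guard = lock.read().unwrap();
    /// // Narrow the guard from the first `Entry` down to its `key` field.
    /// let key_guard = RwLockReadGuard::map(guard, |entry: &Entry| &entry.key);
    /// ```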
    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
        let orig = ManuallyDrop::new(orig);
        MappedRwLockReadGuard { data, inner_lock: orig.inner_lock, len: orig.len }
    }

    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `RwLock` is already locked for reading, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockReadGuard::try_map(...)`. A method would interfere with methods
    /// of the same name on the contents of the `RwLockReadGuard` used through
    /// `Deref`.
    ///
    /// # Errors
    ///
    /// Returns the original guard as `Err(...)` if the closure returns `None`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
    #[doc(alias = "filter_map")]
    pub fn try_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { orig.data.as_ref() }) {
            Some(data) => {
                let data = NonNull::from(data);
                let orig = ManuallyDrop::new(orig);
                Ok(MappedRwLockReadGuard { data, inner_lock: orig.inner_lock, len: orig.len })
            }
            None => Err(orig),
        }
    }
}

impl<'a, T: ?Sized> MappedRwLockReadGuard<'a, T> {
    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
    /// e.g. an enum variant.
    ///
    /// The `RwLock` is already locked for reading, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
    /// used through `Deref`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
        let orig = ManuallyDrop::new(orig);
        MappedRwLockReadGuard { data, inner_lock: orig.inner_lock, len: orig.len }
    }

    /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
    /// The original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `RwLock` is already locked for reading, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedRwLockReadGuard::try_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the `MappedRwLockReadGuard`
    /// used through `Deref`.
    ///
    /// # Errors
    ///
    /// Returns the original guard as `Err(...)` if the closure returns `None`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
    #[doc(alias = "filter_map")]
    pub fn try_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { orig.data.as_ref() }) {
            Some(data) => {
                let data = NonNull::from(data);
                let orig = ManuallyDrop::new(orig);
                Ok(MappedRwLockReadGuard { data, inner_lock: orig.inner_lock, len: orig.len })
            }
            None => Err(orig),
        }
    }
}

impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g.
    /// an enum variant.
    ///
    /// The `RwLock` is already locked for writing, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the `RwLockWriteGuard` used through
    /// `Deref`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
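    ///
    /// # Example
    ///
    /// A sketch of the intended usage (not compiled as a doctest; `Entry` and `lock` are
    /// hypothetical, with `lock: SharedRwLock<Entry>`):
    ///
    /// ```ignore
    /// let guard = lock.write().unwrap();
    /// // Narrow the write guard from the first `Entry` down to its `key` field.
    /// let key_guard = RwLockWriteGuard::map(guard, |entry: &mut Entry| &mut entry.key);
    /// ```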
    pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { &mut *orig.lock.shared().data.get() }));
        let orig = ManuallyDrop::new(orig);
        MappedRwLockWriteGuard {
            data,
            inner_lock: &orig.lock.shared().inner,
            poison_flag: &orig.lock.shared().poison,
            poison: orig.poison.clone(),
            _variance: PhantomData,
            len: orig.lock.len,
        }
    }

    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data. The
    /// original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `RwLock` is already locked for writing, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockWriteGuard::try_map(...)`. A method would interfere with methods
    /// of the same name on the contents of the `RwLockWriteGuard` used through
    /// `Deref`.
    ///
    /// # Errors
    ///
    /// Returns the original guard as `Err(...)` if the closure returns `None`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
    #[doc(alias = "filter_map")]
    pub fn try_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { &mut *orig.lock.shared().data.get() }) {
            Some(data) => {
                let data = NonNull::from(data);
                let orig = ManuallyDrop::new(orig);
                Ok(MappedRwLockWriteGuard {
                    data,
                    inner_lock: &orig.lock.shared().inner,
                    poison_flag: &orig.lock.shared().poison,
                    poison: orig.poison.clone(),
                    _variance: PhantomData,
                    len: orig.lock.len,
                })
            }
            None => Err(orig),
        }
    }

    /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
    ///
    /// This method will atomically change the state of the [`SharedRwLock`] from exclusive mode into
    /// shared mode. This means that it is impossible for a writing thread to get in between a
    /// thread calling `downgrade` and the same thread reading whatever it wrote while it had the
    /// [`SharedRwLock`] in write mode.
    ///
    /// Note that since we have the `RwLockWriteGuard`, we know that the [`SharedRwLock`] is already
    /// locked for writing, so this method cannot fail.
    ///
    /// In std this API is gated behind `#![feature(rwlock_downgrade)]`; this fork provides it directly.
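    ///
    /// # Example
    ///
    /// A sketch of the intended usage (not compiled as a doctest; assumes `lock` is an
    /// already-mapped `SharedRwLock<u64>`):
    ///
    /// ```ignore
    /// let mut w = lock.write().unwrap();
    /// w[0] = 42;
    /// // Keep shared access while releasing exclusive access, so no writer can slip in between.
    /// let r = RwLockWriteGuard::downgrade(w);
    /// assert_eq!(r[0], 42);
    /// ```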
    #[allow(clippy::mem_forget)]
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> {
        let lock = s.lock;

        // We don't want to call the destructor since that calls `write_unlock`.
        core::mem::forget(s);

        // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
        // mode, satisfying the `downgrade` contract.
        unsafe { lock.shared().inner.downgrade() };

        // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
        unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) }
    }
}

impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data,
    /// e.g. an enum variant.
    ///
    /// The `RwLock` is already locked for writing, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedRwLockWriteGuard::map(...)`. A method would interfere with
    /// methods of the same name on the contents of the `MappedRwLockWriteGuard`
    /// used through `Deref`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
    pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
        let orig = ManuallyDrop::new(orig);
        MappedRwLockWriteGuard {
            data,
            inner_lock: orig.inner_lock,
            poison_flag: orig.poison_flag,
            poison: orig.poison.clone(),
            _variance: PhantomData,
            len: orig.len,
        }
    }

    /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data.
    /// The original guard is returned as an `Err(...)` if the closure returns
    /// `None`.
    ///
    /// The `RwLock` is already locked for writing, so this cannot fail.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the `MappedRwLockWriteGuard`
    /// used through `Deref`.
    ///
    /// # Errors
    ///
    /// Returns the original guard as `Err(...)` if the closure returns `None`.
    ///
    /// # Panics
    ///
    /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
    #[doc(alias = "filter_map")]
    pub fn try_map<U, F>(mut orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
        U: ?Sized,
    {
        // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
        // was created, and have been upheld throughout `map` and/or `try_map`.
        // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
        // passed to it. If the closure panics, the guard will be dropped.
        match f(unsafe { orig.data.as_mut() }) {
            Some(data) => {
                let data = NonNull::from(data);
                let orig = ManuallyDrop::new(orig);
                Ok(MappedRwLockWriteGuard {
                    data,
                    inner_lock: orig.inner_lock,
                    poison_flag: orig.poison_flag,
                    poison: orig.poison.clone(),
                    _variance: PhantomData,
                    len: orig.len,
                })
            }
            None => Err(orig),
        }
    }
}

#[cfg(target_os = "windows")]
#[cfg(test)]
mod tests {
    use crate::SharedRwLock;
    use std::sync::OnceLock;
    use std::thread;
    use windows::core::h;

    // Approximate full-test run time by thread count:
    //  50_000:   8.55s
    // 100_000:  17.75s
    type Primitive = usize;
    const THREAD_COUNT: Primitive = 100_000;

    fn get_shared_memory() -> &'static SharedRwLock<Primitive> {
        static GLOBAL_SHARED_MEM: OnceLock<SharedRwLock<Primitive>> = OnceLock::new();
        GLOBAL_SHARED_MEM.get_or_init(|| SharedRwLock::new(h!("GlobalTest"), 1).unwrap().0)
    }

    #[test]
    fn test_shared_memory_rwlock() {
        let shared_mem = get_shared_memory();

        let reader_handles: Vec<_> = (0..THREAD_COUNT)
            .map(|_| {
                thread::spawn(|| {
                    // Take a shared read lock; the value is only logged when the
                    // `tracing` feature is enabled.
                    let read_guard = get_shared_memory().read().unwrap();
                    #[cfg(feature = "tracing")]
                    tracing::trace!("{}", read_guard[0]);
                    drop(read_guard);
                })
            })
            .collect();

        let writer_handles: Vec<_> = (0..THREAD_COUNT)
            .map(|_| {
                thread::spawn(|| {
                    let mut write_guard = get_shared_memory().write().unwrap();
                    write_guard[0] += 1;
                })
            })
            .collect();

        for handle in reader_handles {
            handle.join().unwrap();
        }
        for handle in writer_handles {
            handle.join().unwrap();
        }

        // Assumes the mapping was freshly created (zero-filled) by this test run.
        assert_eq!(shared_mem.read().unwrap()[0], THREAD_COUNT);
    }
}