// commonlibsse_ng/re/b/BSAtomic.rs

use core::{
    ffi::c_void,
    sync::atomic::{AtomicU32, Ordering},
};
use static_assertions::const_assert_eq; // assumed source of the const_assert_eq! macro

#[repr(C)]
#[derive(Debug, Default, Clone, Copy, PartialEq)]
pub struct BSCriticalSection {
    critical_section: windows::Win32::System::Threading::CRITICAL_SECTION,
}
const_assert_eq!(core::mem::size_of::<BSCriticalSection>(), 0x28);
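// The `const_assert_eq!` checks here and below pin each struct's size to the
// layout the game binary expects, so any ABI drift fails at compile time.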

#[repr(C)]
pub struct BSEventFlag {
    event: *mut c_void,
}
const_assert_eq!(core::mem::size_of::<BSEventFlag>(), 0x8);

#[repr(C)]
#[derive(Debug, Default)]
pub struct BSNonReentrantSpinLock {
    lock: AtomicU32,
}
const_assert_eq!(core::mem::size_of::<BSNonReentrantSpinLock>(), 0x4);

#[repr(C)]
struct BSSemaphoreBase {
    semaphore: windows::Win32::Foundation::HANDLE,
}
const_assert_eq!(core::mem::size_of::<BSSemaphoreBase>(), 0x8);

#[repr(C)]
pub struct BSSemaphore {
    _base: BSSemaphoreBase,
}
const_assert_eq!(core::mem::size_of::<BSSemaphore>(), 0x8);

impl Default for BSSemaphore {
    fn default() -> Self {
        Self::new()
    }
}

impl BSSemaphore {
    pub fn new() -> Self {
        Self {
            _base: BSSemaphoreBase {
                semaphore: unsafe {
                    // Unnamed semaphore with an initial count of 0 and a
                    // maximum count of 40.
                    windows::Win32::System::Threading::CreateSemaphoreW(
                        None,
                        0,
                        40,
                        windows::core::PCWSTR::null(),
                    )
                    .unwrap()
                },
            },
        }
    }
}

impl Drop for BSSemaphore {
    fn drop(&mut self) {
        unsafe {
            if let Err(_error) = windows::Win32::Foundation::CloseHandle(self._base.semaphore) {
                #[cfg(feature = "tracing")]
                tracing::error!("Failed to close BSSemaphore's handle: {_error}");
            }
        }
    }
}
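
// Note: `BSSemaphore` owns its handle for its whole lifetime: `new` creates it
// and `Drop` closes it, so callers never call `CloseHandle` themselves.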

#[repr(C)]
#[derive(Debug, Default)]
pub struct BSSpinLock {
    owning_thread: AtomicU32,
    lock_count: AtomicU32,
}
const_assert_eq!(core::mem::size_of::<BSSpinLock>(), 0x8);

impl BSSpinLock {
    pub const FAST_SPIN_THRESHOLD: usize = 10000;

    #[inline]
    pub const fn new() -> Self {
        Self { owning_thread: AtomicU32::new(0), lock_count: AtomicU32::new(0) }
    }

    #[inline]
    pub fn lock(&self) -> BSSpinLockGuard<'_> {
        BSSpinLockGuard::new(self)
    }

    pub fn lock_with_pause_attempts(&self, pause_attempts: u32) {
        let my_thread_id = unsafe { windows::Win32::System::Threading::GetCurrentThreadId() };

        std::sync::atomic::fence(Ordering::Acquire);

        if self.owning_thread.load(Ordering::SeqCst) == my_thread_id {
            // Re-entrant acquire: this thread already owns the lock.
            self.lock_count.fetch_add(1, Ordering::SeqCst);
        } else {
            let mut attempts = 0;
            // A failed CAS means another thread holds the lock, so we spin.
            if self.lock_count.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Relaxed).is_err()
            {
                loop {
                    attempts += 1;
                    std::thread::yield_now();

                    if attempts >= pause_attempts {
                        // Pause budget exhausted: spin with escalating back-off.
                        let mut spin_count = 0;
                        while self
                            .lock_count
                            .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Relaxed)
                            .is_err()
                        {
                            spin_count += 1;
                            if spin_count < Self::FAST_SPIN_THRESHOLD {
                                std::thread::yield_now();
                            } else {
                                std::thread::sleep(std::time::Duration::from_millis(1));
                            }
                        }
                        break;
                    }

                    // Retry the fast path between pauses.
                    if self
                        .lock_count
                        .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Relaxed)
                        .is_ok()
                    {
                        break;
                    }
                }
            }

            self.owning_thread.store(my_thread_id, Ordering::SeqCst);
            std::sync::atomic::fence(Ordering::Release);
        }
    }

    pub fn unlock(&self) {
        let my_thread_id = unsafe { windows::Win32::System::Threading::GetCurrentThreadId() };

        if self.owning_thread.load(Ordering::Acquire) == my_thread_id {
            if self.lock_count.load(Ordering::Acquire) == 1 {
                self.owning_thread.store(0, Ordering::Release);
                // Release the final hold: swap the count from 1 back to 0.
                let _ = self.lock_count.compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed);
            } else {
                self.lock_count.fetch_sub(1, Ordering::Release);
            }
        }
    }
}
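
// A minimal usage sketch (added here for illustration, not part of the game's
// API): the guard returned by `lock` releases the spin lock when dropped.
#[cfg(test)]
mod bs_spin_lock_tests {
    use super::BSSpinLock;

    #[test]
    fn guard_releases_on_drop() {
        let lock = BSSpinLock::new();
        {
            let _guard = lock.lock(); // acquired via lock_with_pause_attempts(0)
        } // guard drops here and calls `unlock`
        let _guard = lock.lock(); // re-acquiring succeeds after release
    }
}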

#[repr(C)]
#[derive(Debug, Default)]
pub struct BSReadWriteLock {
    writer_thread: AtomicU32,
    lock: AtomicU32,
}
const_assert_eq!(core::mem::size_of::<BSReadWriteLock>(), 0x8);

impl BSReadWriteLock {
    pub const LOCK_WRITE: usize = 0x80000000;
    pub const LOCK_COUNT_MASK: usize = 0xFFFFFFF;

    #[commonlibsse_ng_derive_internal::relocate_fn(se_id = 66976, ae_id = 68233)]
    pub unsafe fn lock_for_read(&self) {}

    #[commonlibsse_ng_derive_internal::relocate_fn(se_id = 66982, ae_id = 68239)]
    pub unsafe fn unlock_for_read(&self) {}

    #[commonlibsse_ng_derive_internal::relocate_fn(se_id = 66977, ae_id = 68234)]
    pub unsafe fn lock_for_write(&self) {}

    #[commonlibsse_ng_derive_internal::relocate_fn(se_id = 66983, ae_id = 68240)]
    pub unsafe fn unlock_for_write(&self) {}

    #[inline]
    pub fn write(&self) -> BSWriteLockGuard<'_> {
        BSWriteLockGuard::new(self)
    }

    #[inline]
    pub fn read(&self) -> BSReadLockGuard<'_> {
        BSReadLockGuard::new(self)
    }
}
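
// Usage sketch (illustrative only): `lock_for_read`/`lock_for_write` are
// resolved from the running game binary by `relocate_fn`, so this only works
// in-process with the game loaded:
//
//     let lock = BSReadWriteLock::default();
//     {
//         let _read = lock.read(); // shared access, released on drop
//     }
//     let _write = lock.write(); // exclusive access, released on drop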

#[repr(C)]
pub struct BSSpinLockGuard<'a> {
    lock: &'a BSSpinLock,
}

impl<'a> BSSpinLockGuard<'a> {
    pub fn new(lock: &'a BSSpinLock) -> Self {
        lock.lock_with_pause_attempts(0);
        Self { lock }
    }
}

impl Drop for BSSpinLockGuard<'_> {
    fn drop(&mut self) {
        // `unlock` handles all count bookkeeping for this guard.
        self.lock.unlock();
    }
}

#[repr(C)]
pub struct BSReadLockGuard<'a> {
    lock: &'a BSReadWriteLock,
}

impl<'a> BSReadLockGuard<'a> {
    pub fn new(lock: &'a BSReadWriteLock) -> Self {
        unsafe { lock.lock_for_read() };
        Self { lock }
    }
}

impl Drop for BSReadLockGuard<'_> {
    fn drop(&mut self) {
        unsafe { self.lock.unlock_for_read() };
    }
}

#[repr(C)]
pub struct BSWriteLockGuard<'a> {
    lock: &'a BSReadWriteLock,
}

impl<'a> BSWriteLockGuard<'a> {
    pub fn new(lock: &'a BSReadWriteLock) -> Self {
        unsafe { lock.lock_for_write() };
        Self { lock }
    }
}

impl Drop for BSWriteLockGuard<'_> {
    fn drop(&mut self) {
        unsafe { self.lock.unlock_for_write() };
    }
}