region/alloc.rs
use crate::{os, page, util, Error, Protection, Result};

/// A handle to an owned region of memory.
///
/// This handle does not dereference to a slice, since the underlying memory may
/// have been created with [`Protection::NONE`].
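///
/// # Examples
///
/// A minimal sketch of obtaining a handle (the memory is deliberately never
/// dereferenced, since `Protection::NONE` pages are inaccessible):
///
/// ```
/// # fn main() -> region::Result<()> {
/// let handle = region::alloc(1, region::Protection::NONE)?;
/// assert!(handle.len() >= 1);
/// # Ok(())
/// # }
/// ```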
#[allow(clippy::len_without_is_empty)]
pub struct Allocation {
  base: *const (),
  size: usize,
}

impl Allocation {
  /// Returns a pointer to the allocation's base address.
  ///
  /// The address is always aligned to the operating system's page size.
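  ///
  /// # Examples
  ///
  /// A small sketch of the alignment guarantee, using this crate's
  /// [`page::size`](crate::page::size) query:
  ///
  /// ```
  /// # fn main() -> region::Result<()> {
  /// let memory = region::alloc(1, region::Protection::READ_WRITE)?;
  /// assert_eq!(memory.as_ptr::<u8>() as usize % region::page::size(), 0);
  /// # Ok(())
  /// # }
  /// ```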
  #[inline(always)]
  pub fn as_ptr<T>(&self) -> *const T {
    self.base.cast()
  }

  /// Returns a mutable pointer to the allocation's base address.
  #[inline(always)]
  pub fn as_mut_ptr<T>(&mut self) -> *mut T {
    self.base as *mut T
  }

  /// Returns two raw pointers spanning the allocation's address space.
  ///
  /// The returned range is half-open, which means that the end pointer points
  /// one past the last element of the allocation. This way, an empty allocation
  /// is represented by two equal pointers, and the difference between the two
  /// pointers represents the size of the allocation.
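  ///
  /// # Examples
  ///
  /// A small sketch of the properties described above:
  ///
  /// ```
  /// # fn main() -> region::Result<()> {
  /// let memory = region::alloc(1, region::Protection::NONE)?;
  /// let range = memory.as_ptr_range::<u8>();
  /// assert_eq!(range.end as usize - range.start as usize, memory.len());
  /// # Ok(())
  /// # }
  /// ```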
  #[inline(always)]
  pub fn as_ptr_range<T>(&self) -> std::ops::Range<*const T> {
    let range = self.as_range();
    (range.start as *const T)..(range.end as *const T)
  }

  /// Returns two mutable raw pointers spanning the allocation's address space.
  #[inline(always)]
  pub fn as_mut_ptr_range<T>(&mut self) -> std::ops::Range<*mut T> {
    let range = self.as_range();
    (range.start as *mut T)..(range.end as *mut T)
  }

  /// Returns a range spanning the allocation's address space.
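  ///
  /// # Examples
  ///
  /// A sketch relating the returned range to [`Allocation::len`]:
  ///
  /// ```
  /// # fn main() -> region::Result<()> {
  /// let memory = region::alloc(1, region::Protection::NONE)?;
  /// assert_eq!(memory.as_range().len(), memory.len());
  /// # Ok(())
  /// # }
  /// ```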
  #[inline(always)]
  pub fn as_range(&self) -> std::ops::Range<usize> {
    (self.base as usize)..(self.base as usize).saturating_add(self.size)
  }

  /// Returns the size of the allocation in bytes.
  ///
  /// The size is always aligned to a multiple of the operating system's page
  /// size.
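  ///
  /// # Examples
  ///
  /// A sketch of the rounding behavior (a one-byte request still occupies a
  /// whole page):
  ///
  /// ```
  /// # fn main() -> region::Result<()> {
  /// let memory = region::alloc(1, region::Protection::NONE)?;
  /// assert_eq!(memory.len(), region::page::size());
  /// # Ok(())
  /// # }
  /// ```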
  #[inline(always)]
  pub fn len(&self) -> usize {
    self.size
  }
}

impl Drop for Allocation {
  #[inline]
  fn drop(&mut self) {
    let result = unsafe { os::free(self.base, self.size) };
    debug_assert!(result.is_ok(), "freeing region: {:?}", result);
  }
}

/// Allocates one or more pages of memory, with a defined protection.
///
/// This function provides a very simple interface for allocating anonymous
/// virtual pages. The allocation address will be decided by the operating
/// system.
///
/// # Parameters
///
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
///   will be returned.
/// - If size is zero, [`Error::InvalidParameter`] will be returned.
///
/// # OS-Specific Behavior
///
/// On NetBSD, pages will be allocated without PaX memory protection
/// restrictions (i.e. the pages will be allowed to be modified to any
/// combination of `RWX`).
///
/// # Examples
///
/// ```
/// # fn main() -> region::Result<()> {
/// # if cfg!(any(target_arch = "x86", target_arch = "x86_64"))
/// #   && !cfg!(any(target_os = "openbsd", target_os = "netbsd")) {
/// use region::Protection;
///
/// // x86 machine code for `mov eax, 5; ret`
/// let ret5 = [0xB8, 0x05, 0x00, 0x00, 0x00, 0xC3u8];
///
/// let mut memory = region::alloc(100, Protection::READ_WRITE_EXECUTE)?;
/// let slice = unsafe {
///   std::slice::from_raw_parts_mut(memory.as_mut_ptr::<u8>(), memory.len())
/// };
///
/// slice[..6].copy_from_slice(&ret5);
/// let x: extern "C" fn() -> i32 = unsafe { std::mem::transmute(slice.as_ptr()) };
///
/// assert_eq!(x(), 5);
/// # }
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn alloc(size: usize, protection: Protection) -> Result<Allocation> {
  if size == 0 {
    return Err(Error::InvalidParameter("size"));
  }

  let size = page::ceil(size as *const ()) as usize;

  unsafe {
    let base = os::alloc(std::ptr::null::<()>(), size, protection)?;
    Ok(Allocation { base, size })
  }
}

/// Allocates one or more pages of memory, at a specific address, with a defined
/// protection.
///
/// The returned memory allocation is not guaranteed to reside at the provided
/// address. For example, on Windows, new allocations that do not reside within
/// already reserved memory are aligned to the operating system's allocation
/// granularity (most commonly 64 KiB).
///
/// # Implementation
///
/// This function is implemented using `VirtualAlloc` on Windows, and `mmap`
/// with `MAP_FIXED` on POSIX.
///
/// # Parameters
///
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
///   address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
///   will be returned.
/// - If size is zero, [`Error::InvalidParameter`] will be returned.
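///
/// # Examples
///
/// A sketch that derives a candidate address from a just-freed allocation;
/// this mirrors the unit tests below and assumes no other thread maps the
/// address in between:
///
/// ```
/// # fn main() -> region::Result<()> {
/// use region::Protection;
/// let base = region::alloc(1, Protection::NONE)?.as_ptr::<()>();
/// let memory = region::alloc_at(base, 1, Protection::READ_WRITE)?;
/// assert_eq!(memory.as_ptr::<()>(), base);
/// # Ok(())
/// # }
/// ```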
#[inline]
pub fn alloc_at<T>(address: *const T, size: usize, protection: Protection) -> Result<Allocation> {
  let (address, size) = util::round_to_page_boundaries(address, size)?;

  unsafe {
    let base = os::alloc(address.cast(), size, protection)?;
    Ok(Allocation { base, size })
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn alloc_size_is_aligned_to_page_size() -> Result<()> {
    let memory = alloc(1, Protection::NONE)?;
    assert_eq!(memory.len(), page::size());
    Ok(())
  }

  #[test]
  fn alloc_rejects_empty_allocation() {
    assert!(matches!(
      alloc(0, Protection::NONE),
      Err(Error::InvalidParameter(_))
    ));
  }

  #[test]
  fn alloc_obtains_correct_properties() -> Result<()> {
    let memory = alloc(1, Protection::READ_WRITE)?;

    let region = crate::query(memory.as_ptr::<()>())?;
    assert_eq!(region.protection(), Protection::READ_WRITE);
    assert!(region.len() >= memory.len());
    assert!(!region.is_guarded());
    assert!(!region.is_shared());
    assert!(region.is_committed());

    Ok(())
  }

  #[test]
  fn alloc_frees_memory_when_dropped() -> Result<()> {
    // Designing this test is tricky. When a page is allocated and then
    // released, a subsequent `query` may allocate memory in the same location
    // that was just freed. For instance, NetBSD's `kinfo_getvmmap` uses `mmap`
    // internally, which can lead to confusing outcomes. To mitigate this, an
    // additional buffer region is allocated to ensure that any memory
    // allocated indirectly through `query` occupies a separate location in
    // memory.
    let (start, _buffer) = (
      alloc(1, Protection::READ_WRITE)?,
      alloc(1, Protection::READ_WRITE)?,
    );

    let base = start.as_ptr::<()>();
    std::mem::drop(start);

    let query = crate::query(base);
    assert!(matches!(query, Err(Error::UnmappedRegion)));
    Ok(())
  }

  #[test]
  fn alloc_can_allocate_unused_region() -> Result<()> {
    let base = alloc(1, Protection::NONE)?.as_ptr::<()>();
    let memory = alloc_at(base, 1, Protection::READ_WRITE)?;
    assert_eq!(memory.as_ptr(), base);
    Ok(())
  }

  #[test]
  #[cfg(not(any(target_os = "openbsd", target_os = "netbsd")))]
  fn alloc_can_allocate_executable_region() -> Result<()> {
    let memory = alloc(1, Protection::WRITE_EXECUTE)?;
    assert_eq!(memory.len(), page::size());
    Ok(())
  }
}
234}