slice_pool2/sync/owned.rs

use super::ChunkChain;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::{fmt, mem, slice};

/// Interface for any slice compatible with a thread-safe `SlicePool`.
pub trait Sliceable<T>: Send + Sync + AsMut<[T]> + AsRef<[T]> {}

/// Implements the trait for vectors and similar types.
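/// For example, `Vec<T>` and `Box<[T]>` satisfy these bounds whenever
/// `T: Send + Sync`, so either can serve as the backing storage of a pool.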
impl<T, V> Sliceable<T> for V
where
  T: Send,
  V: Send + Sync + AsRef<[T]> + AsMut<[T]>,
{
}

/// A thread-safe interface for allocating chunks in an owned slice.
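///
/// A minimal usage sketch; the `slice_pool2::sync::SlicePool` import path is
/// an assumption about how this type is re-exported:
///
/// ```
/// use slice_pool2::sync::SlicePool;
///
/// // The pool takes ownership of the backing storage.
/// let pool = SlicePool::new(vec![0usize; 16]);
///
/// // Allocate a chunk of four elements; it derefs to `&mut [usize]`.
/// let mut chunk = pool.alloc(4).unwrap();
/// chunk[0] = 42;
/// assert_eq!(chunk.len(), 4);
/// // Dropping the chunk returns its region to the pool.
/// ```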
pub struct SlicePool<T: Send> {
  chain: Arc<ChunkChain>,
  slice: Arc<dyn Sliceable<T>>,
}

impl<T: Send + 'static> SlicePool<T> {
  /// Constructs a new owned slice pool from a sliceable object.
  pub fn new<S: Sliceable<T> + 'static>(slice: S) -> Self {
    let size = slice.as_ref().len();

    SlicePool {
      chain: Arc::new(ChunkChain::new(size)),
      slice: Arc::new(slice),
    }
  }

  /// Allocates a chunk of `size` elements from the pool, returning `None` if
  /// the request cannot be satisfied.
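  ///
  /// A requested size larger than the remaining free space yields `None`
  /// (sketch; import path assumed as above):
  ///
  /// ```
  /// # use slice_pool2::sync::SlicePool;
  /// let pool = SlicePool::new(vec![0u8; 4]);
  /// assert!(pool.alloc(8).is_none());
  /// ```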
  pub fn alloc(&self, size: usize) -> Option<SliceBox<T>> {
    let chunk = self.chain.allocate(size)?;

    // The following code uses unsafe, and is the only instance of it. Since
    // `SliceBox` is a self-referential type, Rust does not allow us to express
    // this with its current lifetime semantics. To avoid this restriction, the
    // slice is transmuted to a static and mutable slice. It can be treated as
    // static, since it is stored alongside the `Arc` that keeps the data
    // alive. It can also be treated as mutable, since the `SliceBox` becomes
    // the only way to access this chunk of the slice.
    let data: &'static mut [T] = unsafe {
      let offset = chunk.offset as isize;
      let base = (*self.slice).as_ref().as_ptr().offset(offset);
      slice::from_raw_parts_mut(base as *mut _, size)
    };

    Some(SliceBox {
      chain: self.chain.clone(),
      slice: self.slice.clone(),
      data,
    })
  }

  /// Returns the address of the underlying slice.
  pub fn as_ptr(&self) -> *const T {
    (*self.slice).as_ref().as_ptr()
  }

  /// Returns the length of the underlying slice.
  pub fn len(&self) -> usize {
    (*self.slice).as_ref().len()
  }
}

/// An allocation in an owned `SlicePool`.
pub struct SliceBox<T: Send + 'static> {
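  // The `Arc` to the backing storage keeps the data alive for as long as
  // `data` refers into it.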
  #[allow(unused)]
  slice: Arc<dyn Sliceable<T>>,
  chain: Arc<ChunkChain>,
  data: &'static mut [T],
}

impl<T: Send> Deref for SliceBox<T> {
  type Target = [T];

  fn deref(&self) -> &Self::Target {
    self.data
  }
}

impl<T: Send> DerefMut for SliceBox<T> {
  fn deref_mut(&mut self) -> &mut [T] {
    self.data
  }
}

impl<T: Send> Drop for SliceBox<T> {
  /// Returns ownership of the chunk to the pool.
  fn drop(&mut self) {
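    // Recover the element offset of this chunk by taking the pointer
    // difference to the start of the backing slice, in units of `T`.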
    let base = (*self.slice).as_ref().as_ptr();
    let diff = (self.data.as_ptr() as isize).wrapping_sub(base as isize);
    self.chain.release(diff as usize / mem::size_of::<T>())
  }
}

impl<T: Send + fmt::Debug> fmt::Debug for SliceBox<T> {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    write!(f, "{:?}", self.deref())
  }
}

#[cfg(test)]
mod tests {
  use super::*;
  use std::sync::Arc;
  use std::thread;

  #[test]
  fn pool_owned_lifetime() {
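    // A `SliceBox` holds an `Arc` to the backing storage, so it stays valid
    // even after the `SlicePool` itself has gone out of scope.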
    let alloc = {
      let values = vec![10, 20, 30, 40, 50, 60, 70, 80, 90, 100];
      let memory = SlicePool::new(values);

      let alloc = {
        let alloc = memory.alloc(2).unwrap();
        assert_eq!(*alloc, [10, 20]);
        {
          let alloc = memory.alloc(5).unwrap();
          assert_eq!(*alloc, [30, 40, 50, 60, 70]);
        }

        let alloc = memory.alloc(1).unwrap();
        assert_eq!(*alloc, [30]);
        alloc
      };
      assert_eq!(*alloc, [30]);
      alloc
    };
    assert_eq!(*alloc, [30]);
  }

  #[test]
  fn pool_owned_thread() {
    let storage = SlicePool::new(vec![10, 20, 30, 40]);
    let pool = Arc::new(storage);

    let val = pool.alloc(2).unwrap();
    assert_eq!(*val, [10, 20]);

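    // The pool is `Sync`, so a clone of the `Arc` can be moved into the
    // spawned thread and used to allocate from there.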
    let pool2 = pool.clone();
    thread::spawn(move || {
      let val = pool2.alloc(2).unwrap();
      assert_eq!(*val, [30, 40]);
    })
    .join()
    .unwrap();

    assert_eq!(pool.len(), 4);
  }

  #[test]
  fn pool_fragmentation() {
    let pool = SlicePool::new(vec![10, 20, 30, 40, 50, 60, 70, 80, 90, 100]);

    let val1 = pool.alloc(2).unwrap();
    assert_eq!(*val1, [10, 20]);

    let val2 = pool.alloc(4).unwrap();
    assert_eq!(*val2, [30, 40, 50, 60]);

    let val3 = pool.alloc(2).unwrap();
    assert_eq!(*val3, [70, 80]);

    // Dropping this allocation leaves a gap (fragmentation) in the pool.
    mem::drop(val2);

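    // The two-element request is served from the untouched tail rather than
    // from the freed gap, which is then still available for `val5`.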
    let val4 = pool.alloc(2).unwrap();
    assert_eq!(*val4, [90, 100]);

    let val5 = pool.alloc(4).unwrap();
    assert_eq!(*val5, [30, 40, 50, 60]);
  }

  #[test]
  fn pool_complex_fragmentation() {
    let pool = SlicePool::new(vec![10, 20, 30, 40, 50, 60, 70, 80, 90, 100]);

    let val1 = pool.alloc(2).unwrap();
    assert_eq!(*val1, [10, 20]);

    let val2 = pool.alloc(2).unwrap();
    assert_eq!(*val2, [30, 40]);

    let val3 = pool.alloc(2).unwrap();
    assert_eq!(*val3, [50, 60]);

    mem::drop(val1);
    mem::drop(val3);
    mem::drop(val2);

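    // With all chunks released, new allocations start from the beginning of
    // the slice again.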
    let val4 = pool.alloc(1).unwrap();
    assert_eq!(*val4, [10]);

    let val5 = pool.alloc(1).unwrap();
    assert_eq!(*val5, [20]);
  }
}