slice_pool2/unsync/owned.rs

use super::ChunkChain;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
use std::{fmt, mem, slice};

/// Interface for any slice compatible with a non-thread-safe `SlicePool`.
pub trait Sliceable<T>: AsMut<[T]> + AsRef<[T]> {}

/// Implements the trait for vectors and similar types.
impl<T, V> Sliceable<T> for V where V: AsRef<[T]> + AsMut<[T]> {}

/// A non-thread-safe interface for allocating chunks in an owned slice.
pub struct SlicePool<T> {
  chain: Rc<ChunkChain>,
  slice: Rc<dyn Sliceable<T>>,
}

impl<T: 'static> SlicePool<T> {
  /// Constructs a new owned slice pool from a sliceable object.
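  ///
  /// # Example
  ///
  /// A minimal sketch of constructing a pool; it assumes the type is
  /// re-exported as `slice_pool2::unsync::SlicePool`.
  ///
  /// ```
  /// use slice_pool2::unsync::SlicePool;
  ///
  /// // Any sliceable object works, e.g. a vector.
  /// let pool = SlicePool::new(vec![0u8; 64]);
  /// assert_eq!(pool.len(), 64);
  /// ```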
  pub fn new<S: Sliceable<T> + 'static>(slice: S) -> Self {
    let size = slice.as_ref().len();

    SlicePool {
      chain: Rc::new(ChunkChain::new(size)),
      slice: Rc::new(slice),
    }
  }

  /// Allocates a new slice from the pool.
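  ///
  /// Returns `None` if the request cannot be satisfied.
  ///
  /// # Example
  ///
  /// A minimal sketch; it assumes the type is re-exported as
  /// `slice_pool2::unsync::SlicePool`.
  ///
  /// ```
  /// use slice_pool2::unsync::SlicePool;
  ///
  /// let pool = SlicePool::new(vec![1, 2, 3, 4]);
  ///
  /// let mut chunk = pool.alloc(2).unwrap();
  /// assert_eq!(*chunk, [1, 2]);
  ///
  /// // The allocation dereferences to a mutable slice.
  /// chunk[0] = 10;
  /// assert_eq!(*chunk, [10, 2]);
  ///
  /// // Requesting more than the pool can hold yields `None`.
  /// assert!(pool.alloc(100).is_none());
  /// ```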
  pub fn alloc(&self, size: usize) -> Option<SliceBox<T>> {
    let chunk = self.chain.allocate(size)?;

    // The following code uses unsafe, and it is the only place where it
    // occurs. Since 'SliceBox' is a self-referential type, Rust cannot
    // express this with its current lifetime semantics. To work around this
    // restriction, the slice is transmuted to a static and mutable slice. It
    // can be treated as static, since it is stored alongside the 'Rc' that
    // keeps the underlying data alive. It can also be treated as mutable,
    // since the 'SliceBox' becomes the only way to access its chunk of the
    // slice.
    let data: &'static mut [T] = unsafe {
      let offset = chunk.offset as isize;
      let base = (*self.slice).as_ref().as_ptr().offset(offset);
      slice::from_raw_parts_mut(base as *mut _, size)
    };

    Some(SliceBox {
      chain: self.chain.clone(),
      slice: self.slice.clone(),
      data,
    })
  }

  /// Returns the address of the underlying slice.
  pub fn as_ptr(&self) -> *const T {
    (*self.slice).as_ref().as_ptr()
  }

  /// Returns the size of the underlying slice.
  pub fn len(&self) -> usize {
    (*self.slice).as_ref().len()
  }
}

/// An allocation in an owned `SlicePool`.
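///
/// Dropping the `SliceBox` releases its chunk back to the pool, making the
/// underlying memory available for subsequent allocations.
///
/// # Example
///
/// A minimal sketch; it assumes the type is re-exported as
/// `slice_pool2::unsync::SlicePool`.
///
/// ```
/// use slice_pool2::unsync::SlicePool;
///
/// let pool = SlicePool::new(vec![1, 2, 3, 4]);
///
/// let first = pool.alloc(4).unwrap();
/// assert_eq!(*first, [1, 2, 3, 4]);
///
/// // Dropping the allocation returns its chunk to the pool...
/// drop(first);
///
/// // ...so the same region can be handed out again.
/// let second = pool.alloc(4).unwrap();
/// assert_eq!(*second, [1, 2, 3, 4]);
/// ```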
pub struct SliceBox<T: 'static> {
  #[allow(unused)]
  slice: Rc<dyn Sliceable<T>>,
  chain: Rc<ChunkChain>,
  data: &'static mut [T],
}

impl<T> Deref for SliceBox<T> {
  type Target = [T];

  fn deref(&self) -> &Self::Target {
    self.data
  }
}

impl<T> DerefMut for SliceBox<T> {
  fn deref_mut(&mut self) -> &mut [T] {
    self.data
  }
}

impl<T> Drop for SliceBox<T> {
  /// Returns the ownership of the slice to the pool.
  fn drop(&mut self) {
    // Convert the pointer difference between this chunk and the start of the
    // underlying slice into an element offset, and release that chunk.
    let base = (*self.slice).as_ref().as_ptr();
    let diff = (self.data.as_ptr() as isize).wrapping_sub(base as isize);
    self.chain.release(diff as usize / mem::size_of::<T>())
  }
}

impl<T: fmt::Debug> fmt::Debug for SliceBox<T> {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    write!(f, "{:?}", self.deref())
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn pool_owned_lifetime() {
    let alloc = {
      let values = vec![10, 20, 30, 40, 50, 60, 70, 80, 90, 100];
      let memory = SlicePool::new(values);

      let alloc = {
        let alloc = memory.alloc(2).unwrap();
        assert_eq!(*alloc, [10, 20]);
        {
          let alloc = memory.alloc(5).unwrap();
          assert_eq!(*alloc, [30, 40, 50, 60, 70]);
        }

        let alloc = memory.alloc(1).unwrap();
        assert_eq!(*alloc, [30]);
        alloc
      };
      assert_eq!(*alloc, [30]);
      alloc
    };
    assert_eq!(*alloc, [30]);
  }

  #[test]
  fn pool_fragmentation() {
    let pool = SlicePool::new(vec![10, 20, 30, 40, 50, 60, 70, 80, 90, 100]);

    let val1 = pool.alloc(2).unwrap();
    assert_eq!(*val1, [10, 20]);

    let val2 = pool.alloc(4).unwrap();
    assert_eq!(*val2, [30, 40, 50, 60]);

    let val3 = pool.alloc(2).unwrap();
    assert_eq!(*val3, [70, 80]);

    // Dropping this allocation fragments the pool's free space.
    mem::drop(val2);

    let val4 = pool.alloc(2).unwrap();
    assert_eq!(*val4, [90, 100]);

    let val5 = pool.alloc(4).unwrap();
    assert_eq!(*val5, [30, 40, 50, 60]);
  }
}