ab_contracts_slots/
aligned_buffer.rs

#[cfg(test)]
mod tests;

use ab_contracts_io_type::MAX_ALIGNMENT;
use alloc::boxed::Box;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicU32, Ordering};
use core::{mem, slice};

#[repr(C, align(16))]
struct AlignedBytes([u8; MAX_ALIGNMENT as usize]);

const _: () = {
    assert!(
        align_of::<AlignedBytes>() == size_of::<AlignedBytes>(),
        "Size and alignment are both 16 bytes"
    );
};

#[repr(C, align(16))]
struct ConstInnerBuffer {
    strong_count: AtomicU32,
}

const _: () = {
    assert!(align_of::<ConstInnerBuffer>() == align_of::<AlignedBytes>());
    assert!(size_of::<ConstInnerBuffer>() == size_of::<AlignedBytes>());
};

static mut CONST_INNER_BUFFER: &mut [ConstInnerBuffer] = &mut [ConstInnerBuffer {
    strong_count: AtomicU32::new(1),
}];

struct ConstInnerBufferPtr(NonNull<[MaybeUninit<AlignedBytes>]>);
// SAFETY: Statically allocated memory buffer can be used from any thread
unsafe impl Sync for ConstInnerBufferPtr {}

// SAFETY: Size and layout of both `NonNull<[ConstInnerBuffer]>` and `ConstInnerBufferPtr`
// are the same, and the `CONST_INNER_BUFFER` static is only mutated through atomic operations
static EMPTY_SHARED_ALIGNED_BUFFER_BUFFER: ConstInnerBufferPtr = unsafe {
    mem::transmute::<NonNull<[ConstInnerBuffer]>, ConstInnerBufferPtr>(NonNull::from_mut(
        CONST_INNER_BUFFER,
    ))
};
static EMPTY_SHARED_ALIGNED_BUFFER: SharedAlignedBuffer = SharedAlignedBuffer {
    inner: InnerBuffer {
        buffer: EMPTY_SHARED_ALIGNED_BUFFER_BUFFER.0,
        len: 0,
    },
};
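
// Note: the empty buffer is backed by a single statically allocated header, so
// `SharedAlignedBuffer::default()` and `SharedAlignedBuffer::empty_ref()` never touch the heap;
// its data area has zero capacity, and only `strong_count` is ever accessed through it.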

#[derive(Debug)]
#[repr(C)]
struct InnerBuffer {
    // The first bytes are allocated for `len` and `strong_count`
    buffer: NonNull<[MaybeUninit<AlignedBytes>]>,
    len: u32,
}

// SAFETY: Heap-allocated memory buffer can be used from any thread
unsafe impl Send for InnerBuffer {}
// SAFETY: Heap-allocated memory buffer can be used from any thread
unsafe impl Sync for InnerBuffer {}

impl Default for InnerBuffer {
    #[inline(always)]
    fn default() -> Self {
        let buffer = InnerBuffer {
            buffer: EMPTY_SHARED_ALIGNED_BUFFER.inner.buffer,
            len: 0,
        };
        buffer.strong_count_ref().fetch_add(1, Ordering::AcqRel);
        buffer
    }
}

impl Clone for InnerBuffer {
    #[inline(always)]
    fn clone(&self) -> Self {
        self.strong_count_ref().fetch_add(1, Ordering::AcqRel);

        Self {
            buffer: self.buffer,
            len: self.len,
        }
    }
}

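// Dropping a handle decrements the shared `strong_count` and frees the allocation once the last
// handle is gone, mirroring `Arc`'s strong count protocol. The statically allocated empty buffer
// starts with a count of one that is never released, so it is never freed through this path.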
impl Drop for InnerBuffer {
    #[inline(always)]
    fn drop(&mut self) {
        if self.strong_count_ref().fetch_sub(1, Ordering::AcqRel) == 1 {
            // SAFETY: Created from `Box` in constructor
            let _ = unsafe { Box::from_non_null(self.buffer) };
        }
    }
}

impl InnerBuffer {
    /// Allocates a new buffer + one [`AlignedBytes`] worth of memory at the beginning for `len`
    /// and `strong_count` in case it is later converted to [`SharedAlignedBuffer`].
    ///
    /// `len` and `strong_count` fields are automatically initialized to `0` and `1`.
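    ///
    /// Illustrative layout of the resulting allocation (derived from the constructor below):
    ///
    /// ```text
    /// [ AlignedBytes 0: `len`/`strong_count` header ][ AlignedBytes 1..: up to `capacity` bytes ]
    /// ```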
    #[inline(always)]
    fn allocate(capacity: u32) -> Self {
        let buffer = Box::into_non_null(Box::new_uninit_slice(
            1 + (capacity as usize).div_ceil(size_of::<AlignedBytes>()),
        ));
        let mut instance = Self { buffer, len: 0 };
        // SAFETY: 0 bytes initialized
        unsafe {
            instance.len_write(0);
            instance.strong_count_initialize();
        }
        instance
    }

    #[inline(always)]
    fn len_read(&self) -> u32 {
        self.len
    }

    /// # Safety
    /// The first `len` bytes of the buffer must be initialized
    #[inline(always)]
    unsafe fn len_write(&mut self, len: u32) {
        self.len = len;
    }

    // TODO: Store precomputed capacity and expose pointer to it
    #[inline(always)]
    fn capacity(&self) -> u32 {
        // API constrains capacity to `u32`, hence this never truncates; `- 1` due to
        // `strong_count` stored at the beginning of the buffer
        ((self.buffer.len() - 1) * size_of::<AlignedBytes>()) as u32
    }

    #[inline(always)]
    fn strong_count_ref(&self) -> &AtomicU32 {
        // SAFETY: The first bytes are allocated for `len` and `strong_count`, which are
        // correctly aligned copy types initialized in the constructor
        unsafe { self.buffer.as_ptr().cast::<AtomicU32>().as_ref_unchecked() }
    }

    #[inline(always)]
    fn strong_count_initialize(&mut self) {
        // SAFETY: The first bytes are allocated for `len` and `strong_count`, which are
        // correctly aligned copy types
        unsafe {
            self.buffer
                .as_ptr()
                .cast::<AtomicU32>()
                .write(AtomicU32::new(1))
        };
    }

    #[inline(always)]
    fn as_slice(&self) -> &[u8] {
        let len = self.len_read() as usize;
        // SAFETY: Pointer is not null, and having `len` initialized bytes is an invariant of this
        // implementation
        unsafe { slice::from_raw_parts(self.as_ptr(), len) }
    }

    #[inline(always)]
    fn as_mut_slice(&mut self) -> &mut [u8] {
        let len = self.len_read() as usize;
        // SAFETY: Pointer is not null, and having `len` initialized bytes is an invariant of this
        // implementation
        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), len) }
    }

    #[inline(always)]
    fn as_ptr(&self) -> *const u8 {
        let buffer_ptr = self.buffer.as_ptr().cast_const().cast::<u8>();
        // SAFETY: Constructor allocates the first element for `strong_count`
        unsafe { buffer_ptr.add(size_of::<AlignedBytes>()) }
    }

    #[inline(always)]
    fn as_mut_ptr(&mut self) -> *mut u8 {
        let buffer_ptr = self.buffer.as_ptr().cast::<u8>();
        // SAFETY: Constructor allocates the first element for `strong_count`
        unsafe { buffer_ptr.add(size_of::<AlignedBytes>()) }
    }
}

/// Owned aligned buffer for executor purposes.
///
/// See [`SharedAlignedBuffer`] for a version that can be cheaply cloned, while reusing the original
/// allocation.
///
/// Data is aligned to 16 bytes (128 bits), which is the largest alignment required by primitive
/// types and by extension any type that implements `TrivialType`/`IoType`.
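///
/// # Example
///
/// A minimal usage sketch (`ignore`d for doctests since the import path depends on the crate
/// layout):
///
/// ```ignore
/// let mut buffer = OwnedAlignedBuffer::with_capacity(16);
/// buffer.copy_from_slice(b"hello");
/// assert_eq!(buffer.as_slice(), b"hello");
/// // Data pointer is aligned to 16 bytes
/// assert_eq!(buffer.as_ptr() as usize % 16, 0);
/// ```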
#[derive(Debug)]
pub struct OwnedAlignedBuffer {
    inner: InnerBuffer,
}

impl Clone for OwnedAlignedBuffer {
    #[inline(always)]
    fn clone(&self) -> Self {
        let mut new_instance = Self::with_capacity(self.capacity());
        new_instance.copy_from_slice(self.as_slice());
        new_instance
    }
}

impl OwnedAlignedBuffer {
    /// Create a new instance with at least specified capacity.
    ///
    /// NOTE: Actual capacity might be larger due to alignment requirements.
    #[inline(always)]
    pub fn with_capacity(capacity: u32) -> Self {
        Self {
            inner: InnerBuffer::allocate(capacity),
        }
    }

    /// Create a new instance from provided bytes.
    ///
    /// # Panics
    /// If `bytes.len()` doesn't fit into `u32`
    #[inline(always)]
    pub fn from_bytes(bytes: &[u8]) -> Self {
        let mut instance = Self::with_capacity(0);
        instance.copy_from_slice(bytes);
        instance
    }

    #[inline(always)]
    pub fn as_slice(&self) -> &[u8] {
        self.inner.as_slice()
    }

    #[inline(always)]
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.inner.as_mut_slice()
    }

    #[inline(always)]
    pub fn as_ptr(&self) -> *const u8 {
        self.inner.as_ptr()
    }

    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.inner.as_mut_ptr()
    }

    #[inline(always)]
    pub fn into_shared(self) -> SharedAlignedBuffer {
        SharedAlignedBuffer { inner: self.inner }
    }

    /// Ensure capacity of the buffer is at least `capacity`.
    ///
    /// Will re-allocate if necessary.
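    ///
    /// A brief sketch (`ignore`d for doctests; import path depends on the crate layout):
    ///
    /// ```ignore
    /// let mut buffer = OwnedAlignedBuffer::from_bytes(b"abc");
    /// buffer.ensure_capacity(1024);
    /// // Existing contents are preserved across the re-allocation
    /// assert_eq!(buffer.as_slice(), b"abc");
    /// ```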
    #[inline(always)]
    pub fn ensure_capacity(&mut self, capacity: u32) {
        if capacity > self.capacity() {
            let mut new_buffer = Self::with_capacity(capacity);
            new_buffer.copy_from_slice(self.as_slice());
            *self = new_buffer;
        }
    }

    /// Will re-allocate if capacity is not enough to store provided bytes.
    ///
    /// # Panics
    /// If `bytes.len()` doesn't fit into `u32`
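    ///
    /// A brief sketch of the behavior (`ignore`d for doctests; import path depends on the crate
    /// layout):
    ///
    /// ```ignore
    /// let mut buffer = OwnedAlignedBuffer::with_capacity(2);
    /// buffer.copy_from_slice(b"more than two bytes"); // re-allocates to fit
    /// assert_eq!(buffer.as_slice(), b"more than two bytes");
    /// ```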
    #[inline(always)]
    pub fn copy_from_slice(&mut self, bytes: &[u8]) {
        let Ok(len) = u32::try_from(bytes.len()) else {
            panic!("Too many bytes");
        };

        if len > self.capacity() {
            // Allocate new buffer
            self.inner = InnerBuffer::allocate(len);
        }

        // SAFETY: Sufficient capacity guaranteed above, natural alignment of bytes is 1 for input
        // and output, non-overlapping allocations guaranteed by type system
        unsafe {
            self.as_mut_ptr()
                .copy_from_nonoverlapping(bytes.as_ptr(), bytes.len());

            self.inner.len_write(len);
        }
    }

    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.inner.len_read() == 0
    }

    #[inline(always)]
    pub fn len(&self) -> u32 {
        self.inner.len_read()
    }

    // TODO: Store precomputed capacity and expose pointer to it
    #[inline(always)]
    pub fn capacity(&self) -> u32 {
        self.inner.capacity()
    }

    /// Set the length of the useful data to specified value.
    ///
    /// # Safety
    /// There must be `new_len` bytes initialized in the buffer.
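    ///
    /// A typical pattern writes through [`Self::as_mut_ptr`] first and only then extends the
    /// length (a sketch; `ignore`d for doctests since the import path depends on the crate
    /// layout):
    ///
    /// ```ignore
    /// let mut buffer = OwnedAlignedBuffer::with_capacity(4);
    /// // SAFETY: 4 zero bytes are written within capacity before `len` is extended
    /// unsafe {
    ///     buffer.as_mut_ptr().write_bytes(0, 4);
    ///     buffer.set_len(4);
    /// }
    /// assert_eq!(buffer.len(), 4);
    /// ```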
    ///
    /// # Panics
    /// If `new_len` exceeds the capacity of the buffer
    #[inline(always)]
    pub unsafe fn set_len(&mut self, new_len: u32) {
        assert!(
            new_len <= self.capacity(),
            "Too many bytes {} > {}",
            new_len,
            self.capacity()
        );
        // SAFETY: Guaranteed by method contract
        unsafe {
            self.inner.len_write(new_len);
        }
    }
}

/// Shared aligned buffer for executor purposes.
///
/// See [`OwnedAlignedBuffer`] for a version that can be mutated.
///
/// Data is aligned to 16 bytes (128 bits), which is the largest alignment required by primitive
/// types and by extension any type that implements `TrivialType`/`IoType`.
///
/// NOTE: The counter for the number of shared instances is `u32` and will wrap around if exceeded,
/// breaking internal invariants (which is extremely unlikely, but still possible).
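///
/// # Example
///
/// A sketch of the cheap-clone and reclaim flow (`ignore`d for doctests since the import path
/// depends on the crate layout):
///
/// ```ignore
/// let shared = OwnedAlignedBuffer::from_bytes(b"data").into_shared();
/// let clone = shared.clone(); // cheap: only bumps `strong_count`
/// drop(clone);
/// // The last remaining instance reclaims the allocation without copying
/// let owned = shared.into_owned();
/// assert_eq!(owned.as_slice(), b"data");
/// ```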
#[derive(Debug, Default, Clone)]
pub struct SharedAlignedBuffer {
    inner: InnerBuffer,
}

// SAFETY: Heap-allocated memory buffer and atomic can be used from any thread
unsafe impl Send for SharedAlignedBuffer {}
// SAFETY: Heap-allocated memory buffer and atomic can be used from any thread
unsafe impl Sync for SharedAlignedBuffer {}

impl SharedAlignedBuffer {
    /// Static reference to an empty buffer
    #[inline(always)]
    pub fn empty_ref() -> &'static Self {
        &EMPTY_SHARED_ALIGNED_BUFFER
    }

    /// Create a new instance from provided bytes.
    ///
    /// # Panics
    /// If `bytes.len()` doesn't fit into `u32`
    #[inline(always)]
    pub fn from_bytes(bytes: &[u8]) -> Self {
        OwnedAlignedBuffer::from_bytes(bytes).into_shared()
    }

    /// Convert into owned buffer.
    ///
    /// If this is the last shared instance, then the allocation will be reused; otherwise, a new
    /// allocation will be created and the contents copied into it.
    #[inline(always)]
    pub fn into_owned(self) -> OwnedAlignedBuffer {
        if self.inner.strong_count_ref().load(Ordering::Acquire) == 1 {
            OwnedAlignedBuffer { inner: self.inner }
        } else {
            OwnedAlignedBuffer::from_bytes(self.as_slice())
        }
    }

    #[inline(always)]
    pub fn as_slice(&self) -> &[u8] {
        self.inner.as_slice()
    }

    #[inline(always)]
    pub fn as_ptr(&self) -> *const u8 {
        self.inner.as_ptr()
    }

    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.inner.len_read() == 0
    }

    #[inline(always)]
    pub fn len(&self) -> u32 {
        self.inner.len_read()
    }
}