ab_core_primitives/block/body/owned.rs

//! Data structures related to the owned version of [`BlockBody`]

use crate::block::body::{
    BeaconChainBody, BlockBody, GenericBlockBody, IntermediateShardBlockInfo,
    IntermediateShardBody, LeafShardBlockInfo, LeafShardBody,
};
use crate::block::header::owned::{
    OwnedIntermediateShardHeader, OwnedIntermediateShardHeaderError, OwnedLeafShardHeader,
};
use crate::pot::PotCheckpoints;
use crate::segments::SegmentRoot;
use crate::shard::ShardKind;
use crate::transaction::Transaction;
use crate::transaction::owned::{OwnedTransaction, OwnedTransactionError};
use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
use ab_io_type::trivial_type::TrivialType;
use core::fmt;
use core::iter::TrustedLen;
use derive_more::From;
use rclite::Arc;
use yoke::Yoke;

/// Generic owned block body
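///
/// # Example
///
/// A minimal sketch (not compile-tested) of using the trait generically; `body` is assumed to be
/// a value of some type implementing this trait:
///
/// ```ignore
/// fn body_size<Body: GenericOwnedBlockBody>(body: &Body) -> usize {
///     // Every owned body exposes the same backing buffer
///     body.buffer().as_slice().len()
/// }
/// ```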
pub trait GenericOwnedBlockBody:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockBody> + 'static
{
    /// Shard kind
    const SHARD_KIND: ShardKind;

    /// Block body
    type Body<'a>: GenericBlockBody<'a>
    where
        Self: 'a;

    /// Inner buffer with block body contents
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of clones in memory
    fn ref_count(&self) -> usize;

    /// Get a regular block body out of the owned version
    fn body(&self) -> &Self::Body<'_>;
}

/// Transaction addition error
#[derive(Debug, thiserror::Error)]
enum AddTransactionError {
    /// Block body is too large
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// Too many transactions
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Failed to add transaction
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        /// Inner error
        #[from]
        error: OwnedTransactionError,
    },
}

/// Transaction that can be written into the body
pub trait WritableBodyTransaction {
    /// Write this transaction into the body
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError>;
}

impl WritableBodyTransaction for Transaction<'_> {
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
        OwnedTransaction::from_parts_into(
            self.header,
            self.read_slots,
            self.write_slots,
            self.payload,
            self.seal,
            buffer,
        )
    }
}

impl WritableBodyTransaction for &OwnedTransaction {
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
        if buffer.append(self.buffer().as_slice()) {
            Ok(())
        } else {
            Err(OwnedTransactionError::TransactionTooLarge)
        }
    }
}

#[derive(Debug, Clone)]
struct TransactionBuilder {
    num_transactions_offset: usize,
    buffer: OwnedAlignedBuffer,
}

impl TransactionBuilder {
    fn new(num_transactions_offset: usize, buffer: OwnedAlignedBuffer) -> Self {
        Self {
            num_transactions_offset,
            buffer,
        }
    }

    /// Add transaction to the body
    fn add_transaction<T>(&mut self, transaction: T) -> Result<(), AddTransactionError>
    where
        T: WritableBodyTransaction,
    {
        // Transactions are aligned, but the very first might come after non-transaction fields that
        // were not aligned
        if self.inc_transaction_count()? == 1 && !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        let old_buffer_len = self.buffer.len();

        transaction
            .write_into(&mut self.buffer)
            .inspect_err(|_error| {
                self.dec_transaction_count();
            })?;

        if !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            // Length was obtained from the same buffer before last write
            unsafe {
                self.buffer.set_len(old_buffer_len);
            }
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        Ok(())
    }

    /// Finish building block body
    #[inline(always)]
    fn finish(self) -> OwnedAlignedBuffer {
        self.buffer
    }

    /// Increase the number of stored transactions and return the new value
    #[inline(always)]
    fn inc_transaction_count(&mut self) -> Result<u32, AddTransactionError> {
        // SAFETY: Constructor ensures the offset is valid and has space for `u32` (but not
        // necessarily aligned)
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions
                .checked_add(1)
                .ok_or(AddTransactionError::TooManyTransactions)?;
            num_transactions_ptr.write_unaligned(num_transactions);
            Ok(num_transactions)
        }
    }

    /// Decrease the number of stored transactions
    #[inline(always)]
    fn dec_transaction_count(&mut self) {
        // SAFETY: Constructor ensures the offset is valid and has space for `u32` (but not
        // necessarily aligned)
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions.saturating_sub(1);
            num_transactions_ptr.write_unaligned(num_transactions);
        }
    }
}

/// Errors for [`OwnedBeaconChainBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainBodyError {
    /// Too many PoT checkpoints
    #[error("Too many PoT checkpoints: {actual}")]
    TooManyPotCheckpoints {
        /// Actual number of PoT checkpoints
        actual: usize,
    },
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Too many intermediate shard blocks
    #[error("Too many intermediate shard blocks: {actual}")]
    TooManyIntermediateShardBlocks {
        /// Actual number of intermediate shard blocks
        actual: usize,
    },
    /// Too many intermediate shard own segment roots
    #[error("Too many intermediate shard own segment roots: {actual}")]
    TooManyIntermediateShardOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Too many intermediate shard child segment roots
    #[error("Too many intermediate shard child segment roots: {actual}")]
    TooManyIntermediateShardChildSegmentRoots {
        /// Actual number of child segment roots
        actual: usize,
    },
    /// Failed to add intermediate shard header
    #[error("Failed to add intermediate shard header: {error}")]
    FailedToAddIntermediateShard {
        /// Inner error
        #[from]
        error: OwnedIntermediateShardHeaderError,
    },
    /// Block body is too large
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
}

/// An owned version of [`BeaconChainBody`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainBody {
    inner: Arc<Yoke<BeaconChainBody<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockBody for OwnedBeaconChainBody {
    const SHARD_KIND: ShardKind = ShardKind::BeaconChain;

    type Body<'a> = BeaconChainBody<'a>;

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}

impl OwnedBeaconChainBody {
    /// Create a new instance
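    ///
    /// # Example
    ///
    /// An illustrative sketch (not compile-tested); `own_segment_roots` (a `&[SegmentRoot]`),
    /// `intermediate_shard_blocks` (a slice of [`IntermediateShardBlockInfo`]) and
    /// `pot_checkpoints` are assumed to be prepared by the caller elsewhere:
    ///
    /// ```ignore
    /// let owned_body = OwnedBeaconChainBody::new(
    ///     own_segment_roots.iter().copied(),
    ///     intermediate_shard_blocks.iter().cloned(),
    ///     &pot_checkpoints,
    /// )?;
    /// ```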
    pub fn new<'a, OSR, ISB>(
        own_segment_roots: OSR,
        intermediate_shard_blocks: ISB,
        pot_checkpoints: &[PotCheckpoints],
    ) -> Result<Self, OwnedBeaconChainBodyError>
    where
        OSR: TrustedLen<Item = SegmentRoot>,
        ISB: TrustedLen<Item = IntermediateShardBlockInfo<'a>> + Clone + 'a,
    {
        let num_pot_checkpoints = pot_checkpoints.len();
        let num_pot_checkpoints = u32::try_from(num_pot_checkpoints).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyPotCheckpoints {
                actual: num_pot_checkpoints,
            }
        })?;
        let num_own_segment_roots = own_segment_roots.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = intermediate_shard_blocks.size_hint().0;
        let num_blocks = u8::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyIntermediateShardBlocks { actual: num_blocks }
        })?;

        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                // This is only an estimate to get in the ballpark where reallocation should not be
                // necessary in many cases
                + u32::from(num_blocks) * OwnedIntermediateShardHeader::max_allocation_for(&[]) * 2,
        );

        let true = buffer.append(&num_pot_checkpoints.to_le_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        for own_segment_root in own_segment_roots {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        // TODO: Would be nice for `IntermediateShardBlocksInfo` to have an API to write this by itself
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let mut segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0, 0, 0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for intermediate_shard_block in intermediate_shard_blocks.clone() {
                if !intermediate_shard_block.own_segment_roots.is_empty()
                    || !intermediate_shard_block.child_segment_roots.is_empty()
                {
                    let num_own_segment_roots = intermediate_shard_block.own_segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedBeaconChainBodyError::TooManyIntermediateShardOwnSegmentRoots {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    let num_child_segment_roots =
                        intermediate_shard_block.child_segment_roots.len();
                    let num_child_segment_roots =
                        u16::try_from(num_child_segment_roots).map_err(|_error| {
                            OwnedBeaconChainBodyError::TooManyIntermediateShardChildSegmentRoots {
                                actual: num_child_segment_roots,
                            }
                        })?;
                    let num_child_segment_roots = num_child_segment_roots.to_le_bytes();
                    buffer.as_mut_slice()[segments_roots_num_cursor..][..3].copy_from_slice(&[
                        num_own_segment_roots,
                        num_child_segment_roots[0],
                        num_child_segment_roots[1],
                    ]);
                }
                segments_roots_num_cursor += 3;

                OwnedIntermediateShardHeader::from_parts_into(
                    intermediate_shard_block.header.prefix,
                    intermediate_shard_block.header.result,
                    intermediate_shard_block.header.consensus_info,
                    intermediate_shard_block.header.beacon_chain_info(),
                    intermediate_shard_block.header.child_shard_blocks(),
                    &mut buffer,
                )?;
                if !align_to_8_with_padding(&mut buffer) {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
                if let Some(segment_roots_proof) = intermediate_shard_block.segment_roots_proof
                    && !buffer.append(segment_roots_proof)
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
                if !intermediate_shard_block.own_segment_roots.is_empty()
                    && !buffer.append(
                        SegmentRoot::repr_from_slice(intermediate_shard_block.own_segment_roots)
                            .as_flattened(),
                    )
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
                if !intermediate_shard_block.child_segment_roots.is_empty()
                    && !buffer.append(
                        SegmentRoot::repr_from_slice(intermediate_shard_block.child_segment_roots)
                            .as_flattened(),
                    )
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
            }
        }

        let true = buffer.append(PotCheckpoints::bytes_from_slice(pot_checkpoints).as_flattened())
        else {
            return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
        };

        // TODO: Avoid extra parsing here or at least go through an unchecked version
        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an owned body from a buffer
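    ///
    /// On failure the original buffer is returned back unchanged.
    ///
    /// # Example
    ///
    /// An illustrative sketch (not compile-tested); `buffer` is assumed to hold bytes previously
    /// produced by [`Self::buffer()`]:
    ///
    /// ```ignore
    /// match OwnedBeaconChainBody::from_buffer(buffer) {
    ///     Ok(owned_body) => {
    ///         // The parsed view borrows from the shared buffer held inside `owned_body`
    ///         let _body = owned_body.body();
    ///     }
    ///     Err(_original_buffer) => {
    ///         // The bytes did not parse as a beacon chain body
    ///     }
    /// }
    /// ```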
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = BeaconChainBody::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block body contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`BeaconChainBody`] out of [`OwnedBeaconChainBody`]
    #[inline(always)]
    pub fn body(&self) -> &BeaconChainBody<'_> {
        self.inner.get()
    }
}

/// Errors for [`OwnedIntermediateShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardBodyError {
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Too many leaf shard blocks
    #[error("Too many leaf shard blocks: {actual}")]
    TooManyLeafShardBlocks {
        /// Actual number of leaf shard blocks
        actual: usize,
    },
    /// Too many leaf shard own segment roots
    #[error("Too many leaf shard own segment roots: {actual}")]
    TooManyLeafShardOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
}

/// An owned version of [`IntermediateShardBody`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardBody {
    inner: Arc<Yoke<IntermediateShardBody<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockBody for OwnedIntermediateShardBody {
    const SHARD_KIND: ShardKind = ShardKind::IntermediateShard;

    type Body<'a> = IntermediateShardBody<'a>;

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}

impl OwnedIntermediateShardBody {
    /// Create a new instance
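    ///
    /// # Example
    ///
    /// An illustrative sketch (not compile-tested); `own_segment_roots` (a `&[SegmentRoot]`) and
    /// `leaf_shard_blocks` (a slice of [`LeafShardBlockInfo`]) are assumed to be prepared by the
    /// caller elsewhere:
    ///
    /// ```ignore
    /// let owned_body = OwnedIntermediateShardBody::new(
    ///     own_segment_roots.iter().copied(),
    ///     leaf_shard_blocks.iter().cloned(),
    /// )?;
    /// ```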
    pub fn new<'a, OSR, LSB>(
        own_segment_roots: OSR,
        leaf_shard_blocks: LSB,
    ) -> Result<Self, OwnedIntermediateShardBodyError>
    where
        OSR: TrustedLen<Item = SegmentRoot>,
        LSB: TrustedLen<Item = LeafShardBlockInfo<'a>> + Clone + 'a,
    {
        let num_own_segment_roots = own_segment_roots.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = leaf_shard_blocks.size_hint().0;
        let num_blocks = u8::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyLeafShardBlocks { actual: num_blocks }
        })?;

        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                // This is only an estimate to get in the ballpark where reallocation should not be
                // necessary
                + u32::from(num_blocks) * OwnedLeafShardHeader::MAX_ALLOCATION * 2,
        );

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        for own_segment_root in own_segment_roots {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        // TODO: Would be nice for `LeafShardBlocksInfo` to have an API to write this by itself
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let mut own_segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for leaf_shard_block in leaf_shard_blocks.clone() {
                if !leaf_shard_block.own_segment_roots.is_empty() {
                    let num_own_segment_roots = leaf_shard_block.own_segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedIntermediateShardBodyError::TooManyLeafShardOwnSegmentRoots {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    buffer.as_mut_slice()[own_segments_roots_num_cursor] = num_own_segment_roots;
                }
                own_segments_roots_num_cursor += 1;

                OwnedLeafShardHeader::from_parts_into(
                    leaf_shard_block.header.prefix,
                    leaf_shard_block.header.result,
                    leaf_shard_block.header.consensus_info,
                    leaf_shard_block.header.beacon_chain_info(),
                    &mut buffer,
                );
                let true = align_to_8_with_padding(&mut buffer) else {
                    unreachable!("Checked size above; qed");
                };
                if let Some(segment_roots_proof) = leaf_shard_block.segment_roots_proof {
                    let true = buffer.append(segment_roots_proof) else {
                        unreachable!("Checked size above; qed");
                    };
                }
                if !leaf_shard_block.own_segment_roots.is_empty() {
                    let true = buffer.append(
                        SegmentRoot::repr_from_slice(leaf_shard_block.own_segment_roots)
                            .as_flattened(),
                    ) else {
                        unreachable!("Checked size above; qed");
                    };
                }
            }
        }

        // TODO: Avoid extra parsing here or at least go through an unchecked version
        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an owned body from a buffer
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = IntermediateShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block body contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`IntermediateShardBody`] out of [`OwnedIntermediateShardBody`]
    #[inline(always)]
    pub fn body(&self) -> &IntermediateShardBody<'_> {
        self.inner.get()
    }
}

/// Errors for [`OwnedLeafShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedLeafShardBodyError {
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Block body is too large
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// Too many transactions
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Failed to add transaction
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        /// Inner error
        error: OwnedTransactionError,
    },
}

impl From<AddTransactionError> for OwnedLeafShardBodyError {
    fn from(value: AddTransactionError) -> Self {
        match value {
            AddTransactionError::BlockBodyIsTooLarge => {
                OwnedLeafShardBodyError::BlockBodyIsTooLarge
            }
            AddTransactionError::TooManyTransactions => {
                OwnedLeafShardBodyError::TooManyTransactions
            }
            AddTransactionError::FailedToAddTransaction { error } => {
                OwnedLeafShardBodyError::FailedToAddTransaction { error }
            }
        }
    }
}

/// An owned version of [`LeafShardBody`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBody {
    inner: Arc<Yoke<LeafShardBody<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockBody for OwnedLeafShardBody {
    const SHARD_KIND: ShardKind = ShardKind::LeafShard;

    type Body<'a> = LeafShardBody<'a>;

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}

impl OwnedLeafShardBody {
    /// Initialize building of [`OwnedLeafShardBody`]
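    ///
    /// # Example
    ///
    /// An illustrative sketch (not compile-tested) of the full builder workflow;
    /// `own_segment_roots` (a `&[SegmentRoot]`) and `owned_transaction` (an [`OwnedTransaction`])
    /// are assumed to be prepared by the caller elsewhere:
    ///
    /// ```ignore
    /// let mut builder = OwnedLeafShardBody::init(own_segment_roots.iter().copied())?;
    /// // `&OwnedTransaction` implements `WritableBodyTransaction`
    /// builder.add_transaction(&owned_transaction)?;
    /// let owned_body = builder.finish();
    /// ```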
    pub fn init<OSR>(
        own_segment_roots: OSR,
    ) -> Result<OwnedLeafShardBlockBodyBuilder, OwnedLeafShardBodyError>
    where
        OSR: TrustedLen<Item = SegmentRoot>,
    {
        let num_own_segment_roots = own_segment_roots.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedLeafShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;

        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32,
        );

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        for own_segment_root in own_segment_roots {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }

        let num_transactions_offset = buffer.len() as usize;
        let true = buffer.append(&0u32.to_le_bytes()) else {
            unreachable!("Checked size above; qed");
        };

        Ok(OwnedLeafShardBlockBodyBuilder {
            transaction_builder: TransactionBuilder::new(num_transactions_offset, buffer),
        })
    }

    /// Create an owned body from a buffer
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = LeafShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block body contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`LeafShardBody`] out of [`OwnedLeafShardBody`]
    #[inline(always)]
    pub fn body(&self) -> &LeafShardBody<'_> {
        self.inner.get()
    }
}

/// Builder for [`OwnedLeafShardBody`] that allows adding more transactions
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBlockBodyBuilder {
    transaction_builder: TransactionBuilder,
}

impl OwnedLeafShardBlockBodyBuilder {
    /// Add transaction to the body
    #[inline(always)]
    pub fn add_transaction<T>(&mut self, transaction: T) -> Result<(), OwnedLeafShardBodyError>
    where
        T: WritableBodyTransaction,
    {
        self.transaction_builder.add_transaction(transaction)?;

        Ok(())
    }

    /// Finish building block body
    pub fn finish(self) -> OwnedLeafShardBody {
        // TODO: Avoid extra parsing here or at least go through an unchecked version
        OwnedLeafShardBody::from_buffer(self.transaction_builder.finish().into_shared())
            .expect("Known to be created correctly; qed")
    }
}

/// An owned version of [`BlockBody`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone, From)]
pub enum OwnedBlockBody {
    /// Block body corresponds to the beacon chain
    BeaconChain(OwnedBeaconChainBody),
    /// Block body corresponds to an intermediate shard
    IntermediateShard(OwnedIntermediateShardBody),
    /// Block body corresponds to a leaf shard
    LeafShard(OwnedLeafShardBody),
}

impl OwnedBlockBody {
    /// Create an owned body from a buffer
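    ///
    /// The `shard_kind` selects which concrete body type the buffer is parsed as; on failure the
    /// original buffer is returned back unchanged.
    ///
    /// # Example
    ///
    /// An illustrative sketch (not compile-tested); `buffer` is assumed to hold the serialized
    /// body of a leaf shard block:
    ///
    /// ```ignore
    /// let owned_body = OwnedBlockBody::from_buffer(buffer, ShardKind::LeafShard)
    ///     .map_err(|_original_buffer| "not a valid leaf shard body")?;
    /// assert!(matches!(owned_body, OwnedBlockBody::LeafShard(_)));
    /// ```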
    #[inline]
    pub fn from_buffer(
        buffer: SharedAlignedBuffer,
        shard_kind: ShardKind,
    ) -> Result<Self, SharedAlignedBuffer> {
        Ok(match shard_kind {
            ShardKind::BeaconChain => Self::BeaconChain(OwnedBeaconChainBody::from_buffer(buffer)?),
            ShardKind::IntermediateShard => {
                Self::IntermediateShard(OwnedIntermediateShardBody::from_buffer(buffer)?)
            }
            ShardKind::LeafShard => Self::LeafShard(OwnedLeafShardBody::from_buffer(buffer)?),
            ShardKind::Phantom | ShardKind::Invalid => {
                // Blocks for such shards do not exist
                return Err(buffer);
            }
        })
    }

    /// Inner buffer with block body contents
    #[inline]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        match self {
            Self::BeaconChain(owned_body) => owned_body.buffer(),
            Self::IntermediateShard(owned_body) => owned_body.buffer(),
            Self::LeafShard(owned_body) => owned_body.buffer(),
        }
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        match self {
            Self::BeaconChain(owned_body) => owned_body.ref_count(),
            Self::IntermediateShard(owned_body) => owned_body.ref_count(),
            Self::LeafShard(owned_body) => owned_body.ref_count(),
        }
    }

    /// Get [`BlockBody`] out of [`OwnedBlockBody`]
    #[inline]
    pub fn body(&self) -> BlockBody<'_> {
        match self {
            Self::BeaconChain(owned_body) => BlockBody::BeaconChain(*owned_body.body()),
            Self::IntermediateShard(owned_body) => BlockBody::IntermediateShard(*owned_body.body()),
            Self::LeafShard(owned_body) => BlockBody::LeafShard(*owned_body.body()),
        }
    }
}

/// Aligns the buffer to 8 bytes by appending the necessary zero padding bytes.
///
/// Returns `false` if the buffer becomes too long.
#[inline(always)]
#[must_use]
fn align_to_8_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
    let alignment = align_of::<u64>();
    // Optimized version of the following due to alignment being a power of 2:
    // let unaligned_by = buffer.as_ptr().addr() % alignment;
    let unaligned_by = buffer.as_ptr().addr() & (alignment - 1);
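    // For example, an end address that is 5 bytes past an 8-byte boundary gives
    // `unaligned_by == 5`, so 3 zero bytes of padding are appended below to reach the next boundary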
    if unaligned_by > 0 {
        // SAFETY: Subtracted value is always smaller than alignment
        let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };

        if !buffer.append(&0u64.to_le_bytes()[..padding_bytes]) {
            return false;
        }
    }

    true
}

/// Aligns the buffer to 16 bytes by appending the necessary zero padding bytes.
///
/// Returns `false` if the buffer becomes too long.
#[inline(always)]
#[must_use]
fn align_to_16_bytes_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
    let alignment = align_of::<u128>();
    // Optimized version of the following due to alignment being a power of 2:
    // let unaligned_by = buffer.as_ptr().addr() % alignment;
    let unaligned_by = buffer.as_ptr().addr() & (alignment - 1);
    if unaligned_by > 0 {
        // SAFETY: Subtracted value is always smaller than alignment
        let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };

        if !buffer.append(&0u128.to_le_bytes()[..padding_bytes]) {
            return false;
        }
    }

    true
}