//! Owned block body variants (`ab_core_primitives/block/body/owned.rs`).

use crate::block::body::{
4 BeaconChainBody, BlockBody, GenericBlockBody, IntermediateShardBlockInfo,
5 IntermediateShardBody, LeafShardBlockInfo, LeafShardBody,
6};
7use crate::block::header::owned::{
8 OwnedIntermediateShardHeader, OwnedIntermediateShardHeaderError, OwnedLeafShardHeader,
9};
10use crate::pot::PotCheckpoints;
11use crate::segments::SegmentRoot;
12use crate::shard::RealShardKind;
13use crate::transaction::Transaction;
14use crate::transaction::owned::{OwnedTransaction, OwnedTransactionError};
15use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
16use ab_io_type::trivial_type::TrivialType;
17use core::fmt;
18use core::iter::TrustedLen;
19use derive_more::From;
20use rclite::Arc;
21use yoke::Yoke;
22
/// Generic owned block body, abstracting over the concrete per-shard-kind body type
pub trait GenericOwnedBlockBody:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockBody> + 'static
{
    /// Shard kind this body corresponds to
    const SHARD_KIND: RealShardKind;

    /// Borrowed body view parsed out of the underlying buffer
    type Body<'a>: GenericBlockBody<'a>
    where
        Self: 'a;

    /// Underlying aligned buffer containing the serialized body bytes
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of references to the shared inner state
    fn ref_count(&self) -> usize;

    /// Borrowed body view
    fn body(&self) -> &Self::Body<'_>;
}
44
/// Error for [`TransactionBuilder::add_transaction()`]
#[derive(Debug, thiserror::Error)]
enum AddTransactionError {
    /// Appending the transaction or its alignment padding would exceed the buffer's size limit
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// The in-buffer `u32` transaction counter would overflow
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Serializing the transaction itself failed
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        /// Inner transaction error
        #[from]
        error: OwnedTransactionError,
    },
}
62
/// A transaction that can be serialized into a block body buffer
pub trait WritableBodyTransaction {
    /// Write this transaction into the provided buffer
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError>;
}
68
impl WritableBodyTransaction for Transaction<'_> {
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
        // Serialize the borrowed transaction parts directly into the target buffer
        OwnedTransaction::from_parts_into(
            self.header,
            self.read_slots,
            self.write_slots,
            self.payload,
            self.seal,
            buffer,
        )
    }
}
81
82impl WritableBodyTransaction for &OwnedTransaction {
83 fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
84 if buffer.append(self.buffer().as_slice()) {
85 Ok(())
86 } else {
87 Err(OwnedTransactionError::TransactionTooLarge)
88 }
89 }
90}
91
/// Incrementally appends transactions to a block body buffer while keeping the transaction
/// count that is stored inside the buffer itself up to date
#[derive(Debug, Clone)]
struct TransactionBuilder {
    // Byte offset within `buffer` where the `u32` transaction counter lives
    num_transactions_offset: usize,
    // Buffer with the block body under construction
    buffer: OwnedAlignedBuffer,
}
97
impl TransactionBuilder {
    /// Create a builder over `buffer`, whose `u32` transaction counter was previously written
    /// at `num_transactions_offset`
    fn new(num_transactions_offset: usize, buffer: OwnedAlignedBuffer) -> Self {
        Self {
            num_transactions_offset,
            buffer,
        }
    }

    /// Add a transaction to the body.
    ///
    /// On failure the counter is rolled back and, for padding failures after the write, the
    /// buffer length is restored as well.
    fn add_transaction<T>(&mut self, transaction: T) -> Result<(), AddTransactionError>
    where
        T: WritableBodyTransaction,
    {
        // The very first transaction starts the transactions region, which is padded to
        // 16 bytes before anything is written into it
        if self.inc_transaction_count()? == 1 && !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        let old_buffer_len = self.buffer.len();

        transaction
            .write_into(&mut self.buffer)
            .inspect_err(|_error| {
                self.dec_transaction_count();
            })?;

        // Each transaction is followed by padding to 16 bytes; if that fails, undo both the
        // counter increment and the bytes written for this transaction
        if !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            // SAFETY: `old_buffer_len` was read from this very buffer above and the buffer has
            // only grown since, so truncating back to it is valid
            unsafe {
                self.buffer.set_len(old_buffer_len);
            }
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        Ok(())
    }

    /// Consume the builder and return the underlying buffer
    #[inline(always)]
    fn finish(self) -> OwnedAlignedBuffer {
        self.buffer
    }

    /// Increment the in-buffer transaction counter, returning the new value
    #[inline(always)]
    fn inc_transaction_count(&mut self) -> Result<u32, AddTransactionError> {
        // SAFETY: `num_transactions_offset` points at a `u32` counter previously written into
        // this buffer; unaligned access is used since the offset is not guaranteed to be
        // 4-byte-aligned.
        // NOTE(review): the counter is accessed in native endianness while it was initialized
        // from `to_le_bytes()` — equivalent on little-endian targets; verify if big-endian
        // targets are supported.
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions
                .checked_add(1)
                .ok_or(AddTransactionError::TooManyTransactions)?;
            num_transactions_ptr.write_unaligned(num_transactions);
            Ok(num_transactions)
        }
    }

    /// Decrement the in-buffer transaction counter (saturating at zero)
    #[inline(always)]
    fn dec_transaction_count(&mut self) {
        // SAFETY: same invariants as in `inc_transaction_count()`
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions.saturating_sub(1);
            num_transactions_ptr.write_unaligned(num_transactions);
        }
    }
}
181
182#[derive(Debug, thiserror::Error)]
184pub enum OwnedBeaconChainBodyError {
185 #[error("Too many PoT checkpoints: {actual}")]
187 TooManyPotCheckpoints {
188 actual: usize,
190 },
191 #[error("Too many own segment roots: {actual}")]
193 TooManyOwnSegmentRoots {
194 actual: usize,
196 },
197 #[error("Too many intermediate shard blocks: {actual}")]
199 TooManyIntermediateShardBlocks {
200 actual: usize,
202 },
203 #[error("Too many intermediate shard own segment roots: {actual}")]
205 TooManyIntermediateShardOwnSegmentRoots {
206 actual: usize,
208 },
209 #[error("Too many intermediate shard child segment roots: {actual}")]
211 TooManyIntermediateShardChildSegmentRoots {
212 actual: usize,
214 },
215 #[error("Failed to intermediate shard header: {error}")]
217 FailedToAddIntermediateShard {
218 #[from]
220 error: OwnedIntermediateShardHeaderError,
221 },
222 #[error("Block body is too large")]
224 BlockBodyIsTooLarge,
225}
226
/// An owned beacon chain block body backed by a shared aligned buffer
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainBody {
    // `Yoke` ties the parsed `BeaconChainBody` to the buffer it borrows from; `Arc` makes
    // clones cheap reference-count bumps
    inner: Arc<Yoke<BeaconChainBody<'static>, SharedAlignedBuffer>>,
}
235
impl GenericOwnedBlockBody for OwnedBeaconChainBody {
    const SHARD_KIND: RealShardKind = RealShardKind::BeaconChain;

    type Body<'a> = BeaconChainBody<'a>;

    // Trait methods delegate to the inherent methods of the same name (inherent methods take
    // precedence in method resolution, so this does not recurse)
    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
256
impl OwnedBeaconChainBody {
    /// Create a new instance from its parts.
    ///
    /// Serializes PoT checkpoints, own segment roots and intermediate shard blocks into a
    /// single aligned buffer, then parses it back via [`Self::from_buffer()`].
    pub fn new<'a, OSR, ISB>(
        own_segment_roots: OSR,
        intermediate_shard_blocks: ISB,
        pot_checkpoints: &[PotCheckpoints],
    ) -> Result<Self, OwnedBeaconChainBodyError>
    where
        OSR: TrustedLen<Item = SegmentRoot>,
        ISB: TrustedLen<Item = IntermediateShardBlockInfo<'a>> + Clone + 'a,
    {
        // Counts are stored as fixed-width integers; reject inputs that do not fit
        let num_pot_checkpoints = pot_checkpoints.len();
        let num_pot_checkpoints = u32::try_from(num_pot_checkpoints).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyPotCheckpoints {
                actual: num_pot_checkpoints,
            }
        })?;
        // `TrustedLen` guarantees `size_hint().0` is the exact length
        let num_own_segment_roots = own_segment_roots.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = intermediate_shard_blocks.size_hint().0;
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyIntermediateShardBlocks { actual: num_blocks }
        })?;

        // Capacity is a rough upper-bound estimate; all appends below are still checked
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u32::SIZE
                + u8::SIZE
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                + u16::SIZE
                + u32::from(num_blocks) * OwnedIntermediateShardHeader::max_allocation_for(&[]) * 2,
        );

        let true = buffer.append(&num_pot_checkpoints.to_le_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        for own_segment_root in own_segment_roots {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // Reserve 3 placeholder bytes per block (u8 own segment roots count + u16 child
            // segment roots count, little-endian) and backfill them in the loop below
            let mut segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0, 0, 0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for intermediate_shard_block in intermediate_shard_blocks.clone() {
                // Backfill the per-block counts reserved above (placeholder zeros stand when
                // there are no segment roots at all)
                if !intermediate_shard_block.own_segment_roots.is_empty()
                    || !intermediate_shard_block.child_segment_roots.is_empty()
                {
                    let num_own_segment_roots = intermediate_shard_block.own_segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedBeaconChainBodyError::TooManyIntermediateShardOwnSegmentRoots {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    let num_child_segment_roots =
                        intermediate_shard_block.child_segment_roots.len();
                    let num_child_segment_roots =
                        u16::try_from(num_child_segment_roots).map_err(|_error| {
                            OwnedBeaconChainBodyError::TooManyIntermediateShardChildSegmentRoots {
                                actual: num_child_segment_roots,
                            }
                        })?;
                    let num_child_segment_roots = num_child_segment_roots.to_le_bytes();
                    buffer.as_mut_slice()[segments_roots_num_cursor..][..3].copy_from_slice(&[
                        num_own_segment_roots,
                        num_child_segment_roots[0],
                        num_child_segment_roots[1],
                    ]);
                }
                segments_roots_num_cursor += 3;

                // Append the shard header followed by optional proof and segment roots, each
                // section padded to 8 bytes where required
                OwnedIntermediateShardHeader::from_parts_into(
                    intermediate_shard_block.header.prefix,
                    intermediate_shard_block.header.result,
                    intermediate_shard_block.header.consensus_info,
                    intermediate_shard_block.header.beacon_chain_info(),
                    intermediate_shard_block.header.child_shard_blocks(),
                    &mut buffer,
                )?;
                if !align_to_8_with_padding(&mut buffer) {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
                if let Some(segment_roots_proof) = intermediate_shard_block.segment_roots_proof
                    && !buffer.append(segment_roots_proof)
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
                if !intermediate_shard_block.own_segment_roots.is_empty()
                    && !buffer.append(
                        SegmentRoot::repr_from_slice(intermediate_shard_block.own_segment_roots)
                            .as_flattened(),
                    )
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
                if !intermediate_shard_block.child_segment_roots.is_empty()
                    && !buffer.append(
                        SegmentRoot::repr_from_slice(intermediate_shard_block.child_segment_roots)
                            .as_flattened(),
                    )
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
            }
        }

        // PoT checkpoints go last as raw bytes
        let true = buffer.append(PotCheckpoints::bytes_from_slice(pot_checkpoints).as_flattened())
        else {
            return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
        };

        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an instance from a buffer with existing body contents.
    ///
    /// Returns the original buffer as `Err` if it does not parse as a beacon chain body or has
    /// trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = BeaconChainBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one body
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with the serialized body
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Borrowed body view
    #[inline(always)]
    pub fn body(&self) -> &BeaconChainBody<'_> {
        self.inner.get()
    }
}
434
/// Errors for [`OwnedIntermediateShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardBodyError {
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Too many leaf shard blocks
    #[error("Too many leaf shard blocks: {actual}")]
    TooManyLeafShardBlocks {
        /// Actual number of leaf shard blocks
        actual: usize,
    },
    /// Too many leaf shard own segment roots
    #[error("Too many leaf shard own segment roots: {actual}")]
    TooManyLeafShardOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
}
457
/// An owned intermediate shard block body backed by a shared aligned buffer
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardBody {
    // `Yoke` ties the parsed `IntermediateShardBody` to the buffer it borrows from; `Arc`
    // makes clones cheap reference-count bumps
    inner: Arc<Yoke<IntermediateShardBody<'static>, SharedAlignedBuffer>>,
}
466
impl GenericOwnedBlockBody for OwnedIntermediateShardBody {
    const SHARD_KIND: RealShardKind = RealShardKind::IntermediateShard;

    type Body<'a> = IntermediateShardBody<'a>;

    // Trait methods delegate to the inherent methods of the same name (inherent methods take
    // precedence in method resolution, so this does not recurse)
    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
487
impl OwnedIntermediateShardBody {
    /// Create a new instance from its parts.
    ///
    /// Serializes own segment roots and leaf shard blocks into a single aligned buffer, then
    /// parses it back via [`Self::from_buffer()`].
    pub fn new<'a, OSR, LSB>(
        own_segment_roots: OSR,
        leaf_shard_blocks: LSB,
    ) -> Result<Self, OwnedIntermediateShardBodyError>
    where
        OSR: TrustedLen<Item = SegmentRoot>,
        LSB: TrustedLen<Item = LeafShardBlockInfo<'a>> + Clone + 'a,
    {
        // `TrustedLen` guarantees `size_hint().0` is the exact length
        let num_own_segment_roots = own_segment_roots.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = leaf_shard_blocks.size_hint().0;
        let num_blocks = u8::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyLeafShardBlocks { actual: num_blocks }
        })?;

        // Capacity is a rough upper-bound estimate; all appends below are still checked
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                + u32::from(num_blocks) * OwnedLeafShardHeader::MAX_ALLOCATION * 2,
        );

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        for own_segment_root in own_segment_roots {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // Reserve one placeholder byte per block (u8 own segment roots count) and backfill
            // it in the loop below
            let mut own_segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for leaf_shard_block in leaf_shard_blocks.clone() {
                // Backfill the per-block count reserved above (placeholder zero stands when
                // there are no own segment roots)
                if !leaf_shard_block.own_segment_roots.is_empty() {
                    let num_own_segment_roots = leaf_shard_block.own_segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedIntermediateShardBodyError::TooManyLeafShardOwnSegmentRoots {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    buffer.as_mut_slice()[own_segments_roots_num_cursor] = num_own_segment_roots;
                }
                own_segments_roots_num_cursor += 1;

                // Append the leaf shard header followed by optional proof and segment roots
                OwnedLeafShardHeader::from_parts_into(
                    leaf_shard_block.header.prefix,
                    leaf_shard_block.header.result,
                    leaf_shard_block.header.consensus_info,
                    leaf_shard_block.header.beacon_chain_info(),
                    &mut buffer,
                );
                let true = align_to_8_with_padding(&mut buffer) else {
                    unreachable!("Checked size above; qed");
                };
                if let Some(segment_roots_proof) = leaf_shard_block.segment_roots_proof {
                    let true = buffer.append(segment_roots_proof) else {
                        unreachable!("Checked size above; qed");
                    };
                }
                if !leaf_shard_block.own_segment_roots.is_empty() {
                    let true = buffer.append(
                        SegmentRoot::repr_from_slice(leaf_shard_block.own_segment_roots)
                            .as_flattened(),
                    ) else {
                        unreachable!("Checked size above; qed");
                    };
                }
            }
        }

        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an instance from a buffer with existing body contents.
    ///
    /// Returns the original buffer as `Err` if it does not parse as an intermediate shard body
    /// or has trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = IntermediateShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one body
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with the serialized body
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Borrowed body view
    #[inline(always)]
    pub fn body(&self) -> &IntermediateShardBody<'_> {
        self.inner.get()
    }
}
622
/// Errors for [`OwnedLeafShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedLeafShardBodyError {
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Block body is too large
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// Too many transactions
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Failed to add transaction
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        // Intentionally no `#[from]` here: conversion is done via the manual
        // `From<AddTransactionError>` impl instead
        /// Inner transaction error
        error: OwnedTransactionError,
    },
}
645
646impl From<AddTransactionError> for OwnedLeafShardBodyError {
647 fn from(value: AddTransactionError) -> Self {
648 match value {
649 AddTransactionError::BlockBodyIsTooLarge => {
650 OwnedLeafShardBodyError::BlockBodyIsTooLarge
651 }
652 AddTransactionError::TooManyTransactions => {
653 OwnedLeafShardBodyError::TooManyTransactions
654 }
655 AddTransactionError::FailedToAddTransaction { error } => {
656 OwnedLeafShardBodyError::FailedToAddTransaction { error }
657 }
658 }
659 }
660}
661
/// An owned leaf shard block body backed by a shared aligned buffer
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBody {
    // `Yoke` ties the parsed `LeafShardBody` to the buffer it borrows from; `Arc` makes
    // clones cheap reference-count bumps
    inner: Arc<Yoke<LeafShardBody<'static>, SharedAlignedBuffer>>,
}
670
impl GenericOwnedBlockBody for OwnedLeafShardBody {
    const SHARD_KIND: RealShardKind = RealShardKind::LeafShard;

    type Body<'a> = LeafShardBody<'a>;

    // Trait methods delegate to the inherent methods of the same name (inherent methods take
    // precedence in method resolution, so this does not recurse)
    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
691
impl OwnedLeafShardBody {
    /// Start building a leaf shard body.
    ///
    /// Writes the own segment roots prefix and a zeroed transaction counter, then returns a
    /// builder that appends transactions after it.
    pub fn init<OSR>(
        own_segment_roots: OSR,
    ) -> Result<OwnedLeafShardBlockBodyBuilder, OwnedLeafShardBodyError>
    where
        OSR: TrustedLen<Item = SegmentRoot>,
    {
        // `TrustedLen` guarantees `size_hint().0` is the exact length
        let num_own_segment_roots = own_segment_roots.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedLeafShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;

        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32,
        );

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        for own_segment_root in own_segment_roots {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }

        // Transaction counter starts at zero; the builder updates it in place at this offset
        let num_transactions_offset = buffer.len() as usize;
        let true = buffer.append(&0u32.to_le_bytes()) else {
            unreachable!("Checked size above; qed");
        };

        Ok(OwnedLeafShardBlockBodyBuilder {
            transaction_builder: TransactionBuilder::new(num_transactions_offset, buffer),
        })
    }

    /// Create an instance from a buffer with existing body contents.
    ///
    /// Returns the original buffer as `Err` if it does not parse as a leaf shard body or has
    /// trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = LeafShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one body
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with the serialized body
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Borrowed body view
    #[inline(always)]
    pub fn body(&self) -> &LeafShardBody<'_> {
        self.inner.get()
    }
}
770
/// Builder that appends transactions to a leaf shard body started by
/// [`OwnedLeafShardBody::init()`]
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBlockBodyBuilder {
    // Shared transaction-appending machinery over the body buffer
    transaction_builder: TransactionBuilder,
}
776
777impl OwnedLeafShardBlockBodyBuilder {
778 #[inline(always)]
780 pub fn add_transaction<T>(&mut self, transaction: T) -> Result<(), OwnedLeafShardBodyError>
781 where
782 T: WritableBodyTransaction,
783 {
784 self.transaction_builder.add_transaction(transaction)?;
785
786 Ok(())
787 }
788
789 pub fn finish(self) -> OwnedLeafShardBody {
791 OwnedLeafShardBody::from_buffer(self.transaction_builder.finish().into_shared())
793 .expect("Known to be created correctly; qed")
794 }
795}
796
/// An owned block body of any shard kind
#[derive(Debug, Clone, From)]
pub enum OwnedBlockBody {
    /// Beacon chain block body
    BeaconChain(OwnedBeaconChainBody),
    /// Intermediate shard block body
    IntermediateShard(OwnedIntermediateShardBody),
    /// Leaf shard block body
    LeafShard(OwnedLeafShardBody),
}
810
impl OwnedBlockBody {
    /// Create an instance from a buffer with existing body contents, parsed according to
    /// `shard_kind`.
    ///
    /// Returns the original buffer as `Err` if it does not parse as the corresponding body.
    #[inline]
    pub fn from_buffer(
        buffer: SharedAlignedBuffer,
        shard_kind: RealShardKind,
    ) -> Result<Self, SharedAlignedBuffer> {
        Ok(match shard_kind {
            RealShardKind::BeaconChain => {
                Self::BeaconChain(OwnedBeaconChainBody::from_buffer(buffer)?)
            }
            RealShardKind::IntermediateShard => {
                Self::IntermediateShard(OwnedIntermediateShardBody::from_buffer(buffer)?)
            }
            RealShardKind::LeafShard => Self::LeafShard(OwnedLeafShardBody::from_buffer(buffer)?),
        })
    }

    /// Underlying buffer with the serialized body
    #[inline]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        match self {
            Self::BeaconChain(owned_body) => owned_body.buffer(),
            Self::IntermediateShard(owned_body) => owned_body.buffer(),
            Self::LeafShard(owned_body) => owned_body.buffer(),
        }
    }

    /// Number of references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        match self {
            Self::BeaconChain(owned_body) => owned_body.ref_count(),
            Self::IntermediateShard(owned_body) => owned_body.ref_count(),
            Self::LeafShard(owned_body) => owned_body.ref_count(),
        }
    }

    /// Borrowed body view wrapped into the shard-kind-agnostic [`BlockBody`] enum
    #[inline]
    pub fn body(&self) -> BlockBody<'_> {
        match self {
            Self::BeaconChain(owned_body) => BlockBody::BeaconChain(*owned_body.body()),
            Self::IntermediateShard(owned_body) => BlockBody::IntermediateShard(*owned_body.body()),
            Self::LeafShard(owned_body) => BlockBody::LeafShard(*owned_body.body()),
        }
    }
}
859
860#[inline(always)]
864#[must_use]
865fn align_to_8_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
866 let alignment = align_of::<u64>();
867 let unaligned_by = buffer.len() as usize % alignment;
868 if unaligned_by > 0 {
869 let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };
871
872 if !buffer.append(&0u64.to_le_bytes()[..padding_bytes]) {
873 return false;
874 }
875 }
876
877 true
878}
879
880#[inline(always)]
884#[must_use]
885fn align_to_16_bytes_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
886 let alignment = align_of::<u128>();
887 let unaligned_by = buffer.len() as usize % alignment;
888 if unaligned_by > 0 {
889 let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };
891
892 if !buffer.append(&0u128.to_le_bytes()[..padding_bytes]) {
893 return false;
894 }
895 }
896
897 true
898}