1use crate::block::body::{
4 BeaconChainBody, BlockBody, GenericBlockBody, IntermediateShardBlockInfo,
5 IntermediateShardBody, LeafShardBlockInfo, LeafShardBody,
6};
7use crate::block::header::owned::{
8 OwnedIntermediateShardHeader, OwnedIntermediateShardHeaderError, OwnedLeafShardHeader,
9};
10use crate::pot::PotCheckpoints;
11use crate::segments::{LocalSegmentIndex, SegmentRoot};
12use crate::shard::RealShardKind;
13use crate::transaction::Transaction;
14use crate::transaction::owned::{OwnedTransaction, OwnedTransactionError};
15use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
16use ab_io_type::trivial_type::TrivialType;
17use core::fmt;
18use core::iter::TrustedLen;
19use derive_more::From;
20use rclite::Arc;
21use yoke::Yoke;
22
/// An owned block body.
///
/// Implementations hold the serialized body in a single shared aligned buffer and expose a
/// cheap borrowed view into it.
pub trait GenericOwnedBlockBody:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockBody> + 'static
{
    /// Shard kind this block body belongs to
    const SHARD_KIND: RealShardKind;

    /// Borrowed block body view backed by the internal buffer
    type Body<'a>: GenericBlockBody<'a>
    where
        Self: 'a;

    /// Underlying buffer with the serialized block body bytes
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of strong references to the shared inner state
    fn ref_count(&self) -> usize;

    /// Get a regular (borrowed) block body view out of the owned version
    fn body(&self) -> &Self::Body<'_>;
}
44
/// Errors that can occur while appending a transaction to a block body under construction
#[derive(Debug, thiserror::Error)]
enum AddTransactionError {
    /// Appending the transaction (or its alignment padding) would exceed the buffer's max size
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// The `u32` transaction counter would overflow
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Serializing the transaction itself failed
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        /// Underlying transaction serialization error
        #[from]
        error: OwnedTransactionError,
    },
}
62
/// A transaction that can be serialized into a block body buffer
pub trait WritableBodyTransaction {
    /// Write this transaction into the provided buffer
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError>;
}
68
impl WritableBodyTransaction for Transaction<'_> {
    /// Serializes the borrowed transaction parts directly into `buffer`
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
        OwnedTransaction::from_parts_into(
            self.header,
            self.read_slots,
            self.write_slots,
            self.payload,
            self.seal,
            buffer,
        )
    }
}
81
impl WritableBodyTransaction for &OwnedTransaction {
    /// Copies the already-serialized transaction bytes into `buffer`
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
        // `append` returns `false` when the buffer can't grow to accommodate the bytes
        if buffer.append(self.buffer().as_slice()) {
            Ok(())
        } else {
            Err(OwnedTransactionError::TransactionTooLarge)
        }
    }
}
91
/// Helper that appends transactions to a body buffer while maintaining the `u32` transaction
/// counter stored at a fixed offset inside that same buffer
#[derive(Debug, Clone)]
struct TransactionBuilder {
    /// Byte offset in `buffer` where the `u32` number-of-transactions counter lives
    num_transactions_offset: usize,
    /// Buffer with the block body being built
    buffer: OwnedAlignedBuffer,
}
97
impl TransactionBuilder {
    /// Creates a builder over `buffer`, whose transaction counter was already written at
    /// `num_transactions_offset`
    fn new(num_transactions_offset: usize, buffer: OwnedAlignedBuffer) -> Self {
        Self {
            num_transactions_offset,
            buffer,
        }
    }

    /// Add a transaction to the body, rolling back the counter (and, for padding failures after
    /// the write, the buffer length) on error
    fn add_transaction<T>(&mut self, transaction: T) -> Result<(), AddTransactionError>
    where
        T: WritableBodyTransaction,
    {
        // The first transaction requires the buffer to be 16-byte aligned first; on padding
        // failure undo the counter increment
        if self.inc_transaction_count()? == 1 && !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        let old_buffer_len = self.buffer.len();

        // NOTE(review): on serialization error only the counter is rolled back here, not the
        // buffer length — presumably `write_into` implementations leave no partial bytes behind
        // on failure; TODO confirm
        transaction
            .write_into(&mut self.buffer)
            .inspect_err(|_error| {
                self.dec_transaction_count();
            })?;

        // Pad after the transaction so the next one starts 16-byte aligned; on failure restore
        // both the counter and the pre-transaction buffer length
        if !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            // SAFETY: shrinking back to a previously valid length whose bytes were all
            // initialized before this call
            unsafe {
                self.buffer.set_len(old_buffer_len);
            }
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        Ok(())
    }

    /// Consume the builder and return the finished buffer
    #[inline(always)]
    fn finish(self) -> OwnedAlignedBuffer {
        self.buffer
    }

    /// Increment the `u32` transaction counter stored in the buffer, returning the new value.
    ///
    /// NOTE(review): the counter is initially written as little-endian elsewhere but read and
    /// written natively here — this assumes a little-endian target; TODO confirm.
    #[inline(always)]
    fn inc_transaction_count(&mut self) -> Result<u32, AddTransactionError> {
        // SAFETY: `num_transactions_offset` points at a `u32` counter previously written into
        // the buffer; unaligned reads/writes are used, so no alignment requirement
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions
                .checked_add(1)
                .ok_or(AddTransactionError::TooManyTransactions)?;
            num_transactions_ptr.write_unaligned(num_transactions);
            Ok(num_transactions)
        }
    }

    /// Decrement the `u32` transaction counter stored in the buffer (saturating at zero)
    #[inline(always)]
    fn dec_transaction_count(&mut self) {
        // SAFETY: `num_transactions_offset` points at a `u32` counter previously written into
        // the buffer; unaligned reads/writes are used, so no alignment requirement
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions.saturating_sub(1);
            num_transactions_ptr.write_unaligned(num_transactions);
        }
    }
}
181
182#[derive(Debug, thiserror::Error)]
184pub enum OwnedBeaconChainBodyError {
185 #[error("Too many PoT checkpoints: {actual}")]
187 TooManyPotCheckpoints {
188 actual: usize,
190 },
191 #[error("Too many own segment roots: {actual}")]
193 TooManyOwnSegmentRoots {
194 actual: usize,
196 },
197 #[error("Too many intermediate shard blocks: {actual}")]
199 TooManyIntermediateShardBlocks {
200 actual: usize,
202 },
203 #[error("Too many intermediate shard own segment roots: {actual}")]
205 TooManyIntermediateShardOwnSegmentRoots {
206 actual: usize,
208 },
209 #[error("Too many leaf shard blocks with segments: {actual}")]
211 TooManyLeafShardBlocksWithSegments {
212 actual: usize,
214 },
215 #[error("Too many leaf shard block segments: {actual}")]
217 TooManyLeafShardBlockSegments {
218 actual: usize,
220 },
221 #[error("Too many intermediate shard child segment roots: {actual}")]
223 TooManyIntermediateShardChildSegmentRoots {
224 actual: usize,
226 },
227 #[error("Failed to intermediate shard header: {error}")]
229 FailedToAddIntermediateShard {
230 #[from]
232 error: OwnedIntermediateShardHeaderError,
233 },
234 #[error("Block body is too large")]
236 BlockBodyIsTooLarge,
237}
238
/// An owned version of [`BeaconChainBody`].
///
/// Cheap to clone: clones share the same underlying buffer.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainBody {
    /// `Yoke` ties the lifetime-erased body view to the shared buffer that backs it
    inner: Arc<Yoke<BeaconChainBody<'static>, SharedAlignedBuffer>>,
}
247
impl GenericOwnedBlockBody for OwnedBeaconChainBody {
    const SHARD_KIND: RealShardKind = RealShardKind::BeaconChain;

    type Body<'a> = BeaconChainBody<'a>;

    // Delegates to the inherent method of the same name (inherent methods take precedence over
    // trait methods, so this is not recursive)
    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    // Delegates to the inherent method of the same name
    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    // Delegates to the inherent method of the same name
    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
268
impl OwnedBeaconChainBody {
    /// Create a new instance from constituent parts.
    ///
    /// `own_segments` are `(local segment index, segment root)` pairs; only the first index is
    /// written explicitly, so the indices are presumably consecutive — TODO confirm against the
    /// deserializer.
    pub fn new<'a, OS, ISB>(
        own_segments: OS,
        intermediate_shard_blocks: ISB,
        pot_checkpoints: &[PotCheckpoints],
    ) -> Result<Self, OwnedBeaconChainBodyError>
    where
        OS: TrustedLen<Item = (LocalSegmentIndex, SegmentRoot)>,
        ISB: TrustedLen<Item = IntermediateShardBlockInfo<'a>> + 'a,
    {
        let num_pot_checkpoints = pot_checkpoints.len();
        let num_pot_checkpoints = u32::try_from(num_pot_checkpoints).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyPotCheckpoints {
                actual: num_pot_checkpoints,
            }
        })?;
        // `TrustedLen` guarantees the lower size hint is the exact length
        let num_own_segment_roots = own_segments.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = intermediate_shard_blocks.size_hint().0;
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyIntermediateShardBlocks { actual: num_blocks }
        })?;

        // The capacity is only an estimate; every `append` below is still checked
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u32::SIZE
                + u8::SIZE
                + if num_own_segment_roots > 0 { LocalSegmentIndex::SIZE } else { 0 }
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                + u16::SIZE
                + u32::from(num_blocks) * OwnedIntermediateShardHeader::max_allocation_for(&[]) * 2,
        );

        let true = buffer.append(&num_pot_checkpoints.to_le_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Only the first local segment index is stored, followed by all segment roots
        let mut own_segments = own_segments.peekable();
        if let Some((first_local_segment_index, _own_segment_root)) = own_segments.peek() {
            let true = buffer.append(first_local_segment_index.as_bytes()) else {
                unreachable!("Checked size above; qed");
            };
        }
        for (_segment_index, own_segment_root) in own_segments {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // Reserve two placeholder bytes per block (own segment roots count and leaf shard
            // blocks with segments count), patched in place while iterating below
            let mut segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0, 0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for intermediate_shard_block in intermediate_shard_blocks {
                let num_own_segment_roots = intermediate_shard_block
                    .own_segments
                    .as_ref()
                    .map(|own_segments| own_segments.segment_roots.len())
                    .unwrap_or_default();
                let num_own_segment_roots =
                    u8::try_from(num_own_segment_roots).map_err(|_error| {
                        OwnedBeaconChainBodyError::TooManyIntermediateShardOwnSegmentRoots {
                            actual: num_own_segment_roots,
                        }
                    })?;
                let num_leaf_shard_blocks_with_segments = intermediate_shard_block
                    .leaf_shards_segments()
                    .size_hint()
                    .0;
                let num_leaf_shard_blocks_with_segments =
                    u8::try_from(num_leaf_shard_blocks_with_segments).map_err(|_error| {
                        OwnedBeaconChainBodyError::TooManyLeafShardBlocksWithSegments {
                            actual: num_leaf_shard_blocks_with_segments,
                        }
                    })?;

                // Patch the placeholder pair reserved for this block earlier
                buffer.as_mut_slice()[segments_roots_num_cursor..][..2]
                    .copy_from_slice(&[num_own_segment_roots, num_leaf_shard_blocks_with_segments]);
                segments_roots_num_cursor += 2;

                OwnedIntermediateShardHeader::from_parts_into(
                    intermediate_shard_block.header.prefix,
                    intermediate_shard_block.header.result,
                    intermediate_shard_block.header.consensus_info,
                    intermediate_shard_block.header.beacon_chain_info(),
                    intermediate_shard_block.header.child_shard_blocks(),
                    &mut buffer,
                )?;

                if let Some(segments_proof) = &intermediate_shard_block.segments_proof
                    && !buffer.append(*segments_proof)
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }

                if let Some(own_segments) = &intermediate_shard_block.own_segments {
                    if !buffer.append(own_segments.first_local_segment_index.as_bytes()) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                    if !buffer.append(
                        SegmentRoot::repr_from_slice(own_segments.segment_roots).as_flattened(),
                    ) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                }

                // First pass: per-leaf-shard segment root counts only
                for (_shard_index, own_segments) in intermediate_shard_block.leaf_shards_segments()
                {
                    let num_own_segment_roots = own_segments.segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedBeaconChainBodyError::TooManyLeafShardBlockSegments {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    if !buffer.append(&[num_own_segment_roots]) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                }
                // Second pass: the per-leaf-shard segment data itself
                for (shard_index, own_segments) in intermediate_shard_block.leaf_shards_segments() {
                    if !buffer.append(shard_index.as_bytes()) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                    if !buffer.append(own_segments.first_local_segment_index.as_bytes()) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                    if !buffer.append(
                        SegmentRoot::repr_from_slice(own_segments.segment_roots).as_flattened(),
                    ) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                }

                // Keep each shard block entry 8-byte aligned
                if !align_to_8_with_padding(&mut buffer) {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
            }
        }

        let true = buffer.append(PotCheckpoints::bytes_from_slice(pot_checkpoints).as_flattened())
        else {
            return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
        };

        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an owned body from the underlying buffer.
    ///
    /// Returns the original buffer in case of failure (the bytes don't parse as a beacon chain
    /// body, or there are trailing bytes).
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = BeaconChainBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one body and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Buffer with the serialized body
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get a regular (borrowed) body view out of the owned version
    #[inline(always)]
    pub fn body(&self) -> &BeaconChainBody<'_> {
        self.inner.get()
    }
}
480
/// Errors that can occur when constructing an [`OwnedIntermediateShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardBodyError {
    /// Number of own segment roots doesn't fit into `u8`
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Number of leaf shard blocks doesn't fit into `u8`
    #[error("Too many leaf shard blocks: {actual}")]
    TooManyLeafShardBlocks {
        /// Actual number of leaf shard blocks
        actual: usize,
    },
    /// Number of a leaf shard's own segment roots doesn't fit into `u8`
    #[error("Too many leaf shard own segment roots: {actual}")]
    TooManyLeafShardOwnSegmentRoots {
        /// Actual number of leaf shard own segment roots
        actual: usize,
    },
}
503
/// An owned version of [`IntermediateShardBody`].
///
/// Cheap to clone: clones share the same underlying buffer.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardBody {
    /// `Yoke` ties the lifetime-erased body view to the shared buffer that backs it
    inner: Arc<Yoke<IntermediateShardBody<'static>, SharedAlignedBuffer>>,
}
512
impl GenericOwnedBlockBody for OwnedIntermediateShardBody {
    const SHARD_KIND: RealShardKind = RealShardKind::IntermediateShard;

    type Body<'a> = IntermediateShardBody<'a>;

    // Delegates to the inherent method of the same name (inherent methods take precedence over
    // trait methods, so this is not recursive)
    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    // Delegates to the inherent method of the same name
    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    // Delegates to the inherent method of the same name
    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
533
impl OwnedIntermediateShardBody {
    /// Create a new instance from constituent parts.
    ///
    /// `own_segments` are `(local segment index, segment root)` pairs; only the first index is
    /// written explicitly, so the indices are presumably consecutive — TODO confirm against the
    /// deserializer.
    pub fn new<'a, OS, LSB>(
        own_segments: OS,
        leaf_shard_blocks: LSB,
    ) -> Result<Self, OwnedIntermediateShardBodyError>
    where
        OS: TrustedLen<Item = (LocalSegmentIndex, SegmentRoot)>,
        LSB: TrustedLen<Item = LeafShardBlockInfo<'a>> + 'a,
    {
        // `TrustedLen` guarantees the lower size hint is the exact length
        let num_own_segment_roots = own_segments.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = leaf_shard_blocks.size_hint().0;
        let num_blocks = u8::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyLeafShardBlocks { actual: num_blocks }
        })?;

        // The capacity is only an estimate; every `append` below is still checked
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + if num_own_segment_roots > 0 { LocalSegmentIndex::SIZE } else { 0 }
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                + u32::from(num_blocks) * OwnedLeafShardHeader::MAX_ALLOCATION * 2,
        );

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Only the first local segment index is stored, followed by all segment roots
        let mut own_segments = own_segments.peekable();
        if let Some((first_local_segment_index, _own_segment_root)) = own_segments.peek() {
            let true = buffer.append(first_local_segment_index.as_bytes()) else {
                unreachable!("Checked size above; qed");
            };
        }
        for (_segment_index, own_segment_root) in own_segments {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // Reserve one placeholder byte per block (own segment roots count), patched in
            // place while iterating below
            let mut own_segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for leaf_shard_block in leaf_shard_blocks {
                // Patch this block's placeholder; blocks without segments keep the zero byte
                if let Some(segments) = &leaf_shard_block.segments {
                    let num_own_segment_roots = segments.own_segments.segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedIntermediateShardBodyError::TooManyLeafShardOwnSegmentRoots {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    buffer.as_mut_slice()[own_segments_roots_num_cursor] = num_own_segment_roots;
                }
                own_segments_roots_num_cursor += 1;

                OwnedLeafShardHeader::from_parts_into(
                    leaf_shard_block.header.prefix,
                    leaf_shard_block.header.result,
                    leaf_shard_block.header.consensus_info,
                    leaf_shard_block.header.beacon_chain_info(),
                    &mut buffer,
                );
                // Keep each entry 8-byte aligned
                let true = align_to_8_with_padding(&mut buffer) else {
                    unreachable!("Checked size above; qed");
                };

                if let Some(segments) = &leaf_shard_block.segments {
                    let true =
                        buffer.append(segments.own_segments.first_local_segment_index.as_bytes())
                    else {
                        unreachable!("Checked size above; qed");
                    };
                    let true = buffer.append(segments.segment_roots_proof) else {
                        unreachable!("Checked size above; qed");
                    };
                    let true = buffer.append(
                        SegmentRoot::repr_from_slice(segments.own_segments.segment_roots)
                            .as_flattened(),
                    ) else {
                        unreachable!("Checked size above; qed");
                    };
                }
            }
        }

        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an owned body from the underlying buffer.
    ///
    /// Returns the original buffer in case of failure (the bytes don't parse as an intermediate
    /// shard body, or there are trailing bytes).
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = IntermediateShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one body and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Buffer with the serialized body
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get a regular (borrowed) body view out of the owned version
    #[inline(always)]
    pub fn body(&self) -> &IntermediateShardBody<'_> {
        self.inner.get()
    }
}
680
/// Errors that can occur when constructing an [`OwnedLeafShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedLeafShardBodyError {
    /// Number of own segment roots doesn't fit into `u8`
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Block body is too large
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// The `u32` transaction counter would overflow
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Serializing a transaction failed
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        // Deliberately no `#[from]` here: conversion happens through the manual
        // `From<AddTransactionError>` impl below instead
        error: OwnedTransactionError,
    },
}
703
704impl From<AddTransactionError> for OwnedLeafShardBodyError {
705 fn from(value: AddTransactionError) -> Self {
706 match value {
707 AddTransactionError::BlockBodyIsTooLarge => {
708 OwnedLeafShardBodyError::BlockBodyIsTooLarge
709 }
710 AddTransactionError::TooManyTransactions => {
711 OwnedLeafShardBodyError::TooManyTransactions
712 }
713 AddTransactionError::FailedToAddTransaction { error } => {
714 OwnedLeafShardBodyError::FailedToAddTransaction { error }
715 }
716 }
717 }
718}
719
/// An owned version of [`LeafShardBody`].
///
/// Cheap to clone: clones share the same underlying buffer.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBody {
    /// `Yoke` ties the lifetime-erased body view to the shared buffer that backs it
    inner: Arc<Yoke<LeafShardBody<'static>, SharedAlignedBuffer>>,
}
728
impl GenericOwnedBlockBody for OwnedLeafShardBody {
    const SHARD_KIND: RealShardKind = RealShardKind::LeafShard;

    type Body<'a> = LeafShardBody<'a>;

    // Delegates to the inherent method of the same name (inherent methods take precedence over
    // trait methods, so this is not recursive)
    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    // Delegates to the inherent method of the same name
    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    // Delegates to the inherent method of the same name
    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
749
impl OwnedLeafShardBody {
    /// Start building a leaf shard body: write the segments section and a zeroed transaction
    /// counter, then hand back a builder for appending transactions.
    ///
    /// `own_segments` are `(local segment index, segment root)` pairs; only the first index is
    /// written explicitly, so the indices are presumably consecutive — TODO confirm against the
    /// deserializer.
    pub fn init<OS>(
        own_segments: OS,
    ) -> Result<OwnedLeafShardBlockBodyBuilder, OwnedLeafShardBodyError>
    where
        OS: TrustedLen<Item = (LocalSegmentIndex, SegmentRoot)>,
    {
        // `TrustedLen` guarantees the lower size hint is the exact length
        let num_own_segment_roots = own_segments.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedLeafShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;

        // The capacity is only an estimate; every `append` below is still checked
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + if num_own_segment_roots > 0 {
                    LocalSegmentIndex::SIZE
                } else {
                    0
                }
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32,
        );

        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Only the first local segment index is stored, followed by all segment roots
        let mut own_segments = own_segments.peekable();
        if let Some((first_local_segment_index, _own_segment_root)) = own_segments.peek() {
            let true = buffer.append(first_local_segment_index.as_bytes()) else {
                unreachable!("Checked size above; qed");
            };
        }
        for (_segment_index, own_segment_root) in own_segments {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }

        // Zero-initialized `u32` transaction counter, maintained later by `TransactionBuilder`
        let num_transactions_offset = buffer.len() as usize;
        let true = buffer.append(&0u32.to_le_bytes()) else {
            unreachable!("Checked size above; qed");
        };

        Ok(OwnedLeafShardBlockBodyBuilder {
            transaction_builder: TransactionBuilder::new(num_transactions_offset, buffer),
        })
    }

    /// Create an owned body from the underlying buffer.
    ///
    /// Returns the original buffer in case of failure (the bytes don't parse as a leaf shard
    /// body, or there are trailing bytes).
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = LeafShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one body and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Buffer with the serialized body
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get a regular (borrowed) body view out of the owned version
    #[inline(always)]
    pub fn body(&self) -> &LeafShardBody<'_> {
        self.inner.get()
    }
}
841
/// Builder for [`OwnedLeafShardBody`] that appends transactions after the segments section
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBlockBodyBuilder {
    /// Maintains the buffer and the transaction counter written by [`OwnedLeafShardBody::init`]
    transaction_builder: TransactionBuilder,
}
847
848impl OwnedLeafShardBlockBodyBuilder {
849 #[inline(always)]
851 pub fn add_transaction<T>(&mut self, transaction: T) -> Result<(), OwnedLeafShardBodyError>
852 where
853 T: WritableBodyTransaction,
854 {
855 self.transaction_builder.add_transaction(transaction)?;
856
857 Ok(())
858 }
859
860 pub fn finish(self) -> OwnedLeafShardBody {
862 OwnedLeafShardBody::from_buffer(self.transaction_builder.finish().into_shared())
864 .expect("Known to be created correctly; qed")
865 }
866}
867
/// An owned block body of any shard kind
#[derive(Debug, Clone, From)]
pub enum OwnedBlockBody {
    /// Block body of the beacon chain
    BeaconChain(OwnedBeaconChainBody),
    /// Block body of an intermediate shard
    IntermediateShard(OwnedIntermediateShardBody),
    /// Block body of a leaf shard
    LeafShard(OwnedLeafShardBody),
}
881
882impl OwnedBlockBody {
883 #[inline]
885 pub fn from_buffer(
886 buffer: SharedAlignedBuffer,
887 shard_kind: RealShardKind,
888 ) -> Result<Self, SharedAlignedBuffer> {
889 Ok(match shard_kind {
890 RealShardKind::BeaconChain => {
891 Self::BeaconChain(OwnedBeaconChainBody::from_buffer(buffer)?)
892 }
893 RealShardKind::IntermediateShard => {
894 Self::IntermediateShard(OwnedIntermediateShardBody::from_buffer(buffer)?)
895 }
896 RealShardKind::LeafShard => Self::LeafShard(OwnedLeafShardBody::from_buffer(buffer)?),
897 })
898 }
899
900 #[inline]
902 pub fn buffer(&self) -> &SharedAlignedBuffer {
903 match self {
904 Self::BeaconChain(owned_body) => owned_body.buffer(),
905 Self::IntermediateShard(owned_body) => owned_body.buffer(),
906 Self::LeafShard(owned_body) => owned_body.buffer(),
907 }
908 }
909
910 #[inline(always)]
912 pub fn ref_count(&self) -> usize {
913 match self {
914 Self::BeaconChain(owned_body) => owned_body.ref_count(),
915 Self::IntermediateShard(owned_body) => owned_body.ref_count(),
916 Self::LeafShard(owned_body) => owned_body.ref_count(),
917 }
918 }
919
920 #[inline]
922 pub fn body(&self) -> BlockBody<'_> {
923 match self {
924 Self::BeaconChain(owned_body) => BlockBody::BeaconChain(*owned_body.body()),
925 Self::IntermediateShard(owned_body) => BlockBody::IntermediateShard(*owned_body.body()),
926 Self::LeafShard(owned_body) => BlockBody::LeafShard(*owned_body.body()),
927 }
928 }
929}
930
931#[inline(always)]
935#[must_use]
936fn align_to_8_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
937 let alignment = align_of::<u64>();
938 let unaligned_by = buffer.len() as usize % alignment;
939 if unaligned_by > 0 {
940 let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };
942
943 if !buffer.append(&0u64.to_le_bytes()[..padding_bytes]) {
944 return false;
945 }
946 }
947
948 true
949}
950
951#[inline(always)]
955#[must_use]
956fn align_to_16_bytes_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
957 let alignment = align_of::<u128>();
958 let unaligned_by = buffer.len() as usize % alignment;
959 if unaligned_by > 0 {
960 let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };
962
963 if !buffer.append(&0u128.to_le_bytes()[..padding_bytes]) {
964 return false;
965 }
966 }
967
968 true
969}