1use crate::block::body::{
4 BeaconChainBody, BlockBody, GenericBlockBody, IntermediateShardBlockInfo,
5 IntermediateShardBody, LeafShardBlockInfo, LeafShardBody,
6};
7use crate::block::header::owned::{
8 OwnedIntermediateShardHeader, OwnedIntermediateShardHeaderError, OwnedLeafShardHeader,
9};
10use crate::pot::PotCheckpoints;
11use crate::segments::{LocalSegmentIndex, SegmentRoot};
12use crate::shard::RealShardKind;
13use crate::transaction::Transaction;
14use crate::transaction::owned::{OwnedTransaction, OwnedTransactionError};
15use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
16use ab_io_type::trivial_type::TrivialType;
17use core::fmt;
18use core::iter::TrustedLen;
19use derive_more::From;
20use rclite::Arc;
21use yoke::Yoke;
22
/// Generic owned block body, implemented for all shard kinds.
///
/// Allows code to be generic over [`OwnedBeaconChainBody`], [`OwnedIntermediateShardBody`] and
/// [`OwnedLeafShardBody`] while retaining access to the underlying buffer and the parsed body
/// view.
pub trait GenericOwnedBlockBody:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockBody> + 'static
{
    /// Shard kind this block body corresponds to
    const SHARD_KIND: RealShardKind;

    /// Borrowed body view parsed from the owned buffer
    type Body<'a>: GenericBlockBody<'a>
    where
        Self: 'a;

    /// Create an instance from the provided buffer.
    ///
    /// Returns the original buffer as an error if it does not contain a valid body.
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer>;

    /// Inner buffer with block body contents
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of clones in memory
    fn ref_count(&self) -> usize;

    /// Get the parsed block body view
    fn body(&self) -> &Self::Body<'_>;
}
47
/// Errors that may occur when appending a transaction to a block body under construction
#[derive(Debug, thiserror::Error)]
enum AddTransactionError {
    /// Appending the transaction or its alignment padding would exceed the buffer limit
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// The `u32` transaction counter would overflow
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Serializing the transaction itself failed
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        /// Underlying transaction serialization error
        #[from]
        error: OwnedTransactionError,
    },
}
65
/// A transaction that can be serialized into a block body buffer
pub trait WritableBodyTransaction {
    /// Write this transaction into the provided buffer
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError>;
}
71
impl WritableBodyTransaction for Transaction<'_> {
    fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
        // Serialize the borrowed transaction parts directly into the target buffer
        OwnedTransaction::from_parts_into(
            self.header,
            self.read_slots,
            self.write_slots,
            self.payload,
            self.seal,
            buffer,
        )
    }
}
84
85impl WritableBodyTransaction for &OwnedTransaction {
86 fn write_into(&self, buffer: &mut OwnedAlignedBuffer) -> Result<(), OwnedTransactionError> {
87 if buffer.append(self.buffer().as_slice()) {
88 Ok(())
89 } else {
90 Err(OwnedTransactionError::TransactionTooLarge)
91 }
92 }
93}
94
/// Incrementally builds the transaction section of a block body.
///
/// Remembers where the little-endian `u32` transaction count lives inside `buffer` so it can be
/// updated in place as transactions are appended.
#[derive(Debug, Clone)]
struct TransactionBuilder {
    // Byte offset of the `u32` number of transactions within `buffer`
    num_transactions_offset: usize,
    // Buffer with the block body serialized so far
    buffer: OwnedAlignedBuffer,
}
100
impl TransactionBuilder {
    /// Create a new builder whose transaction count lives at `num_transactions_offset` within
    /// `buffer`
    fn new(num_transactions_offset: usize, buffer: OwnedAlignedBuffer) -> Self {
        Self {
            num_transactions_offset,
            buffer,
        }
    }

    /// Add a single transaction to the body.
    ///
    /// Increments the in-buffer transaction count first; if serialization or alignment padding
    /// fails afterwards, the count (and any bytes written by this call) is rolled back, leaving
    /// the buffer in a consistent state.
    fn add_transaction<T>(&mut self, transaction: T) -> Result<(), AddTransactionError>
    where
        T: WritableBodyTransaction,
    {
        // The first transaction must start at a 16-byte boundary, so pad before it
        if self.inc_transaction_count()? == 1 && !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        let old_buffer_len = self.buffer.len();

        // Roll the count back if the transaction fails to serialize
        transaction
            .write_into(&mut self.buffer)
            .inspect_err(|_error| {
                self.dec_transaction_count();
            })?;

        // Every transaction is followed by padding to a 16-byte boundary; on failure undo both
        // the count increment and the bytes written above
        if !align_to_16_bytes_with_padding(&mut self.buffer) {
            self.dec_transaction_count();
            // SAFETY: shrinking back to a length that was valid before this call
            unsafe {
                self.buffer.set_len(old_buffer_len);
            }
            return Err(AddTransactionError::BlockBodyIsTooLarge);
        }

        Ok(())
    }

    /// Consume the builder and return the finished buffer
    #[inline(always)]
    fn finish(self) -> OwnedAlignedBuffer {
        self.buffer
    }

    /// Increment the in-buffer transaction count, returning the new value.
    ///
    /// Fails with [`AddTransactionError::TooManyTransactions`] on `u32` overflow.
    #[inline(always)]
    fn inc_transaction_count(&mut self) -> Result<u32, AddTransactionError> {
        // SAFETY: `num_transactions_offset` points at the `u32` count previously written into
        // the buffer; unaligned reads/writes are used since the offset is not guaranteed to be
        // 4-byte aligned
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions
                .checked_add(1)
                .ok_or(AddTransactionError::TooManyTransactions)?;
            num_transactions_ptr.write_unaligned(num_transactions);
            Ok(num_transactions)
        }
    }

    /// Decrement the in-buffer transaction count (saturating at zero); used to roll back a
    /// failed [`Self::add_transaction()`]
    #[inline(always)]
    fn dec_transaction_count(&mut self) {
        // SAFETY: same invariants as in `inc_transaction_count()`
        unsafe {
            let num_transactions_ptr = self
                .buffer
                .as_mut_ptr()
                .add(self.num_transactions_offset)
                .cast::<u32>();
            let num_transactions = num_transactions_ptr.read_unaligned();
            let num_transactions = num_transactions.saturating_sub(1);
            num_transactions_ptr.write_unaligned(num_transactions);
        }
    }
}
184
185#[derive(Debug, thiserror::Error)]
187pub enum OwnedBeaconChainBodyError {
188 #[error("Too many PoT checkpoints: {actual}")]
190 TooManyPotCheckpoints {
191 actual: usize,
193 },
194 #[error("Too many own segment roots: {actual}")]
196 TooManyOwnSegmentRoots {
197 actual: usize,
199 },
200 #[error("Too many intermediate shard blocks: {actual}")]
202 TooManyIntermediateShardBlocks {
203 actual: usize,
205 },
206 #[error("Too many intermediate shard own segment roots: {actual}")]
208 TooManyIntermediateShardOwnSegmentRoots {
209 actual: usize,
211 },
212 #[error("Too many leaf shard blocks with segments: {actual}")]
214 TooManyLeafShardBlocksWithSegments {
215 actual: usize,
217 },
218 #[error("Too many leaf shard block segments: {actual}")]
220 TooManyLeafShardBlockSegments {
221 actual: usize,
223 },
224 #[error("Too many intermediate shard child segment roots: {actual}")]
226 TooManyIntermediateShardChildSegmentRoots {
227 actual: usize,
229 },
230 #[error("Failed to intermediate shard header: {error}")]
232 FailedToAddIntermediateShard {
233 #[from]
235 error: OwnedIntermediateShardHeaderError,
236 },
237 #[error("Block body is too large")]
239 BlockBodyIsTooLarge,
240}
241
/// An owned version of [`BeaconChainBody`] backed by a [`SharedAlignedBuffer`]
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainBody {
    // Parsed body view stored together with the shared buffer it borrows from
    inner: Arc<Yoke<BeaconChainBody<'static>, SharedAlignedBuffer>>,
}
250
impl GenericOwnedBlockBody for OwnedBeaconChainBody {
    const SHARD_KIND: RealShardKind = RealShardKind::BeaconChain;

    type Body<'a> = BeaconChainBody<'a>;

    // All trait methods below delegate to the inherent methods of the same name

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
276
impl OwnedBeaconChainBody {
    /// Create a new instance from the provided parts.
    ///
    /// `own_segments` are the beacon chain's own `(LocalSegmentIndex, SegmentRoot)` pairs,
    /// `intermediate_shard_blocks` describe included intermediate shard blocks (with their own
    /// and leaf shard segments), and `pot_checkpoints` are proof-of-time checkpoints.
    pub fn new<'a, OS, ISB>(
        own_segments: OS,
        intermediate_shard_blocks: ISB,
        pot_checkpoints: &[PotCheckpoints],
    ) -> Result<Self, OwnedBeaconChainBodyError>
    where
        OS: TrustedLen<Item = (LocalSegmentIndex, SegmentRoot)>,
        ISB: TrustedLen<Item = IntermediateShardBlockInfo<'a>> + 'a,
    {
        // `TrustedLen` makes `size_hint().0` the exact number of items below
        let num_pot_checkpoints = pot_checkpoints.len();
        let num_pot_checkpoints = u32::try_from(num_pot_checkpoints).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyPotCheckpoints {
                actual: num_pot_checkpoints,
            }
        })?;
        let num_own_segment_roots = own_segments.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = intermediate_shard_blocks.size_hint().0;
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainBodyError::TooManyIntermediateShardBlocks { actual: num_blocks }
        })?;

        // Capacity estimate covering the fixed-size prefix plus worst-case shard headers
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u32::SIZE
                + u8::SIZE
                + if num_own_segment_roots > 0 { LocalSegmentIndex::SIZE } else { 0 }
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                + u16::SIZE
                + u32::from(num_blocks) * OwnedIntermediateShardHeader::max_allocation_for(&[]) * 2,
        );

        let true = buffer.append(&num_pot_checkpoints.to_le_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };

        // Own segment roots: count, then (if any) the first local segment index, then roots
        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let mut own_segments = own_segments.peekable();
        if let Some((first_local_segment_index, _own_segment_root)) = own_segments.peek() {
            // Only the first local segment index is stored explicitly
            let true = buffer.append(first_local_segment_index.as_bytes()) else {
                unreachable!("Checked size above; qed");
            };
        }
        for (_segment_index, own_segment_root) in own_segments {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        // Intermediate shard blocks
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // Reserve two count bytes per block (own segment roots + leaf shard blocks with
            // segments); they are backfilled while iterating over the blocks below
            let mut segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0, 0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for intermediate_shard_block in intermediate_shard_blocks {
                let num_own_segment_roots = intermediate_shard_block
                    .own_segments
                    .as_ref()
                    .map(|own_segments| own_segments.segment_roots.len())
                    .unwrap_or_default();
                let num_own_segment_roots =
                    u8::try_from(num_own_segment_roots).map_err(|_error| {
                        OwnedBeaconChainBodyError::TooManyIntermediateShardOwnSegmentRoots {
                            actual: num_own_segment_roots,
                        }
                    })?;
                let num_leaf_shard_blocks_with_segments = intermediate_shard_block
                    .leaf_shards_segments()
                    .size_hint()
                    .0;
                let num_leaf_shard_blocks_with_segments =
                    u8::try_from(num_leaf_shard_blocks_with_segments).map_err(|_error| {
                        OwnedBeaconChainBodyError::TooManyLeafShardBlocksWithSegments {
                            actual: num_leaf_shard_blocks_with_segments,
                        }
                    })?;

                // Backfill the two per-block counts reserved before the loop
                buffer.as_mut_slice()[segments_roots_num_cursor..][..2]
                    .copy_from_slice(&[num_own_segment_roots, num_leaf_shard_blocks_with_segments]);
                segments_roots_num_cursor += 2;

                OwnedIntermediateShardHeader::from_parts_into(
                    intermediate_shard_block.header.prefix,
                    intermediate_shard_block.header.result,
                    intermediate_shard_block.header.consensus_info,
                    intermediate_shard_block.header.beacon_chain_info(),
                    intermediate_shard_block.header.child_shard_blocks(),
                    &mut buffer,
                )?;

                if let Some(segments_proof) = &intermediate_shard_block.segments_proof
                    && !buffer.append(*segments_proof)
                {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }

                if let Some(own_segments) = &intermediate_shard_block.own_segments {
                    if !buffer.append(own_segments.first_local_segment_index.as_bytes()) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                    if !buffer.append(
                        SegmentRoot::repr_from_slice(own_segments.segment_roots).as_flattened(),
                    ) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                }

                // First pass: per-leaf-shard segment root counts
                for (_shard_index, own_segments) in intermediate_shard_block.leaf_shards_segments()
                {
                    let num_own_segment_roots = own_segments.segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedBeaconChainBodyError::TooManyLeafShardBlockSegments {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    if !buffer.append(&[num_own_segment_roots]) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                }
                // Second pass: per-leaf-shard shard index, first segment index and the roots
                for (shard_index, own_segments) in intermediate_shard_block.leaf_shards_segments() {
                    if !buffer.append(shard_index.as_bytes()) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                    if !buffer.append(own_segments.first_local_segment_index.as_bytes()) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                    if !buffer.append(
                        SegmentRoot::repr_from_slice(own_segments.segment_roots).as_flattened(),
                    ) {
                        return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                    }
                }

                // Keep the next shard block 8-byte aligned
                if !align_to_8_with_padding(&mut buffer) {
                    return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
                }
            }
        }

        // PoT checkpoints are stored last
        let true = buffer.append(PotCheckpoints::bytes_from_slice(pot_checkpoints).as_flattened())
        else {
            return Err(OwnedBeaconChainBodyError::BlockBodyIsTooLarge);
        };

        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an instance from the provided buffer.
    ///
    /// Returns the original buffer as an error in case the body is invalid or the buffer
    /// contains trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // `Yoke` stores the parsed view together with the buffer it borrows from
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = BeaconChainBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // Reject buffers with trailing bytes after the body
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block body contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the parsed block body view
    #[inline(always)]
    pub fn body(&self) -> &BeaconChainBody<'_> {
        self.inner.get()
    }
}
488
/// Error for [`OwnedIntermediateShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardBodyError {
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Too many leaf shard blocks
    #[error("Too many leaf shard blocks: {actual}")]
    TooManyLeafShardBlocks {
        /// Actual number of leaf shard blocks
        actual: usize,
    },
    /// Too many leaf shard own segment roots
    #[error("Too many leaf shard own segment roots: {actual}")]
    TooManyLeafShardOwnSegmentRoots {
        /// Actual number of leaf shard own segment roots
        actual: usize,
    },
}
511
/// An owned version of [`IntermediateShardBody`] backed by a [`SharedAlignedBuffer`]
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardBody {
    // Parsed body view stored together with the shared buffer it borrows from
    inner: Arc<Yoke<IntermediateShardBody<'static>, SharedAlignedBuffer>>,
}
520
impl GenericOwnedBlockBody for OwnedIntermediateShardBody {
    const SHARD_KIND: RealShardKind = RealShardKind::IntermediateShard;

    type Body<'a> = IntermediateShardBody<'a>;

    // All trait methods below delegate to the inherent methods of the same name

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
546
impl OwnedIntermediateShardBody {
    /// Create a new instance from the provided parts.
    ///
    /// `own_segments` are the shard's own `(LocalSegmentIndex, SegmentRoot)` pairs and
    /// `leaf_shard_blocks` describe included leaf shard blocks.
    pub fn new<'a, OS, LSB>(
        own_segments: OS,
        leaf_shard_blocks: LSB,
    ) -> Result<Self, OwnedIntermediateShardBodyError>
    where
        OS: TrustedLen<Item = (LocalSegmentIndex, SegmentRoot)>,
        LSB: TrustedLen<Item = LeafShardBlockInfo<'a>> + 'a,
    {
        // `TrustedLen` makes `size_hint().0` the exact number of items below
        let num_own_segment_roots = own_segments.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;
        let num_blocks = leaf_shard_blocks.size_hint().0;
        let num_blocks = u8::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardBodyError::TooManyLeafShardBlocks { actual: num_blocks }
        })?;

        // Capacity estimate covering the fixed-size prefix plus worst-case leaf shard headers
        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + if num_own_segment_roots > 0 { LocalSegmentIndex::SIZE } else { 0 }
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32
                + u32::from(num_blocks) * OwnedLeafShardHeader::MAX_ALLOCATION * 2,
        );

        // Own segment roots: count, then (if any) the first local segment index, then roots
        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let mut own_segments = own_segments.peekable();
        if let Some((first_local_segment_index, _own_segment_root)) = own_segments.peek() {
            // Only the first local segment index is stored explicitly
            let true = buffer.append(first_local_segment_index.as_bytes()) else {
                unreachable!("Checked size above; qed");
            };
        }
        for (_segment_index, own_segment_root) in own_segments {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }
        // Leaf shard blocks
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // Reserve one count byte per block, backfilled while iterating over blocks below
            let mut own_segments_roots_num_cursor = buffer.len() as usize;
            for _ in 0..num_blocks {
                let true = buffer.append(&[0]) else {
                    unreachable!("Checked size above; qed");
                };
            }
            let true = align_to_8_with_padding(&mut buffer) else {
                unreachable!("Checked size above; qed");
            };
            for leaf_shard_block in leaf_shard_blocks {
                if let Some(segments) = &leaf_shard_block.segments {
                    let num_own_segment_roots = segments.own_segments.segment_roots.len();
                    let num_own_segment_roots =
                        u8::try_from(num_own_segment_roots).map_err(|_error| {
                            OwnedIntermediateShardBodyError::TooManyLeafShardOwnSegmentRoots {
                                actual: num_own_segment_roots,
                            }
                        })?;
                    // Backfill the count reserved before the loop (stays 0 without segments)
                    buffer.as_mut_slice()[own_segments_roots_num_cursor] = num_own_segment_roots;
                }
                own_segments_roots_num_cursor += 1;

                OwnedLeafShardHeader::from_parts_into(
                    leaf_shard_block.header.prefix,
                    leaf_shard_block.header.result,
                    leaf_shard_block.header.consensus_info,
                    leaf_shard_block.header.beacon_chain_info(),
                    &mut buffer,
                );
                let true = align_to_8_with_padding(&mut buffer) else {
                    unreachable!("Checked size above; qed");
                };

                if let Some(segments) = &leaf_shard_block.segments {
                    let true =
                        buffer.append(segments.own_segments.first_local_segment_index.as_bytes())
                    else {
                        unreachable!("Checked size above; qed");
                    };
                    let true = buffer.append(segments.segment_roots_proof) else {
                        unreachable!("Checked size above; qed");
                    };
                    let true = buffer.append(
                        SegmentRoot::repr_from_slice(segments.own_segments.segment_roots)
                            .as_flattened(),
                    ) else {
                        unreachable!("Checked size above; qed");
                    };
                }
            }
        }

        Ok(Self::from_buffer(buffer.into_shared()).expect("Known to be created correctly; qed"))
    }

    /// Create an instance from the provided buffer.
    ///
    /// Returns the original buffer as an error in case the body is invalid or the buffer
    /// contains trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // `Yoke` stores the parsed view together with the buffer it borrows from
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = IntermediateShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // Reject buffers with trailing bytes after the body
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block body contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the parsed block body view
    #[inline(always)]
    pub fn body(&self) -> &IntermediateShardBody<'_> {
        self.inner.get()
    }
}
693
/// Error for [`OwnedLeafShardBody`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedLeafShardBodyError {
    /// Too many own segment roots
    #[error("Too many own segment roots: {actual}")]
    TooManyOwnSegmentRoots {
        /// Actual number of own segment roots
        actual: usize,
    },
    /// Block body is too large
    #[error("Block body is too large")]
    BlockBodyIsTooLarge,
    /// Too many transactions
    #[error("Too many transactions")]
    TooManyTransactions,
    /// Failed to add transaction
    #[error("Failed to add transaction: {error}")]
    FailedToAddTransaction {
        /// Underlying transaction serialization error
        // Deliberately no `#[from]`: conversion goes through `From<AddTransactionError>`
        error: OwnedTransactionError,
    },
}
716
717impl From<AddTransactionError> for OwnedLeafShardBodyError {
718 fn from(value: AddTransactionError) -> Self {
719 match value {
720 AddTransactionError::BlockBodyIsTooLarge => {
721 OwnedLeafShardBodyError::BlockBodyIsTooLarge
722 }
723 AddTransactionError::TooManyTransactions => {
724 OwnedLeafShardBodyError::TooManyTransactions
725 }
726 AddTransactionError::FailedToAddTransaction { error } => {
727 OwnedLeafShardBodyError::FailedToAddTransaction { error }
728 }
729 }
730 }
731}
732
/// An owned version of [`LeafShardBody`] backed by a [`SharedAlignedBuffer`]
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBody {
    // Parsed body view stored together with the shared buffer it borrows from
    inner: Arc<Yoke<LeafShardBody<'static>, SharedAlignedBuffer>>,
}
741
impl GenericOwnedBlockBody for OwnedLeafShardBody {
    const SHARD_KIND: RealShardKind = RealShardKind::LeafShard;

    type Body<'a> = LeafShardBody<'a>;

    // All trait methods below delegate to the inherent methods of the same name

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn body(&self) -> &Self::Body<'_> {
        self.body()
    }
}
767
impl OwnedLeafShardBody {
    /// Start building a leaf shard body from the shard's own segments.
    ///
    /// Transactions are added to the returned builder afterwards.
    pub fn init<OS>(
        own_segments: OS,
    ) -> Result<OwnedLeafShardBlockBodyBuilder, OwnedLeafShardBodyError>
    where
        OS: TrustedLen<Item = (LocalSegmentIndex, SegmentRoot)>,
    {
        // `TrustedLen` makes `size_hint().0` the exact number of items
        let num_own_segment_roots = own_segments.size_hint().0;
        let num_own_segment_roots = u8::try_from(num_own_segment_roots).map_err(|_error| {
            OwnedLeafShardBodyError::TooManyOwnSegmentRoots {
                actual: num_own_segment_roots,
            }
        })?;

        let mut buffer = OwnedAlignedBuffer::with_capacity(
            u8::SIZE
                + if num_own_segment_roots > 0 {
                    LocalSegmentIndex::SIZE
                } else {
                    0
                }
                + u32::from(num_own_segment_roots) * SegmentRoot::SIZE as u32,
        );

        // Own segment roots: count, then (if any) the first local segment index, then roots
        let true = buffer.append(&[num_own_segment_roots]) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let mut own_segments = own_segments.peekable();
        if let Some((first_local_segment_index, _own_segment_root)) = own_segments.peek() {
            // Only the first local segment index is stored explicitly
            let true = buffer.append(first_local_segment_index.as_bytes()) else {
                unreachable!("Checked size above; qed");
            };
        }
        for (_segment_index, own_segment_root) in own_segments {
            let true = buffer.append(own_segment_root.as_ref()) else {
                unreachable!("Checked size above; qed");
            };
        }

        // Transaction count placeholder, updated in place by `TransactionBuilder`
        let num_transactions_offset = buffer.len() as usize;
        let true = buffer.append(&0u32.to_le_bytes()) else {
            unreachable!("Checked size above; qed");
        };

        Ok(OwnedLeafShardBlockBodyBuilder {
            transaction_builder: TransactionBuilder::new(num_transactions_offset, buffer),
        })
    }

    /// Create an instance from the provided buffer.
    ///
    /// Returns the original buffer as an error in case the body is invalid or the buffer
    /// contains trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // `Yoke` stores the parsed view together with the buffer it borrows from
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((body, extra_bytes)) = LeafShardBody::try_from_bytes(buffer) else {
                return Err(());
            };
            // Reject buffers with trailing bytes after the body
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(body)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block body contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the parsed block body view
    #[inline(always)]
    pub fn body(&self) -> &LeafShardBody<'_> {
        self.inner.get()
    }
}
859
/// Builder for [`OwnedLeafShardBody`] that appends transactions, created via
/// [`OwnedLeafShardBody::init()`]
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBlockBodyBuilder {
    // Maintains the in-buffer transaction count as transactions are appended
    transaction_builder: TransactionBuilder,
}
865
866impl OwnedLeafShardBlockBodyBuilder {
867 #[inline(always)]
869 pub fn add_transaction<T>(&mut self, transaction: T) -> Result<(), OwnedLeafShardBodyError>
870 where
871 T: WritableBodyTransaction,
872 {
873 self.transaction_builder.add_transaction(transaction)?;
874
875 Ok(())
876 }
877
878 pub fn finish(self) -> OwnedLeafShardBody {
880 OwnedLeafShardBody::from_buffer(self.transaction_builder.finish().into_shared())
882 .expect("Known to be created correctly; qed")
883 }
884}
885
/// An owned version of [`BlockBody`] covering all shard kinds
#[derive(Debug, Clone, From)]
pub enum OwnedBlockBody {
    /// Block body corresponding to the beacon chain
    BeaconChain(OwnedBeaconChainBody),
    /// Block body corresponding to an intermediate shard
    IntermediateShard(OwnedIntermediateShardBody),
    /// Block body corresponding to a leaf shard
    LeafShard(OwnedLeafShardBody),
}
899
impl OwnedBlockBody {
    /// Create an instance from the provided buffer for the given shard kind.
    ///
    /// Returns the original buffer as an error if it does not contain a valid body.
    #[inline]
    pub fn from_buffer(
        buffer: SharedAlignedBuffer,
        shard_kind: RealShardKind,
    ) -> Result<Self, SharedAlignedBuffer> {
        Ok(match shard_kind {
            RealShardKind::BeaconChain => {
                Self::BeaconChain(OwnedBeaconChainBody::from_buffer(buffer)?)
            }
            RealShardKind::IntermediateShard => {
                Self::IntermediateShard(OwnedIntermediateShardBody::from_buffer(buffer)?)
            }
            RealShardKind::LeafShard => Self::LeafShard(OwnedLeafShardBody::from_buffer(buffer)?),
        })
    }

    /// Inner buffer with block body contents
    #[inline]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        match self {
            Self::BeaconChain(owned_body) => owned_body.buffer(),
            Self::IntermediateShard(owned_body) => owned_body.buffer(),
            Self::LeafShard(owned_body) => owned_body.buffer(),
        }
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        match self {
            Self::BeaconChain(owned_body) => owned_body.ref_count(),
            Self::IntermediateShard(owned_body) => owned_body.ref_count(),
            Self::LeafShard(owned_body) => owned_body.ref_count(),
        }
    }

    /// Get the parsed block body view
    #[inline]
    pub fn body(&self) -> BlockBody<'_> {
        // Dereference copies the body view out of the owned wrapper
        match self {
            Self::BeaconChain(owned_body) => BlockBody::BeaconChain(*owned_body.body()),
            Self::IntermediateShard(owned_body) => BlockBody::IntermediateShard(*owned_body.body()),
            Self::LeafShard(owned_body) => BlockBody::LeafShard(*owned_body.body()),
        }
    }
}
948
949#[inline(always)]
953#[must_use]
954fn align_to_8_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
955 let alignment = align_of::<u64>();
956 let unaligned_by = buffer.len() as usize % alignment;
957 if unaligned_by > 0 {
958 let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };
960
961 if !buffer.append(&0u64.to_le_bytes()[..padding_bytes]) {
962 return false;
963 }
964 }
965
966 true
967}
968
969#[inline(always)]
973#[must_use]
974fn align_to_16_bytes_with_padding(buffer: &mut OwnedAlignedBuffer) -> bool {
975 let alignment = align_of::<u128>();
976 let unaligned_by = buffer.len() as usize % alignment;
977 if unaligned_by > 0 {
978 let padding_bytes = unsafe { alignment.unchecked_sub(unaligned_by) };
980
981 if !buffer.append(&0u128.to_le_bytes()[..padding_bytes]) {
982 return false;
983 }
984 }
985
986 true
987}