//! Owned block header variants.

use crate::block::BlockRoot;
4use crate::block::header::{
5 BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderConsensusInfo,
6 BlockHeaderConsensusParameters, BlockHeaderFixedConsensusParameters, BlockHeaderPrefix,
7 BlockHeaderResult, BlockHeaderSeal, BlockHeaderSealType, GenericBlockHeader,
8 IntermediateShardHeader, LeafShardHeader,
9};
10use crate::hashes::Blake3Hash;
11use crate::shard::{NumShardsUnchecked, RealShardKind};
12use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
13use ab_io_type::trivial_type::TrivialType;
14use core::fmt;
15use derive_more::From;
16use rclite::Arc;
17use yoke::Yoke;
18
/// Generic owned block header, implemented by the owned header type of every shard kind.
pub trait GenericOwnedBlockHeader:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockHeader> + 'static
{
    /// Shard kind this header corresponds to
    const SHARD_KIND: RealShardKind;

    /// Borrowed header view type
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Create an owned header from the provided buffer; on failure the buffer is returned back
    /// unchanged
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer>;

    /// Underlying shared buffer with serialized header bytes
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of strong references to the shared inner state
    fn ref_count(&self) -> usize;

    /// Borrowed header view
    fn header(&self) -> &Self::Header<'_>;
}
43
44fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSeal<'_>) {
45 match seal {
46 BlockHeaderSeal::Ed25519(seal) => {
47 let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
48 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
49 };
50 let true = buffer.append(seal.as_bytes()) else {
51 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
52 };
53 }
54 }
55}
56
/// Errors for [`OwnedBeaconChainHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// Number of child shard blocks does not fit into the on-wire `u16` counter
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        // Actual number of child shard blocks provided
        actual: usize,
    },
}
67
/// An owned version of the beacon chain block header.
///
/// Cheap to clone: the parsed view and the underlying buffer are shared behind an `Arc`.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    // Parsed header view yoked to the shared buffer that owns the bytes
    inner: Arc<Yoke<BeaconChainHeader<'static>, SharedAlignedBuffer>>,
}
76
impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
    const SHARD_KIND: RealShardKind = RealShardKind::BeaconChain;

    type Header<'a> = BeaconChainHeader<'a>;

    // All trait methods simply delegate to the inherent methods of the same name

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}
102
impl OwnedBeaconChainHeader {
    /// Max allocation needed by [`Self::from_parts()`] for the provided child shard blocks
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                // Number of child shard blocks, stored as `u16`
                u16::SIZE
                // Padding bytes before the block roots that follow
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderConsensusParameters::MAX_SIZE
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Serialize the provided parts into a new buffer, producing an unsealed header.
    ///
    /// Returns an error if `child_shard_blocks` does not fit into the on-wire `u16` counter.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: &BlockHeaderConsensusParameters<'_>,
    ) -> Result<OwnedBeaconChainHeaderUnsealed, OwnedBeaconChainHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainHeaderUnsealed { buffer })
    }

    /// Serialize the provided parts into `buffer` (no seal is written).
    ///
    /// The append order below defines the wire format — do not reorder.
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: &BlockHeaderConsensusParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        // Destructure exhaustively so a newly added field cannot be silently skipped
        let BlockHeaderConsensusParameters {
            fixed_parameters,
            super_segment_root,
            next_solution_range,
            pot_parameters_change,
        } = consensus_parameters;
        let BlockHeaderFixedConsensusParameters {
            solution_range,
            slot_iterations,
            num_shards,
        } = fixed_parameters;

        // Number of child shard blocks must fit into `u16` on the wire
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: `u16` count, 2 padding bytes, then the block roots themselves
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        // Consensus parameters: fixed part, then bitflags describing which optional fields
        // follow, then the optional fields in mask order
        {
            let true = buffer.append(&solution_range.to_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&slot_iterations.get().to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(NumShardsUnchecked::from(*num_shards).as_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // One presence bit per optional field
            let bitflags = {
                let mut bitflags = 0u8;

                if super_segment_root.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if next_solution_range.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            if let Some(super_segment_root) = super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

    /// Create an owned header from the provided buffer; on parse failure (malformed header or
    /// trailing bytes) the buffer is returned back unchanged
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // `Yoke` ties the borrowed header view to the shared buffer that owns the bytes
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            // The header must consume the buffer exactly
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with serialized header bytes
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Borrowed header view
    #[inline(always)]
    pub fn header(&self) -> &BeaconChainHeader<'_> {
        self.inner.get()
    }
}
294
/// Beacon chain header that has been serialized but not sealed yet; call
/// [`OwnedBeaconChainHeaderUnsealed::with_seal()`] to finish it.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeaderUnsealed {
    // All header fields serialized except the trailing seal
    buffer: OwnedAlignedBuffer,
}
300
301impl OwnedBeaconChainHeaderUnsealed {
302 #[inline(always)]
304 pub fn pre_seal_hash(&self) -> Blake3Hash {
305 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
307 }
308
309 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedBeaconChainHeader {
311 let Self { mut buffer } = self;
312 append_seal(&mut buffer, seal);
313
314 OwnedBeaconChainHeader::from_buffer(buffer.into_shared())
317 .expect("Known to be created correctly; qed")
318 }
319}
320
/// Errors for [`OwnedIntermediateShardHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// Number of child shard blocks does not fit into the on-wire `u16` counter
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        // Actual number of child shard blocks provided
        actual: usize,
    },
}
331
/// An owned version of the intermediate shard block header.
///
/// Cheap to clone: the parsed view and the underlying buffer are shared behind an `Arc`.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    // Parsed header view yoked to the shared buffer that owns the bytes
    inner: Arc<Yoke<IntermediateShardHeader<'static>, SharedAlignedBuffer>>,
}
340
impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
    const SHARD_KIND: RealShardKind = RealShardKind::IntermediateShard;

    type Header<'a> = IntermediateShardHeader<'a>;

    // All trait methods simply delegate to the inherent methods of the same name

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}
366
impl OwnedIntermediateShardHeader {
    /// Max allocation needed by [`Self::from_parts()`] for the provided child shard blocks
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                // Number of child shard blocks, stored as `u16`
                u16::SIZE
                // Padding bytes before the block roots that follow
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Serialize the provided parts into a new buffer, producing an unsealed header.
    ///
    /// Returns an error if `child_shard_blocks` does not fit into the on-wire `u16` counter.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardHeaderUnsealed { buffer })
    }

    /// Serialize the provided parts into `buffer` (no seal is written).
    ///
    /// The append order below defines the wire format — do not reorder.
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        // Number of child shard blocks must fit into `u16` on the wire
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: `u16` count, 2 padding bytes, then the block roots themselves
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

    /// Create an owned header from the provided buffer; on parse failure (malformed header or
    /// trailing bytes) the buffer is returned back unchanged
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // `Yoke` ties the borrowed header view to the shared buffer that owns the bytes
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = IntermediateShardHeader::try_from_bytes(buffer)
            else {
                return Err(());
            };
            // The header must consume the buffer exactly
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with serialized header bytes
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Borrowed header view
    #[inline(always)]
    pub fn header(&self) -> &IntermediateShardHeader<'_> {
        self.inner.get()
    }
}
491
/// Intermediate shard header that has been serialized but not sealed yet; call
/// [`OwnedIntermediateShardHeaderUnsealed::with_seal()`] to finish it.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeaderUnsealed {
    // All header fields serialized except the trailing seal
    buffer: OwnedAlignedBuffer,
}
497
498impl OwnedIntermediateShardHeaderUnsealed {
499 #[inline(always)]
501 pub fn pre_seal_hash(&self) -> Blake3Hash {
502 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
504 }
505
506 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedIntermediateShardHeader {
508 let Self { mut buffer } = self;
509 append_seal(&mut buffer, seal);
510
511 OwnedIntermediateShardHeader::from_buffer(buffer.into_shared())
515 .expect("Known to be created correctly; qed")
516 }
517}
518
/// An owned version of the leaf shard block header.
///
/// Cheap to clone: the parsed view and the underlying buffer are shared behind an `Arc`.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    // Parsed header view yoked to the shared buffer that owns the bytes
    inner: Arc<Yoke<LeafShardHeader<'static>, SharedAlignedBuffer>>,
}
527
impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
    const SHARD_KIND: RealShardKind = RealShardKind::LeafShard;

    type Header<'a> = LeafShardHeader<'a>;

    // All trait methods simply delegate to the inherent methods of the same name

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}
553
554impl OwnedLeafShardHeader {
555 pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
557 + BlockHeaderResult::SIZE
558 + BlockHeaderConsensusInfo::SIZE
559 + BlockHeaderBeaconChainInfo::SIZE
560 + BlockHeaderSeal::MAX_SIZE;
561
562 pub fn from_parts(
564 prefix: &BlockHeaderPrefix,
565 result: &BlockHeaderResult,
566 consensus_info: &BlockHeaderConsensusInfo,
567 beacon_chain_info: &BlockHeaderBeaconChainInfo,
568 ) -> OwnedLeafShardHeaderUnsealed {
569 let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);
570
571 Self::from_parts_into(
572 prefix,
573 result,
574 consensus_info,
575 beacon_chain_info,
576 &mut buffer,
577 );
578
579 OwnedLeafShardHeaderUnsealed { buffer }
580 }
581
582 pub fn from_parts_into(
584 prefix: &BlockHeaderPrefix,
585 result: &BlockHeaderResult,
586 consensus_info: &BlockHeaderConsensusInfo,
587 beacon_chain_info: &BlockHeaderBeaconChainInfo,
588 buffer: &mut OwnedAlignedBuffer,
589 ) {
590 let true = buffer.append(prefix.as_bytes()) else {
591 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
592 };
593 let true = buffer.append(result.as_bytes()) else {
594 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
595 };
596 let true = buffer.append(consensus_info.as_bytes()) else {
597 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
598 };
599 let true = buffer.append(beacon_chain_info.as_bytes()) else {
600 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
601 };
602 }
603
604 #[inline]
606 pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
607 let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
610 let Some((header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer) else {
611 return Err(());
612 };
613 if !extra_bytes.is_empty() {
614 return Err(());
615 }
616
617 Ok(header)
618 })
619 .map_err(move |()| buffer)?;
620
621 Ok(Self {
622 inner: Arc::new(inner),
623 })
624 }
625
626 #[inline(always)]
628 pub fn buffer(&self) -> &SharedAlignedBuffer {
629 self.inner.backing_cart()
630 }
631
632 #[inline(always)]
634 pub fn ref_count(&self) -> usize {
635 self.inner.strong_count()
636 }
637
638 #[inline(always)]
640 pub fn header(&self) -> &LeafShardHeader<'_> {
641 self.inner.get()
642 }
643}
644
/// Leaf shard header that has been serialized but not sealed yet; call
/// [`OwnedLeafShardHeaderUnsealed::with_seal()`] to finish it.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeaderUnsealed {
    // All header fields serialized except the trailing seal
    buffer: OwnedAlignedBuffer,
}
650
651impl OwnedLeafShardHeaderUnsealed {
652 #[inline(always)]
654 pub fn pre_seal_hash(&self) -> Blake3Hash {
655 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
657 }
658
659 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedLeafShardHeader {
661 let Self { mut buffer } = self;
662 append_seal(&mut buffer, seal);
663
664 OwnedLeafShardHeader::from_buffer(buffer.into_shared())
667 .expect("Known to be created correctly; qed")
668 }
669}
670
/// An owned block header of any shard kind.
///
/// Cheap to clone: each variant internally shares its buffer behind an `Arc`.
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Block header corresponds to the beacon chain
    BeaconChain(OwnedBeaconChainHeader),
    /// Block header corresponds to an intermediate shard
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Block header corresponds to a leaf shard
    LeafShard(OwnedLeafShardHeader),
}
684
685impl OwnedBlockHeader {
686 #[inline]
688 pub fn from_buffer(
689 buffer: SharedAlignedBuffer,
690 shard_kind: RealShardKind,
691 ) -> Result<Self, SharedAlignedBuffer> {
692 Ok(match shard_kind {
693 RealShardKind::BeaconChain => {
694 Self::BeaconChain(OwnedBeaconChainHeader::from_buffer(buffer)?)
695 }
696 RealShardKind::IntermediateShard => {
697 Self::IntermediateShard(OwnedIntermediateShardHeader::from_buffer(buffer)?)
698 }
699 RealShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader::from_buffer(buffer)?),
700 })
701 }
702
703 #[inline]
705 pub fn buffer(&self) -> &SharedAlignedBuffer {
706 match self {
707 Self::BeaconChain(owned_header) => owned_header.buffer(),
708 Self::IntermediateShard(owned_header) => owned_header.buffer(),
709 Self::LeafShard(owned_header) => owned_header.buffer(),
710 }
711 }
712
713 #[inline]
715 pub fn ref_count(&self) -> usize {
716 match self {
717 Self::BeaconChain(owned_header) => owned_header.ref_count(),
718 Self::IntermediateShard(owned_header) => owned_header.ref_count(),
719 Self::LeafShard(owned_header) => owned_header.ref_count(),
720 }
721 }
722
723 #[inline]
725 pub fn header(&self) -> BlockHeader<'_> {
726 match self {
727 Self::BeaconChain(owned_header) => {
728 BlockHeader::BeaconChain(owned_header.header().clone())
729 }
730 Self::IntermediateShard(owned_header) => {
731 BlockHeader::IntermediateShard(owned_header.header().clone())
732 }
733 Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header().clone()),
734 }
735 }
736}