// ab_core_primitives/block/header/owned.rs
use crate::block::BlockRoot;
4use crate::block::header::{
5 BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderConsensusInfo,
6 BlockHeaderConsensusParameters, BlockHeaderPrefix, BlockHeaderResult, BlockHeaderSeal,
7 BlockHeaderSealType, GenericBlockHeader, IntermediateShardHeader, LeafShardHeader,
8};
9use crate::hashes::Blake3Hash;
10use crate::shard::ShardKind;
11use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
12use ab_io_type::trivial_type::TrivialType;
13use core::fmt;
14use derive_more::From;
15use rclite::Arc;
16use yoke::Yoke;
17
/// Generic owned block header, abstracting over the shard-specific owned header types
pub trait GenericOwnedBlockHeader:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockHeader> + 'static
{
    /// Shard kind that this header type corresponds to
    const SHARD_KIND: ShardKind;

    /// Borrowed header view tied to the lifetime of the owned header
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Create an owned header from the underlying buffer.
    ///
    /// Returns the original buffer back as an error if it does not contain a valid header.
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer>;

    /// Inner buffer with header contents
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of strong references to the shared inner state
    fn ref_count(&self) -> usize;

    /// Get the borrowed header view
    fn header(&self) -> &Self::Header<'_>;
}
42
43fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSeal<'_>) {
44 match seal {
45 BlockHeaderSeal::Ed25519(seal) => {
46 let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
47 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
48 };
49 let true = buffer.append(seal.as_bytes()) else {
50 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
51 };
52 }
53 }
54}
55
/// Errors for [`OwnedBeaconChainHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// Too many child shard blocks
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks (must fit into `u16`)
        actual: usize,
    },
}
66
/// An owned version of [`BeaconChainHeader`].
///
/// Cheaply cloneable (clones bump an `Arc` reference count); the borrowed header view is
/// yoked to the shared aligned buffer it was parsed from.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    // Parsed header borrowing from the `SharedAlignedBuffer` cart
    inner: Arc<Yoke<BeaconChainHeader<'static>, SharedAlignedBuffer>>,
}
75
76impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
77 const SHARD_KIND: ShardKind = ShardKind::BeaconChain;
78
79 type Header<'a> = BeaconChainHeader<'a>;
80
81 #[inline(always)]
82 fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
83 Self::from_buffer(buffer)
84 }
85
86 #[inline(always)]
87 fn buffer(&self) -> &SharedAlignedBuffer {
88 self.buffer()
89 }
90
91 #[inline(always)]
92 fn ref_count(&self) -> usize {
93 self.ref_count()
94 }
95
96 #[inline(always)]
97 fn header(&self) -> &Self::Header<'_> {
98 self.header()
99 }
100}
101
impl OwnedBeaconChainHeader {
    /// Max allocation (in bytes) needed to encode a header with the provided child shard
    /// blocks; all other components are fixed-size or bounded by `MAX_SIZE` constants
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                // Number of child shard blocks, encoded as `u16`
                u16::SIZE
                // Two zero bytes written after the count (alignment padding, presumably)
                + <[u8; 2]>::SIZE
                // The child shard block roots themselves
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderConsensusParameters::MAX_SIZE
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Create an unsealed header from its parts.
    ///
    /// Allocates a buffer of [`Self::max_allocation_for()`] capacity and encodes everything
    /// except the seal into it.
    ///
    /// # Errors
    ///
    /// Returns [`OwnedBeaconChainHeaderError::TooManyChildShardBlocks`] if the number of
    /// child shard blocks does not fit into `u16`.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
    ) -> Result<OwnedBeaconChainHeaderUnsealed, OwnedBeaconChainHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainHeaderUnsealed { buffer })
    }

    /// Encode header parts (everything except the seal) into the provided buffer.
    ///
    /// Layout written, in order: prefix, result, consensus info, child shard blocks
    /// (`u16` count + 2 zero bytes + roots), then consensus parameters (fixed part,
    /// bitflags byte, optional fields in mask order).
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        // The count must fit into `u16` because that is how it is encoded below
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: little-endian `u16` count, two zero bytes, then the roots
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        // Consensus parameters: fixed parameters first, then a bitflags byte describing
        // which optional fields follow, then those optional fields in mask order
        {
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .solution_range
                    .to_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .slot_iterations
                    .get()
                    .to_le_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // Record which optional fields are present
            let bitflags = {
                let mut bitflags = 0u8;

                if consensus_parameters.super_segment_root.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if consensus_parameters.next_solution_range.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if consensus_parameters.pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            if let Some(super_segment_root) = consensus_parameters.super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = consensus_parameters.next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = consensus_parameters.pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

    /// Create an owned header from the underlying buffer.
    ///
    /// Returns the original buffer back as an error if it does not contain a valid header
    /// or has trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // Yoke ties the borrowed header to the shared buffer it was parsed from
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one header and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state (clones of this owned header)
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the borrowed header view
    #[inline(always)]
    pub fn header(&self) -> &BeaconChainHeader<'_> {
        self.inner.get()
    }
}
289
/// A beacon chain header that has been encoded but not sealed yet; finish it with
/// [`Self::with_seal()`]
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeaderUnsealed {
    // Owned buffer containing the encoded header without a seal
    buffer: OwnedAlignedBuffer,
}
295
296impl OwnedBeaconChainHeaderUnsealed {
297 #[inline(always)]
299 pub fn pre_seal_hash(&self) -> Blake3Hash {
300 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
302 }
303
304 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedBeaconChainHeader {
306 let Self { mut buffer } = self;
307 append_seal(&mut buffer, seal);
308
309 OwnedBeaconChainHeader::from_buffer(buffer.into_shared())
312 .expect("Known to be created correctly; qed")
313 }
314}
315
/// Errors for [`OwnedIntermediateShardHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// Too many child shard blocks
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks (must fit into `u16`)
        actual: usize,
    },
}
326
/// An owned version of [`IntermediateShardHeader`].
///
/// Cheaply cloneable (clones bump an `Arc` reference count); the borrowed header view is
/// yoked to the shared aligned buffer it was parsed from.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    // Parsed header borrowing from the `SharedAlignedBuffer` cart
    inner: Arc<Yoke<IntermediateShardHeader<'static>, SharedAlignedBuffer>>,
}
335
336impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
337 const SHARD_KIND: ShardKind = ShardKind::IntermediateShard;
338
339 type Header<'a> = IntermediateShardHeader<'a>;
340
341 #[inline(always)]
342 fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
343 Self::from_buffer(buffer)
344 }
345
346 #[inline(always)]
347 fn buffer(&self) -> &SharedAlignedBuffer {
348 self.buffer()
349 }
350
351 #[inline(always)]
352 fn ref_count(&self) -> usize {
353 self.ref_count()
354 }
355
356 #[inline(always)]
357 fn header(&self) -> &Self::Header<'_> {
358 self.header()
359 }
360}
361
impl OwnedIntermediateShardHeader {
    /// Max allocation (in bytes) needed to encode a header with the provided child shard
    /// blocks; all other components are fixed-size or bounded by `MAX_SIZE` constants
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                // Number of child shard blocks, encoded as `u16`
                u16::SIZE
                // Two zero bytes written after the count (alignment padding, presumably)
                + <[u8; 2]>::SIZE
                // The child shard block roots themselves
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Create an unsealed header from its parts.
    ///
    /// Allocates a buffer of [`Self::max_allocation_for()`] capacity and encodes everything
    /// except the seal into it.
    ///
    /// # Errors
    ///
    /// Returns [`OwnedIntermediateShardHeaderError::TooManyChildShardBlocks`] if the number
    /// of child shard blocks does not fit into `u16`.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardHeaderUnsealed { buffer })
    }

    /// Encode header parts (everything except the seal) into the provided buffer.
    ///
    /// Layout written, in order: prefix, result, consensus info, beacon chain info, then
    /// child shard blocks (`u16` count + 2 zero bytes + roots).
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        // The count must fit into `u16` because that is how it is encoded below
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: little-endian `u16` count, two zero bytes, then the roots
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

    /// Create an owned header from the underlying buffer.
    ///
    /// Returns the original buffer back as an error if it does not contain a valid header
    /// or has trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // Yoke ties the borrowed header to the shared buffer it was parsed from
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = IntermediateShardHeader::try_from_bytes(buffer)
            else {
                return Err(());
            };
            // The buffer must contain exactly one header and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state (clones of this owned header)
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the borrowed header view
    #[inline(always)]
    pub fn header(&self) -> &IntermediateShardHeader<'_> {
        self.inner.get()
    }
}
486
/// An intermediate shard header that has been encoded but not sealed yet; finish it with
/// [`Self::with_seal()`]
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeaderUnsealed {
    // Owned buffer containing the encoded header without a seal
    buffer: OwnedAlignedBuffer,
}
492
493impl OwnedIntermediateShardHeaderUnsealed {
494 #[inline(always)]
496 pub fn pre_seal_hash(&self) -> Blake3Hash {
497 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
499 }
500
501 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedIntermediateShardHeader {
503 let Self { mut buffer } = self;
504 append_seal(&mut buffer, seal);
505
506 OwnedIntermediateShardHeader::from_buffer(buffer.into_shared())
510 .expect("Known to be created correctly; qed")
511 }
512}
513
/// An owned version of [`LeafShardHeader`].
///
/// Cheaply cloneable (clones bump an `Arc` reference count); the borrowed header view is
/// yoked to the shared aligned buffer it was parsed from.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    // Parsed header borrowing from the `SharedAlignedBuffer` cart
    inner: Arc<Yoke<LeafShardHeader<'static>, SharedAlignedBuffer>>,
}
522
523impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
524 const SHARD_KIND: ShardKind = ShardKind::LeafShard;
525
526 type Header<'a> = LeafShardHeader<'a>;
527
528 #[inline(always)]
529 fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
530 Self::from_buffer(buffer)
531 }
532
533 #[inline(always)]
534 fn buffer(&self) -> &SharedAlignedBuffer {
535 self.buffer()
536 }
537
538 #[inline(always)]
539 fn ref_count(&self) -> usize {
540 self.ref_count()
541 }
542
543 #[inline(always)]
544 fn header(&self) -> &Self::Header<'_> {
545 self.header()
546 }
547}
548
impl OwnedLeafShardHeader {
    /// Max allocation (in bytes) needed for the header; leaf shard headers have no
    /// variable-length child shard blocks, so this is a constant
    pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
        + BlockHeaderResult::SIZE
        + BlockHeaderConsensusInfo::SIZE
        + BlockHeaderBeaconChainInfo::SIZE
        + BlockHeaderSeal::MAX_SIZE;

    /// Create an unsealed header from its parts (infallible, all parts are fixed-size)
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
    ) -> OwnedLeafShardHeaderUnsealed {
        let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            &mut buffer,
        );

        OwnedLeafShardHeaderUnsealed { buffer }
    }

    /// Encode header parts (everything except the seal) into the provided buffer.
    ///
    /// Layout written, in order: prefix, result, consensus info, beacon chain info.
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        buffer: &mut OwnedAlignedBuffer,
    ) {
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
    }

    /// Create an owned header from the underlying buffer.
    ///
    /// Returns the original buffer back as an error if it does not contain a valid header
    /// or has trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // Yoke ties the borrowed header to the shared buffer it was parsed from
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain exactly one header and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of strong references to the shared inner state (clones of this owned header)
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the borrowed header view
    #[inline(always)]
    pub fn header(&self) -> &LeafShardHeader<'_> {
        self.inner.get()
    }
}
639
/// A leaf shard header that has been encoded but not sealed yet; finish it with
/// [`Self::with_seal()`]
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeaderUnsealed {
    // Owned buffer containing the encoded header without a seal
    buffer: OwnedAlignedBuffer,
}
645
646impl OwnedLeafShardHeaderUnsealed {
647 #[inline(always)]
649 pub fn pre_seal_hash(&self) -> Blake3Hash {
650 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
652 }
653
654 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedLeafShardHeader {
656 let Self { mut buffer } = self;
657 append_seal(&mut buffer, seal);
658
659 OwnedLeafShardHeader::from_buffer(buffer.into_shared())
662 .expect("Known to be created correctly; qed")
663 }
664}
665
/// An owned block header of any shard kind
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Block header for the beacon chain
    BeaconChain(OwnedBeaconChainHeader),
    /// Block header for an intermediate shard
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Block header for a leaf shard
    LeafShard(OwnedLeafShardHeader),
}
679
680impl OwnedBlockHeader {
681 #[inline]
683 pub fn from_buffer(
684 buffer: SharedAlignedBuffer,
685 shard_kind: ShardKind,
686 ) -> Result<Self, SharedAlignedBuffer> {
687 Ok(match shard_kind {
688 ShardKind::BeaconChain => {
689 Self::BeaconChain(OwnedBeaconChainHeader::from_buffer(buffer)?)
690 }
691 ShardKind::IntermediateShard => {
692 Self::IntermediateShard(OwnedIntermediateShardHeader::from_buffer(buffer)?)
693 }
694 ShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader::from_buffer(buffer)?),
695 ShardKind::Phantom | ShardKind::Invalid => {
696 return Err(buffer);
698 }
699 })
700 }
701
702 #[inline]
704 pub fn buffer(&self) -> &SharedAlignedBuffer {
705 match self {
706 Self::BeaconChain(owned_header) => owned_header.buffer(),
707 Self::IntermediateShard(owned_header) => owned_header.buffer(),
708 Self::LeafShard(owned_header) => owned_header.buffer(),
709 }
710 }
711
712 #[inline]
714 pub fn ref_count(&self) -> usize {
715 match self {
716 Self::BeaconChain(owned_header) => owned_header.ref_count(),
717 Self::IntermediateShard(owned_header) => owned_header.ref_count(),
718 Self::LeafShard(owned_header) => owned_header.ref_count(),
719 }
720 }
721
722 #[inline]
724 pub fn header(&self) -> BlockHeader<'_> {
725 match self {
726 Self::BeaconChain(owned_header) => {
727 BlockHeader::BeaconChain(owned_header.header().clone())
728 }
729 Self::IntermediateShard(owned_header) => {
730 BlockHeader::IntermediateShard(owned_header.header().clone())
731 }
732 Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header().clone()),
733 }
734 }
735}