use crate::block::BlockRoot;
use crate::block::header::{
    BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderConsensusInfo,
    BlockHeaderConsensusParameters, BlockHeaderPrefix, BlockHeaderResult, BlockHeaderSeal,
    BlockHeaderSealType, GenericBlockHeader, IntermediateShardHeader, LeafShardHeader,
};
use crate::hashes::Blake3Hash;
use crate::shard::ShardKind;
use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
use ab_io_type::trivial_type::TrivialType;
use core::fmt;
use derive_more::From;
use rclite::Arc;
use yoke::Yoke;

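/// Generic owned block header, implemented by the owned block headers of all shard kinds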
pub trait GenericOwnedBlockHeader:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockHeader> + 'static
{
    /// Shard kind of the corresponding block
    const SHARD_KIND: ShardKind;

    /// Borrowed block header view into the owned buffer
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Underlying buffer with block header contents
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of references to this owned header
    fn ref_count(&self) -> usize;

    /// Get the regular block header out of the owned version
    fn header(&self) -> &Self::Header<'_>;
}

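/// Append a block header seal (the seal type byte followed by the seal bytes) to the buffer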
fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSeal<'_>) {
    match seal {
        BlockHeaderSeal::Ed25519(seal) => {
            let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(seal.as_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
        }
    }
}

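/// Errors for [`OwnedBeaconChainHeader`]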
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// Too many child shard blocks
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks
        actual: usize,
    },
}

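/// An owned version of the beacon chain block header.
///
/// It is backed by a single reference-counted aligned buffer, so clones are cheap.
///
/// A minimal construction sketch; the part values (`prefix`, `result`, `consensus_info`,
/// `child_shard_blocks`, `consensus_parameters` and `seal`) are hypothetical inputs assumed to
/// come from block production:
///
/// ```ignore
/// let unsealed = OwnedBeaconChainHeader::from_parts(
///     &prefix,
///     &result,
///     &consensus_info,
///     &child_shard_blocks,
///     consensus_parameters,
/// )?;
/// // Sign the pre-seal hash out of band to produce the seal
/// let pre_seal_hash = unsealed.pre_seal_hash();
/// let header: OwnedBeaconChainHeader = unsealed.with_seal(seal);
/// ```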
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    inner: Arc<Yoke<BeaconChainHeader<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
    const SHARD_KIND: ShardKind = ShardKind::BeaconChain;

    type Header<'a> = BeaconChainHeader<'a>;

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}

impl OwnedBeaconChainHeader {
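    /// Max allocation (in bytes) needed for a sealed header with the provided child shard blocks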
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                u16::SIZE
                // 2 bytes of padding for alignment
                + <[u8; 2]>::SIZE
                // Child shard block roots
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderConsensusParameters::MAX_SIZE
            + BlockHeaderSeal::MAX_SIZE
    }

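    /// Create an unsealed header from its parts.
    ///
    /// Allocates a new buffer large enough to also fit the seal that is added later.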
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
    ) -> Result<OwnedBeaconChainHeaderUnsealed, OwnedBeaconChainHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainHeaderUnsealed { buffer })
    }

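    /// Create an unsealed header from its parts, writing it into the provided buffer.
    ///
    /// Returns an error if the number of child shard blocks does not fit into `u16`.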
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        {
            // Number of child shard blocks as `u16` followed by 2 bytes of padding
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        {
            // Consensus parameters: fixed parameters first, then bitflags of optional fields,
            // then the optional fields themselves
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .solution_range
                    .to_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .slot_iterations
                    .get()
                    .to_le_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            let bitflags = {
                let mut bitflags = 0u8;

                if consensus_parameters.super_segment_root.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if consensus_parameters.next_solution_range.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if consensus_parameters.pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            if let Some(super_segment_root) = consensus_parameters.super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = consensus_parameters.next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = consensus_parameters.pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

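    /// Create an owned header from a buffer.
    ///
    /// Returns the original buffer as an error if it does not contain a valid header.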
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            // The buffer must contain exactly one valid header with no trailing bytes; the
            // parsed header borrows from the buffer it is attached to
            let Some((header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of references to this owned header
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the regular block header out of the owned version
    #[inline(always)]
    pub fn header(&self) -> &BeaconChainHeader<'_> {
        self.inner.get()
    }
}

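/// Owned beacon chain block header that has not been sealed yet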
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}

impl OwnedBeaconChainHeaderUnsealed {
    /// Hash of the block header before the seal is applied to it
    #[inline(always)]
    pub fn pre_seal_hash(&self) -> Blake3Hash {
        // The seal is not in the buffer yet, so the whole buffer is hashed
        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
    }

    /// Add the seal and return the sealed owned header
    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedBeaconChainHeader {
        let Self { mut buffer } = self;
        append_seal(&mut buffer, seal);

        OwnedBeaconChainHeader::from_buffer(buffer.into_shared())
            .expect("Known to be created correctly; qed")
    }
}

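/// Errors for [`OwnedIntermediateShardHeader`]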
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// Too many child shard blocks
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks
        actual: usize,
    },
}

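/// An owned version of an intermediate shard block header.
///
/// It is backed by a single reference-counted aligned buffer, so clones are cheap.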
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    inner: Arc<Yoke<IntermediateShardHeader<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
    const SHARD_KIND: ShardKind = ShardKind::IntermediateShard;

    type Header<'a> = IntermediateShardHeader<'a>;

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}

impl OwnedIntermediateShardHeader {
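    /// Max allocation (in bytes) needed for a sealed header with the provided child shard blocks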
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                u16::SIZE
                // 2 bytes of padding for alignment
                + <[u8; 2]>::SIZE
                // Child shard block roots
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSeal::MAX_SIZE
    }

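    /// Create an unsealed header from its parts.
    ///
    /// Allocates a new buffer large enough to also fit the seal that is added later.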
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardHeaderUnsealed { buffer })
    }

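    /// Create an unsealed header from its parts, writing it into the provided buffer.
    ///
    /// Returns an error if the number of child shard blocks does not fit into `u16`.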
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        {
            // Number of child shard blocks as `u16` followed by 2 bytes of padding
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

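    /// Create an owned header from a buffer.
    ///
    /// Returns the original buffer as an error if it does not contain a valid header.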
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            // The buffer must contain exactly one valid header with no trailing bytes; the
            // parsed header borrows from the buffer it is attached to
            let Some((header, extra_bytes)) = IntermediateShardHeader::try_from_bytes(buffer)
            else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of references to this owned header
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the regular block header out of the owned version
    #[inline(always)]
    pub fn header(&self) -> &IntermediateShardHeader<'_> {
        self.inner.get()
    }
}

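/// Owned intermediate shard block header that has not been sealed yet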
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}

impl OwnedIntermediateShardHeaderUnsealed {
    /// Hash of the block header before the seal is applied to it
    #[inline(always)]
    pub fn pre_seal_hash(&self) -> Blake3Hash {
        // The seal is not in the buffer yet, so the whole buffer is hashed
        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
    }

    /// Add the seal and return the sealed owned header
    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedIntermediateShardHeader {
        let Self { mut buffer } = self;
        append_seal(&mut buffer, seal);

        OwnedIntermediateShardHeader::from_buffer(buffer.into_shared())
            .expect("Known to be created correctly; qed")
    }
}

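/// An owned version of a leaf shard block header.
///
/// It is backed by a single reference-counted aligned buffer, so clones are cheap.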
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    inner: Arc<Yoke<LeafShardHeader<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
    const SHARD_KIND: ShardKind = ShardKind::LeafShard;

    type Header<'a> = LeafShardHeader<'a>;

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}

impl OwnedLeafShardHeader {
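    /// Max allocation (in bytes) needed for a sealed leaf shard header; unlike other shard
    /// kinds, it does not depend on the number of child shard blocks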
    pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
        + BlockHeaderResult::SIZE
        + BlockHeaderConsensusInfo::SIZE
        + BlockHeaderBeaconChainInfo::SIZE
        + BlockHeaderSeal::MAX_SIZE;

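    /// Create an unsealed header from its parts.
    ///
    /// This is infallible: a leaf shard header has no child shard blocks, so nothing can exceed
    /// the fixed maximum size.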
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
    ) -> OwnedLeafShardHeaderUnsealed {
        let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            &mut buffer,
        );

        OwnedLeafShardHeaderUnsealed { buffer }
    }

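    /// Create an unsealed header from its parts, writing it into the provided buffer.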
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        buffer: &mut OwnedAlignedBuffer,
    ) {
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
    }

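    /// Create an owned header from a buffer.
    ///
    /// Returns the original buffer as an error if it does not contain a valid header.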
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            // The buffer must contain exactly one valid header with no trailing bytes; the
            // parsed header borrows from the buffer it is attached to
            let Some((header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of references to this owned header
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get the regular block header out of the owned version
    #[inline(always)]
    pub fn header(&self) -> &LeafShardHeader<'_> {
        self.inner.get()
    }
}

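/// Owned leaf shard block header that has not been sealed yet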
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}

impl OwnedLeafShardHeaderUnsealed {
    /// Hash of the block header before the seal is applied to it
    #[inline(always)]
    pub fn pre_seal_hash(&self) -> Blake3Hash {
        // The seal is not in the buffer yet, so the whole buffer is hashed
        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
    }

    /// Add the seal and return the sealed owned header
    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedLeafShardHeader {
        let Self { mut buffer } = self;
        append_seal(&mut buffer, seal);

        OwnedLeafShardHeader::from_buffer(buffer.into_shared())
            .expect("Known to be created correctly; qed")
    }
}

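/// An owned block header of any shard kind.
///
/// A minimal decoding sketch; `buffer` and `shard_kind` are hypothetical values assumed to come
/// from storage or the network:
///
/// ```ignore
/// let owned_header = OwnedBlockHeader::from_buffer(buffer, shard_kind)
///     .expect("valid header bytes");
/// match owned_header.header() {
///     BlockHeader::BeaconChain(header) => { /* ... */ }
///     BlockHeader::IntermediateShard(header) => { /* ... */ }
///     BlockHeader::LeafShard(header) => { /* ... */ }
/// }
/// ```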
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Block header corresponds to the beacon chain
    BeaconChain(OwnedBeaconChainHeader),
    /// Block header corresponds to an intermediate shard
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Block header corresponds to a leaf shard
    LeafShard(OwnedLeafShardHeader),
}

impl OwnedBlockHeader {
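    /// Create an owned header from a buffer and the kind of the shard it belongs to.
    ///
    /// Returns the original buffer as an error if it does not contain a valid header for
    /// `shard_kind`, or if `shard_kind` is [`ShardKind::Phantom`] or [`ShardKind::Invalid`].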
    #[inline]
    pub fn from_buffer(
        buffer: SharedAlignedBuffer,
        shard_kind: ShardKind,
    ) -> Result<Self, SharedAlignedBuffer> {
        Ok(match shard_kind {
            ShardKind::BeaconChain => {
                Self::BeaconChain(OwnedBeaconChainHeader::from_buffer(buffer)?)
            }
            ShardKind::IntermediateShard => {
                Self::IntermediateShard(OwnedIntermediateShardHeader::from_buffer(buffer)?)
            }
            ShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader::from_buffer(buffer)?),
            ShardKind::Phantom | ShardKind::Invalid => {
                // Blocks for such shards do not exist
                return Err(buffer);
            }
        })
    }

    /// Underlying buffer with block header contents
    #[inline]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        match self {
            Self::BeaconChain(owned_header) => owned_header.buffer(),
            Self::IntermediateShard(owned_header) => owned_header.buffer(),
            Self::LeafShard(owned_header) => owned_header.buffer(),
        }
    }

    /// Number of references to this owned header
    #[inline]
    pub fn ref_count(&self) -> usize {
        match self {
            Self::BeaconChain(owned_header) => owned_header.ref_count(),
            Self::IntermediateShard(owned_header) => owned_header.ref_count(),
            Self::LeafShard(owned_header) => owned_header.ref_count(),
        }
    }

    /// Get the regular block header out of the owned version
    #[inline]
    pub fn header(&self) -> BlockHeader<'_> {
        match self {
            Self::BeaconChain(owned_header) => {
                BlockHeader::BeaconChain(owned_header.header().clone())
            }
            Self::IntermediateShard(owned_header) => {
                BlockHeader::IntermediateShard(owned_header.header().clone())
            }
            Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header().clone()),
        }
    }
}