ab_core_primitives/block/header/owned.rs

//! Data structures related to the owned version of [`BlockHeader`]

use crate::block::BlockRoot;
use crate::block::header::{
    BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderConsensusInfo,
    BlockHeaderConsensusParameters, BlockHeaderPrefix, BlockHeaderResult, BlockHeaderSeal,
    BlockHeaderSealType, GenericBlockHeader, IntermediateShardHeader, LeafShardHeader,
};
use crate::hashes::Blake3Hash;
use crate::shard::ShardKind;
use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
use ab_io_type::trivial_type::TrivialType;
use core::fmt;
use derive_more::From;
use rclite::Arc;
use yoke::Yoke;

/// Generic owned block header
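///
/// # Example
///
/// A minimal sketch (marked `ignore`, not taken from the crate's tests) of working generically
/// over any owned header type using only the methods defined on this trait:
///
/// ```ignore
/// fn inspect<H: GenericOwnedBlockHeader>(owned: &H) {
///     // Shard kind is a compile-time constant of the implementing type
///     let _kind = H::SHARD_KIND;
///     // Borrowed view into the serialized header
///     let _header = owned.header();
///     // Backing buffer with the raw header bytes and the number of live clones
///     let _buffer = owned.buffer();
///     let _clones = owned.ref_count();
/// }
/// ```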
pub trait GenericOwnedBlockHeader:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockHeader> + 'static
{
    /// Shard kind
    const SHARD_KIND: ShardKind;

    /// Block header
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Create an owned header from a buffer
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer>;

    /// Inner buffer with block header contents
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of clones in memory
    fn ref_count(&self) -> usize;

    /// Get a regular block header out of the owned version
    fn header(&self) -> &Self::Header<'_>;
}

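/// Appends the seal type tag followed by the seal bytes to the end of the buffer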
fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSeal<'_>) {
    match seal {
        BlockHeaderSeal::Ed25519(seal) => {
            let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(seal.as_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
        }
    }
}

/// Errors for [`OwnedBeaconChainHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// Too many child shard blocks
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks
        actual: usize,
    },
}

/// An owned version of [`BeaconChainHeader`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    inner: Arc<Yoke<BeaconChainHeader<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
    const SHARD_KIND: ShardKind = ShardKind::BeaconChain;

    type Header<'a> = BeaconChainHeader<'a>;

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}

impl OwnedBeaconChainHeader {
    /// Max allocation needed by this header
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                // Number of child shard blocks
                u16::SIZE
                // Padding
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderConsensusParameters::MAX_SIZE
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Create a new [`OwnedBeaconChainHeader`] from its parts
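    ///
    /// Returns an [`OwnedBeaconChainHeaderUnsealed`], which must still be sealed. A minimal
    /// sketch (marked `ignore`, with placeholder values that are not part of this crate):
    ///
    /// ```ignore
    /// let unsealed = OwnedBeaconChainHeader::from_parts(
    ///     &prefix,
    ///     &result,
    ///     &consensus_info,
    ///     &child_shard_blocks,
    ///     consensus_parameters,
    /// )?;
    /// // Sign the pre-seal hash with whatever keypair the caller controls (hypothetical helper)
    /// let seal = sign_pre_seal_hash(unsealed.pre_seal_hash());
    /// let header: OwnedBeaconChainHeader = unsealed.with_seal(seal);
    /// ```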
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
    ) -> Result<OwnedBeaconChainHeaderUnsealed, OwnedBeaconChainHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainHeaderUnsealed { buffer })
    }

    /// Create an owned header from its parts and write it into the provided buffer
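    ///
    /// The parts are appended to `buffer` in serialized form in the following order: the prefix,
    /// the result, the consensus info, the number of child shard blocks as a little-endian `u16`
    /// followed by two bytes of padding and the child shard block roots themselves, and finally
    /// the consensus parameters (fixed parameters, a presence bitflags byte and any optional
    /// fields that are present).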
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // TODO: Would be nice for `BlockHeaderChildShardBlocks` to have an API to write this by
        //  itself
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        // TODO: Would be nice for `BlockHeaderBeaconChainParameters` to have an API to write this
        //  by itself
        {
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .solution_range
                    .to_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .slot_iterations
                    .get()
                    .to_le_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

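            // Each mask bit records whether the corresponding optional field is present, so a
            // parser knows which of the optional fields below to expect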
            let bitflags = {
                let mut bitflags = 0u8;

                if consensus_parameters.super_segment_root.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if consensus_parameters.next_solution_range.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if consensus_parameters.pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            if let Some(super_segment_root) = consensus_parameters.super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = consensus_parameters.next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = consensus_parameters.pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

    /// Create an owned header from a buffer
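    ///
    /// The buffer must contain exactly one valid header with no trailing bytes; otherwise the
    /// original buffer is returned back as the error.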
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`BeaconChainHeader`] out of [`OwnedBeaconChainHeader`]
    #[inline(always)]
    pub fn header(&self) -> &BeaconChainHeader<'_> {
        self.inner.get()
    }
}

/// Owned beacon chain block header, which is not sealed yet
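///
/// Produced by [`OwnedBeaconChainHeader::from_parts()`]; call [`Self::with_seal()`] to turn it
/// into a sealed [`OwnedBeaconChainHeader`].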
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}

impl OwnedBeaconChainHeaderUnsealed {
    /// Hash of the block before seal is applied to it
    #[inline(always)]
    pub fn pre_seal_hash(&self) -> Blake3Hash {
        // TODO: Keyed hash with `block_header_seal` as a key
        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
    }

    /// Add seal and return [`OwnedBeaconChainHeader`]
    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedBeaconChainHeader {
        let Self { mut buffer } = self;
        append_seal(&mut buffer, seal);

        // TODO: Avoid extra parsing here; for this, `OwnedBeaconChainHeader::from_parts_into()`
        //  must return references to parts. Or at least add an unchecked version of `from_buffer()`
        OwnedBeaconChainHeader::from_buffer(buffer.into_shared())
            .expect("Known to be created correctly; qed")
    }
}

/// Errors for [`OwnedIntermediateShardHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// Too many child shard blocks
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks
        actual: usize,
    },
}

/// An owned version of [`IntermediateShardHeader`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    inner: Arc<Yoke<IntermediateShardHeader<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
    const SHARD_KIND: ShardKind = ShardKind::IntermediateShard;

    type Header<'a> = IntermediateShardHeader<'a>;

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}

impl OwnedIntermediateShardHeader {
    /// Max allocation needed by this header
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                // Number of child shard blocks
                u16::SIZE
                // Padding
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Create a new [`OwnedIntermediateShardHeader`] from its parts
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardHeaderUnsealed { buffer })
    }

    /// Create an owned header from its parts and write it into the provided buffer
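    ///
    /// The parts are appended to `buffer` in serialized form in the following order: the prefix,
    /// the result, the consensus info, the beacon chain info, and then the number of child shard
    /// blocks as a little-endian `u16` followed by two bytes of padding and the child shard block
    /// roots themselves.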
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // TODO: Would be nice for `BlockHeaderChildShardBlocks` to have an API to write this by
        //  itself
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

    /// Create an owned header from a buffer
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = IntermediateShardHeader::try_from_bytes(buffer)
            else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`IntermediateShardHeader`] out of [`OwnedIntermediateShardHeader`]
    #[inline(always)]
    pub fn header(&self) -> &IntermediateShardHeader<'_> {
        self.inner.get()
    }
}

/// Owned intermediate shard block header, which is not sealed yet
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}

impl OwnedIntermediateShardHeaderUnsealed {
    /// Hash of the block before seal is applied to it
    #[inline(always)]
    pub fn pre_seal_hash(&self) -> Blake3Hash {
        // TODO: Keyed hash with `block_header_seal` as a key
        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
    }

    /// Add seal and return [`OwnedIntermediateShardHeader`]
    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedIntermediateShardHeader {
        let Self { mut buffer } = self;
        append_seal(&mut buffer, seal);

        // TODO: Avoid extra parsing here; for this,
        //  `OwnedIntermediateShardHeader::from_parts_into()` must return references to parts. Or
        //  at least add an unchecked version of `from_buffer()`
        OwnedIntermediateShardHeader::from_buffer(buffer.into_shared())
            .expect("Known to be created correctly; qed")
    }
}

/// An owned version of [`LeafShardHeader`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    inner: Arc<Yoke<LeafShardHeader<'static>, SharedAlignedBuffer>>,
}

impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
    const SHARD_KIND: ShardKind = ShardKind::LeafShard;

    type Header<'a> = LeafShardHeader<'a>;

    #[inline(always)]
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        Self::from_buffer(buffer)
    }

    #[inline(always)]
    fn buffer(&self) -> &SharedAlignedBuffer {
        self.buffer()
    }

    #[inline(always)]
    fn ref_count(&self) -> usize {
        self.ref_count()
    }

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        self.header()
    }
}

impl OwnedLeafShardHeader {
    /// Max allocation needed by this header
    pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
        + BlockHeaderResult::SIZE
        + BlockHeaderConsensusInfo::SIZE
        + BlockHeaderBeaconChainInfo::SIZE
        + BlockHeaderSeal::MAX_SIZE;

    /// Create a new [`OwnedLeafShardHeader`] from its parts
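    ///
    /// Unlike the other shard kinds, this cannot fail because leaf shard headers have no child
    /// shard blocks. A minimal sketch (marked `ignore`, with placeholder values that are not part
    /// of this crate):
    ///
    /// ```ignore
    /// let unsealed = OwnedLeafShardHeader::from_parts(
    ///     &prefix,
    ///     &result,
    ///     &consensus_info,
    ///     &beacon_chain_info,
    /// );
    /// // Sign the pre-seal hash with whatever keypair the caller controls (hypothetical helper)
    /// let seal = sign_pre_seal_hash(unsealed.pre_seal_hash());
    /// let header = unsealed.with_seal(seal);
    /// ```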
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
    ) -> OwnedLeafShardHeaderUnsealed {
        let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            &mut buffer,
        );

        OwnedLeafShardHeaderUnsealed { buffer }
    }

    /// Create an owned header from its parts and write it into the provided buffer
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        buffer: &mut OwnedAlignedBuffer,
    ) {
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
    }

    /// Create an owned header from a buffer
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`LeafShardHeader`] out of [`OwnedLeafShardHeader`]
    #[inline(always)]
    pub fn header(&self) -> &LeafShardHeader<'_> {
        self.inner.get()
    }
}

/// Owned leaf shard block header, which is not sealed yet
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}

impl OwnedLeafShardHeaderUnsealed {
    /// Hash of the block before seal is applied to it
    #[inline(always)]
    pub fn pre_seal_hash(&self) -> Blake3Hash {
        // TODO: Keyed hash with `block_header_seal` as a key
        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
    }

    /// Add seal and return [`OwnedLeafShardHeader`]
    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedLeafShardHeader {
        let Self { mut buffer } = self;
        append_seal(&mut buffer, seal);

        // TODO: Avoid extra parsing here; for this, `OwnedLeafShardHeader::from_parts_into()` must
        //  return references to parts. Or at least add an unchecked version of `from_buffer()`
        OwnedLeafShardHeader::from_buffer(buffer.into_shared())
            .expect("Known to be created correctly; qed")
    }
}

/// An owned version of [`BlockHeader`].
///
/// It is correctly aligned in memory and well suited for efficiently sending and receiving over
/// the network, as well as for storing in memory or on disk.
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Block header corresponds to the beacon chain
    BeaconChain(OwnedBeaconChainHeader),
    /// Block header corresponds to an intermediate shard
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Block header corresponds to a leaf shard
    LeafShard(OwnedLeafShardHeader),
}

impl OwnedBlockHeader {
    /// Create an owned header from a buffer
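    ///
    /// The `shard_kind` selects which concrete header type to parse; the buffer is returned back
    /// as the error if parsing fails or if blocks do not exist for the given shard kind. A
    /// minimal sketch (marked `ignore`, with a placeholder `buffer` that is not part of this
    /// crate):
    ///
    /// ```ignore
    /// match OwnedBlockHeader::from_buffer(buffer, ShardKind::BeaconChain) {
    ///     Ok(OwnedBlockHeader::BeaconChain(owned_header)) => {
    ///         // Use the borrowed view of the parsed header
    ///         let _header = owned_header.header();
    ///     }
    ///     Ok(_) => unreachable!("Shard kind above guarantees the beacon chain variant"),
    ///     Err(_buffer) => {
    ///         // The buffer did not contain a valid beacon chain header
    ///     }
    /// }
    /// ```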
    #[inline]
    pub fn from_buffer(
        buffer: SharedAlignedBuffer,
        shard_kind: ShardKind,
    ) -> Result<Self, SharedAlignedBuffer> {
        Ok(match shard_kind {
            ShardKind::BeaconChain => {
                Self::BeaconChain(OwnedBeaconChainHeader::from_buffer(buffer)?)
            }
            ShardKind::IntermediateShard => {
                Self::IntermediateShard(OwnedIntermediateShardHeader::from_buffer(buffer)?)
            }
            ShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader::from_buffer(buffer)?),
            ShardKind::Phantom | ShardKind::Invalid => {
                // Blocks for such shards do not exist
                return Err(buffer);
            }
        })
    }

    /// Inner buffer with block header contents
    #[inline]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        match self {
            Self::BeaconChain(owned_header) => owned_header.buffer(),
            Self::IntermediateShard(owned_header) => owned_header.buffer(),
            Self::LeafShard(owned_header) => owned_header.buffer(),
        }
    }

    /// Number of clones in memory
    #[inline]
    pub fn ref_count(&self) -> usize {
        match self {
            Self::BeaconChain(owned_header) => owned_header.ref_count(),
            Self::IntermediateShard(owned_header) => owned_header.ref_count(),
            Self::LeafShard(owned_header) => owned_header.ref_count(),
        }
    }

    /// Get [`BlockHeader`] out of [`OwnedBlockHeader`]
    #[inline]
    pub fn header(&self) -> BlockHeader<'_> {
        match self {
            Self::BeaconChain(owned_header) => {
                BlockHeader::BeaconChain(owned_header.header().clone())
            }
            Self::IntermediateShard(owned_header) => {
                BlockHeader::IntermediateShard(owned_header.header().clone())
            }
            Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header().clone()),
        }
    }
}