ab_core_primitives/block/header/
owned.rs

1//! Data structures related to the owned version of [`BlockHeader`]
2
3use crate::block::BlockRoot;
4use crate::block::header::{
5    BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderConsensusInfo,
6    BlockHeaderConsensusParameters, BlockHeaderFixedConsensusParameters, BlockHeaderPrefix,
7    BlockHeaderResult, BlockHeaderSeal, BlockHeaderSealType, GenericBlockHeader,
8    IntermediateShardHeader, LeafShardHeader,
9};
10use crate::hashes::Blake3Hash;
11use crate::shard::{NumShardsUnchecked, RealShardKind};
12use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
13use ab_io_type::trivial_type::TrivialType;
14use core::fmt;
15use derive_more::From;
16use rclite::Arc;
17use yoke::Yoke;
18
/// Generic owned block header
///
/// Implemented by [`OwnedBeaconChainHeader`], [`OwnedIntermediateShardHeader`] and
/// [`OwnedLeafShardHeader`], allowing generic code to work with an owned header of any shard kind
pub trait GenericOwnedBlockHeader:
    Clone + fmt::Debug + Send + Sync + Into<OwnedBlockHeader> + 'static
{
    /// Shard kind this owned header corresponds to
    const SHARD_KIND: RealShardKind;

    /// Block header (borrowed view into the owned buffer)
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Create an owned header from a buffer
    ///
    /// Returns the original buffer as `Err` if it does not contain a valid header
    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer>;

    /// Inner buffer with block header contents
    fn buffer(&self) -> &SharedAlignedBuffer;

    /// Number of clones in memory
    fn ref_count(&self) -> usize;

    /// Get a regular block header out of the owned version
    fn header(&self) -> &Self::Header<'_>;
}
43
44fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSeal<'_>) {
45    match seal {
46        BlockHeaderSeal::Ed25519(seal) => {
47            let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
48                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
49            };
50            let true = buffer.append(seal.as_bytes()) else {
51                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
52            };
53        }
54    }
55}
56
/// Errors for [`OwnedBeaconChainHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// Too many child shard blocks
    ///
    /// Returned when the number of child shard blocks does not fit into `u16`
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks
        actual: usize,
    },
}
67
/// An owned version of [`BeaconChainHeader`].
///
/// It is correctly aligned in memory and well suited for sending and receiving over the network
/// efficiently or storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    // Parsed header yoked to the buffer it borrows from; `Arc` makes clones cheap reference
    // count bumps
    inner: Arc<Yoke<BeaconChainHeader<'static>, SharedAlignedBuffer>>,
}
76
77impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
78    const SHARD_KIND: RealShardKind = RealShardKind::BeaconChain;
79
80    type Header<'a> = BeaconChainHeader<'a>;
81
82    #[inline(always)]
83    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
84        Self::from_buffer(buffer)
85    }
86
87    #[inline(always)]
88    fn buffer(&self) -> &SharedAlignedBuffer {
89        self.buffer()
90    }
91
92    #[inline(always)]
93    fn ref_count(&self) -> usize {
94        self.ref_count()
95    }
96
97    #[inline(always)]
98    fn header(&self) -> &Self::Header<'_> {
99        self.header()
100    }
101}
102
impl OwnedBeaconChainHeader {
    /// Max allocation needed by this header
    ///
    /// Upper bound: all fixed-size parts, the child shard blocks list (with its `u16` length
    /// prefix and 2 bytes of padding) and the maximum possible sizes of consensus parameters
    /// and seal
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                // Number of child shard blocks
                u16::SIZE
                // Padding
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderConsensusParameters::MAX_SIZE
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Create a new [`OwnedBeaconChainHeader`] from its parts
    ///
    /// Returns an unsealed header; apply a seal via
    /// [`OwnedBeaconChainHeaderUnsealed::with_seal()`] to get [`OwnedBeaconChainHeader`].
    ///
    /// # Errors
    ///
    /// Returns [`OwnedBeaconChainHeaderError::TooManyChildShardBlocks`] if the number of child
    /// shard blocks does not fit into `u16`
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: &BlockHeaderConsensusParameters<'_>,
    ) -> Result<OwnedBeaconChainHeaderUnsealed, OwnedBeaconChainHeaderError> {
        // Capacity is an upper bound, so the appends in `from_parts_into()` cannot run out of
        // space
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainHeaderUnsealed { buffer })
    }

    /// Create an owned header from its parts and write it into the provided buffer
    ///
    /// Parts are appended to `buffer` in wire order: prefix, result, consensus info, child
    /// shard blocks (length-prefixed) and consensus parameters.
    ///
    /// # Errors
    ///
    /// Returns [`OwnedBeaconChainHeaderError::TooManyChildShardBlocks`] if the number of child
    /// shard blocks does not fit into `u16`
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: &BlockHeaderConsensusParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        let BlockHeaderConsensusParameters {
            fixed_parameters,
            super_segment_root,
            next_solution_range,
            pot_parameters_change,
        } = consensus_parameters;
        let BlockHeaderFixedConsensusParameters {
            solution_range,
            slot_iterations,
            num_shards,
        } = fixed_parameters;

        // The number of child shard blocks is stored as a little-endian `u16` on the wire
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // TODO: Would be nice for `BlockHeaderChildShardBlocks` to have API to write this by itself
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // 2 zero bytes of padding, matching the layout accounted for in
            // `max_allocation_for()`
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        // TODO: Would be nice for `BlockHeaderConsensusParameters` to have API to write this by
        //  itself
        {
            let true = buffer.append(&solution_range.to_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&slot_iterations.get().to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(NumShardsUnchecked::from(*num_shards).as_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // Presence bitmask: one bit per optional field that follows
            let bitflags = {
                let mut bitflags = 0u8;

                if super_segment_root.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if next_solution_range.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // Optional fields are written in the same order their masks were checked above
            if let Some(super_segment_root) = super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

    /// Create an owned header from a buffer
    ///
    /// Returns the original buffer as `Err` if it does not contain exactly one valid header
    /// (trailing bytes are rejected)
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            // The buffer must contain the header and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`BeaconChainHeader`] out of [`OwnedBeaconChainHeader`]
    #[inline(always)]
    pub fn header(&self) -> &BeaconChainHeader<'_> {
        self.inner.get()
    }
}
294
/// Owned beacon chain block header, which is not sealed yet
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeaderUnsealed {
    // Serialized header bytes without the trailing seal
    buffer: OwnedAlignedBuffer,
}
300
301impl OwnedBeaconChainHeaderUnsealed {
302    /// Hash of the block before seal is applied to it
303    #[inline(always)]
304    pub fn pre_seal_hash(&self) -> Blake3Hash {
305        // TODO: Keyed hash with `block_header_seal` as a key
306        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
307    }
308
309    /// Add seal and return [`OwnedBeaconChainHeader`]
310    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedBeaconChainHeader {
311        let Self { mut buffer } = self;
312        append_seal(&mut buffer, seal);
313
314        // TODO: Avoid extra parsing here, for this `OwnedBeaconChainHeader::from_parts_into()` must
315        //  return references to parts. Or at least add unchecked version of `from_buffer()`
316        OwnedBeaconChainHeader::from_buffer(buffer.into_shared())
317            .expect("Known to be created correctly; qed")
318    }
319}
320
/// Errors for [`OwnedIntermediateShardHeader`]
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// Too many child shard blocks
    ///
    /// Returned when the number of child shard blocks does not fit into `u16`
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks
        actual: usize,
    },
}
331
/// An owned version of [`IntermediateShardHeader`].
///
/// It is correctly aligned in memory and well suited for sending and receiving over the network
/// efficiently or storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    // Parsed header yoked to the buffer it borrows from; `Arc` makes clones cheap reference
    // count bumps
    inner: Arc<Yoke<IntermediateShardHeader<'static>, SharedAlignedBuffer>>,
}
340
341impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
342    const SHARD_KIND: RealShardKind = RealShardKind::IntermediateShard;
343
344    type Header<'a> = IntermediateShardHeader<'a>;
345
346    #[inline(always)]
347    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
348        Self::from_buffer(buffer)
349    }
350
351    #[inline(always)]
352    fn buffer(&self) -> &SharedAlignedBuffer {
353        self.buffer()
354    }
355
356    #[inline(always)]
357    fn ref_count(&self) -> usize {
358        self.ref_count()
359    }
360
361    #[inline(always)]
362    fn header(&self) -> &Self::Header<'_> {
363        self.header()
364    }
365}
366
impl OwnedIntermediateShardHeader {
    /// Max allocation needed by this header
    ///
    /// Upper bound: all fixed-size parts, the child shard blocks list (with its `u16` length
    /// prefix and 2 bytes of padding) and the maximum possible seal size
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                // Number of child shard blocks
                u16::SIZE
                // Padding
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Create a new [`OwnedIntermediateShardHeader`] from its parts
    ///
    /// Returns an unsealed header; apply a seal via
    /// [`OwnedIntermediateShardHeaderUnsealed::with_seal()`] to get
    /// [`OwnedIntermediateShardHeader`].
    ///
    /// # Errors
    ///
    /// Returns [`OwnedIntermediateShardHeaderError::TooManyChildShardBlocks`] if the number of
    /// child shard blocks does not fit into `u16`
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        // Capacity is an upper bound, so the appends in `from_parts_into()` cannot run out of
        // space
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardHeaderUnsealed { buffer })
    }

    /// Create an owned header from its parts and write it into the provided buffer
    ///
    /// Parts are appended to `buffer` in wire order: prefix, result, consensus info, beacon
    /// chain info and child shard blocks (length-prefixed).
    ///
    /// # Errors
    ///
    /// Returns [`OwnedIntermediateShardHeaderError::TooManyChildShardBlocks`] if the number of
    /// child shard blocks does not fit into `u16`
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        // The number of child shard blocks is stored as a little-endian `u16` on the wire
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // TODO: Would be nice for `BlockHeaderChildShardBlocks` to have API to write this by itself
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // 2 zero bytes of padding, matching the layout accounted for in
            // `max_allocation_for()`
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

    /// Create an owned header from a buffer
    ///
    /// Returns the original buffer as `Err` if it does not contain exactly one valid header
    /// (trailing bytes are rejected)
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
        //  https://github.com/unicode-org/icu4x/issues/6665
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = IntermediateShardHeader::try_from_bytes(buffer)
            else {
                return Err(());
            };
            // The buffer must contain the header and nothing else
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Inner buffer with block header contents
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Number of clones in memory
    #[inline(always)]
    pub fn ref_count(&self) -> usize {
        self.inner.strong_count()
    }

    /// Get [`IntermediateShardHeader`] out of [`OwnedIntermediateShardHeader`]
    #[inline(always)]
    pub fn header(&self) -> &IntermediateShardHeader<'_> {
        self.inner.get()
    }
}
491
/// Owned intermediate shard block header, which is not sealed yet
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeaderUnsealed {
    // Serialized header bytes without the trailing seal
    buffer: OwnedAlignedBuffer,
}
497
498impl OwnedIntermediateShardHeaderUnsealed {
499    /// Hash of the block before seal is applied to it
500    #[inline(always)]
501    pub fn pre_seal_hash(&self) -> Blake3Hash {
502        // TODO: Keyed hash with `block_header_seal` as a key
503        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
504    }
505
506    /// Add seal and return [`OwnedIntermediateShardHeader`]
507    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedIntermediateShardHeader {
508        let Self { mut buffer } = self;
509        append_seal(&mut buffer, seal);
510
511        // TODO: Avoid extra parsing here, for this
512        //  `OwnedIntermediateShardHeader::from_parts_into()` must return references to parts. Or
513        //  at least add unchecked version of `from_buffer()`
514        OwnedIntermediateShardHeader::from_buffer(buffer.into_shared())
515            .expect("Known to be created correctly; qed")
516    }
517}
518
/// An owned version of [`LeafShardHeader`].
///
/// It is correctly aligned in memory and well suited for sending and receiving over the network
/// efficiently or storing in memory or on disk.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    // Parsed header yoked to the buffer it borrows from; `Arc` makes clones cheap reference
    // count bumps
    inner: Arc<Yoke<LeafShardHeader<'static>, SharedAlignedBuffer>>,
}
527
528impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
529    const SHARD_KIND: RealShardKind = RealShardKind::LeafShard;
530
531    type Header<'a> = LeafShardHeader<'a>;
532
533    #[inline(always)]
534    fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
535        Self::from_buffer(buffer)
536    }
537
538    #[inline(always)]
539    fn buffer(&self) -> &SharedAlignedBuffer {
540        self.buffer()
541    }
542
543    #[inline(always)]
544    fn ref_count(&self) -> usize {
545        self.ref_count()
546    }
547
548    #[inline(always)]
549    fn header(&self) -> &Self::Header<'_> {
550        self.header()
551    }
552}
553
554impl OwnedLeafShardHeader {
555    /// Max allocation needed by this header
556    pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
557        + BlockHeaderResult::SIZE
558        + BlockHeaderConsensusInfo::SIZE
559        + BlockHeaderBeaconChainInfo::SIZE
560        + BlockHeaderSeal::MAX_SIZE;
561
562    /// Create a new [`OwnedLeafShardHeader`] from its parts
563    pub fn from_parts(
564        prefix: &BlockHeaderPrefix,
565        result: &BlockHeaderResult,
566        consensus_info: &BlockHeaderConsensusInfo,
567        beacon_chain_info: &BlockHeaderBeaconChainInfo,
568    ) -> OwnedLeafShardHeaderUnsealed {
569        let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);
570
571        Self::from_parts_into(
572            prefix,
573            result,
574            consensus_info,
575            beacon_chain_info,
576            &mut buffer,
577        );
578
579        OwnedLeafShardHeaderUnsealed { buffer }
580    }
581
582    /// Create an owned header from its parts and write it into the provided buffer
583    pub fn from_parts_into(
584        prefix: &BlockHeaderPrefix,
585        result: &BlockHeaderResult,
586        consensus_info: &BlockHeaderConsensusInfo,
587        beacon_chain_info: &BlockHeaderBeaconChainInfo,
588        buffer: &mut OwnedAlignedBuffer,
589    ) {
590        let true = buffer.append(prefix.as_bytes()) else {
591            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
592        };
593        let true = buffer.append(result.as_bytes()) else {
594            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
595        };
596        let true = buffer.append(consensus_info.as_bytes()) else {
597            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
598        };
599        let true = buffer.append(beacon_chain_info.as_bytes()) else {
600            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
601        };
602    }
603
604    /// Create an owned header from a buffer
605    #[inline]
606    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
607        // TODO: Cloning is cheap, but will not be necessary if/when this is resolved:
608        //  https://github.com/unicode-org/icu4x/issues/6665
609        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
610            let Some((header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer) else {
611                return Err(());
612            };
613            if !extra_bytes.is_empty() {
614                return Err(());
615            }
616
617            Ok(header)
618        })
619        .map_err(move |()| buffer)?;
620
621        Ok(Self {
622            inner: Arc::new(inner),
623        })
624    }
625
626    /// Inner buffer with block header contents
627    #[inline(always)]
628    pub fn buffer(&self) -> &SharedAlignedBuffer {
629        self.inner.backing_cart()
630    }
631
632    /// Number of clones in memory
633    #[inline(always)]
634    pub fn ref_count(&self) -> usize {
635        self.inner.strong_count()
636    }
637
638    /// Get [`LeafShardHeader`] out of [`OwnedLeafShardHeader`]
639    #[inline(always)]
640    pub fn header(&self) -> &LeafShardHeader<'_> {
641        self.inner.get()
642    }
643}
644
/// Owned leaf shard block header, which is not sealed yet
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeaderUnsealed {
    // Serialized header bytes without the trailing seal
    buffer: OwnedAlignedBuffer,
}
650
651impl OwnedLeafShardHeaderUnsealed {
652    /// Hash of the block before seal is applied to it
653    #[inline(always)]
654    pub fn pre_seal_hash(&self) -> Blake3Hash {
655        // TODO: Keyed hash with `block_header_seal` as a key
656        Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
657    }
658
659    /// Add seal and return [`OwnedLeafShardHeader`]
660    pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedLeafShardHeader {
661        let Self { mut buffer } = self;
662        append_seal(&mut buffer, seal);
663
664        // TODO: Avoid extra parsing here, for this `OwnedLeafShardHeader::from_parts_into()` must
665        //  return references to parts. Or at least add unchecked version of `from_buffer()`
666        OwnedLeafShardHeader::from_buffer(buffer.into_shared())
667            .expect("Known to be created correctly; qed")
668    }
669}
670
/// An owned version of [`BlockHeader`].
///
/// It is correctly aligned in memory and well suited for sending and receiving over the network
/// efficiently or storing in memory or on disk.
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Block header corresponds to the beacon chain
    BeaconChain(OwnedBeaconChainHeader),
    /// Block header corresponds to an intermediate shard
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Block header corresponds to a leaf shard
    LeafShard(OwnedLeafShardHeader),
}
684
685impl OwnedBlockHeader {
686    /// Create an owned header from a buffer
687    #[inline]
688    pub fn from_buffer(
689        buffer: SharedAlignedBuffer,
690        shard_kind: RealShardKind,
691    ) -> Result<Self, SharedAlignedBuffer> {
692        Ok(match shard_kind {
693            RealShardKind::BeaconChain => {
694                Self::BeaconChain(OwnedBeaconChainHeader::from_buffer(buffer)?)
695            }
696            RealShardKind::IntermediateShard => {
697                Self::IntermediateShard(OwnedIntermediateShardHeader::from_buffer(buffer)?)
698            }
699            RealShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader::from_buffer(buffer)?),
700        })
701    }
702
703    /// Inner buffer block header contents
704    #[inline]
705    pub fn buffer(&self) -> &SharedAlignedBuffer {
706        match self {
707            Self::BeaconChain(owned_header) => owned_header.buffer(),
708            Self::IntermediateShard(owned_header) => owned_header.buffer(),
709            Self::LeafShard(owned_header) => owned_header.buffer(),
710        }
711    }
712
713    /// Number of clones in memory
714    #[inline]
715    pub fn ref_count(&self) -> usize {
716        match self {
717            Self::BeaconChain(owned_header) => owned_header.ref_count(),
718            Self::IntermediateShard(owned_header) => owned_header.ref_count(),
719            Self::LeafShard(owned_header) => owned_header.ref_count(),
720        }
721    }
722
723    /// Get [`BlockHeader`] out of [`OwnedBlockHeader`]
724    #[inline]
725    pub fn header(&self) -> BlockHeader<'_> {
726        match self {
727            Self::BeaconChain(owned_header) => {
728                BlockHeader::BeaconChain(owned_header.header().clone())
729            }
730            Self::IntermediateShard(owned_header) => {
731                BlockHeader::IntermediateShard(owned_header.header().clone())
732            }
733            Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header().clone()),
734        }
735    }
736}