ab_core_primitives/block/header/
owned.rs1use crate::block::BlockRoot;
4use crate::block::header::{
5 BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderBeaconChainParameters,
6 BlockHeaderConsensusInfo, BlockHeaderPrefix, BlockHeaderResult, BlockHeaderSealRef,
7 BlockHeaderSealType, GenericBlockHeader, IntermediateShardHeader, LeafShardHeader,
8};
9use crate::hashes::Blake3Hash;
10use crate::shard::ShardKind;
11use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
12use ab_io_type::trivial_type::TrivialType;
13use derive_more::From;
14
/// Generic owned block header backed by a single aligned buffer.
///
/// Implemented by [`OwnedBeaconChainHeader`], [`OwnedIntermediateShardHeader`],
/// [`OwnedLeafShardHeader`] and the unifying [`OwnedBlockHeader`] enum.
pub trait GenericOwnedBlockHeader {
    /// Borrowed header view corresponding to this owned header, tied to the lifetime of `self`
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Get a borrowed header view out of the owned buffer
    fn header(&self) -> Self::Header<'_>;
}
25
26fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSealRef<'_>) {
27 match seal {
28 BlockHeaderSealRef::Ed25519(seal) => {
29 let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
30 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
31 };
32 let true = buffer.append(seal.as_bytes()) else {
33 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
34 };
35 }
36 }
37}
38
/// Errors for [`OwnedBeaconChainHeader`] construction
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// The number of child shard blocks does not fit into the `u16` count field
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks provided
        actual: usize,
    },
}
49
/// Owned beacon chain block header stored in a shared aligned buffer.
///
/// The buffer is validated on construction, so [`Self::header()`] can decode it unchecked.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    buffer: SharedAlignedBuffer,
}
61
62impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
63 type Header<'a> = BeaconChainHeader<'a>;
64
65 #[inline(always)]
66 fn header(&self) -> Self::Header<'_> {
67 self.header()
68 }
69}
70
impl OwnedBeaconChainHeader {
    /// Max buffer allocation (in bytes) needed for a header with the given child shard blocks,
    /// assuming worst-case sizes for consensus parameters and seal
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                // `u16` count of child shard blocks + 2 padding bytes + the block roots themselves
                u16::SIZE
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderBeaconChainParameters::MAX_SIZE
            + BlockHeaderSealRef::MAX_SIZE
    }

    /// Create an unsealed header from parts.
    ///
    /// Allocates a buffer sized by [`Self::max_allocation_for()`] and serializes all parts into
    /// it; the seal is appended later via
    /// [`OwnedBeaconChainBlockHeaderUnsealed::with_seal()`].
    ///
    /// # Errors
    ///
    /// Returns an error if `child_shard_blocks` has more entries than fit into a `u16`.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderBeaconChainParameters<'_>,
    ) -> Result<OwnedBeaconChainBlockHeaderUnsealed, OwnedBeaconChainHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainBlockHeaderUnsealed { buffer })
    }

    /// Serialize header parts (without seal) into the provided `buffer`.
    ///
    /// Byte layout: prefix, result, consensus info, child shard blocks
    /// (count + padding + roots), then consensus parameters (fixed fields, bitflags, optional
    /// fields in mask order).
    ///
    /// # Errors
    ///
    /// Returns an error if `child_shard_blocks` has more entries than fit into a `u16`.
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderBeaconChainParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        // The count is serialized as a little-endian `u16`, so it must fit first
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: `u16` count, 2 padding bytes, then the concatenated block roots
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        // Consensus parameters: fixed fields first, then a bitflags byte describing which
        // optional fields follow, then the optional fields themselves in mask order
        {
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .solution_range
                    .to_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .slot_iterations
                    .get()
                    .to_le_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // One bit per present optional field; masks are defined on
            // `BlockHeaderBeaconChainParameters`
            let bitflags = {
                let mut bitflags = 0u8;

                if consensus_parameters.super_segment_root.is_some() {
                    bitflags |= BlockHeaderBeaconChainParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if consensus_parameters.next_solution_range.is_some() {
                    bitflags |= BlockHeaderBeaconChainParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if consensus_parameters.pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderBeaconChainParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            if let Some(super_segment_root) = consensus_parameters.super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = consensus_parameters.next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = consensus_parameters.pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

    /// Create an owned header from a borrowed one by re-encoding it (including the seal)
    #[inline]
    pub fn from_header(header: BeaconChainHeader<'_>) -> Result<Self, OwnedBeaconChainHeaderError> {
        let unsealed = Self::from_parts(
            header.shared.prefix,
            header.shared.result,
            header.shared.consensus_info,
            &header.child_shard_blocks,
            header.consensus_parameters,
        )?;

        Ok(unsealed.with_seal(header.shared.seal))
    }

    /// Reconstruct an owned header from a previously-stored buffer.
    ///
    /// The buffer is returned back as the error if it does not contain exactly one valid header
    /// with no trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let Some((_header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer.as_slice())
        else {
            return Err(buffer);
        };
        if !extra_bytes.is_empty() {
            return Err(buffer);
        }

        Ok(Self { buffer })
    }

    /// Inner buffer with the serialized header contents
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        &self.buffer
    }

    /// Get a borrowed header view out of the owned buffer
    pub fn header(&self) -> BeaconChainHeader<'_> {
        // Unchecked decoding is fine: all constructors validated the buffer
        BeaconChainHeader::try_from_bytes_unchecked(self.buffer.as_slice())
            .expect("Constructor ensures validity; qed")
            .0
    }
}
258
/// Beacon chain block header that is serialized but not yet sealed; call
/// [`Self::with_seal()`] to produce an [`OwnedBeaconChainHeader`]
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainBlockHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}
264
265impl OwnedBeaconChainBlockHeaderUnsealed {
266 #[inline(always)]
268 pub fn pre_seal_hash(&self) -> Blake3Hash {
269 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
271 }
272
273 pub fn with_seal(self, seal: BlockHeaderSealRef<'_>) -> OwnedBeaconChainHeader {
275 let Self { mut buffer } = self;
276 append_seal(&mut buffer, seal);
277
278 OwnedBeaconChainHeader {
279 buffer: buffer.into_shared(),
280 }
281 }
282}
283
/// Errors for [`OwnedIntermediateShardHeader`] construction
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// The number of child shard blocks does not fit into the `u16` count field
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks provided
        actual: usize,
    },
}
294
/// Owned intermediate shard block header stored in a shared aligned buffer.
///
/// The buffer is validated on construction, so [`Self::header()`] can decode it unchecked.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    buffer: SharedAlignedBuffer,
}
303
304impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
305 type Header<'a> = IntermediateShardHeader<'a>;
306
307 #[inline(always)]
308 fn header(&self) -> Self::Header<'_> {
309 self.header()
310 }
311}
312
impl OwnedIntermediateShardHeader {
    /// Max buffer allocation (in bytes) needed for a header with the given child shard blocks,
    /// assuming a worst-case seal size
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                // `u16` count of child shard blocks + 2 padding bytes + the block roots themselves
                u16::SIZE
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSealRef::MAX_SIZE
    }

    /// Create an unsealed header from parts.
    ///
    /// Allocates a buffer sized by [`Self::max_allocation_for()`] and serializes all parts into
    /// it; the seal is appended later via
    /// [`OwnedIntermediateShardBlockHeaderUnsealed::with_seal()`].
    ///
    /// # Errors
    ///
    /// Returns an error if `child_shard_blocks` has more entries than fit into a `u16`.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardBlockHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardBlockHeaderUnsealed { buffer })
    }

    /// Serialize header parts (without seal) into the provided `buffer`.
    ///
    /// Byte layout: prefix, result, consensus info, beacon chain info, then child shard blocks
    /// (count + padding + roots).
    ///
    /// # Errors
    ///
    /// Returns an error if `child_shard_blocks` has more entries than fit into a `u16`.
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        // The count is serialized as a little-endian `u16`, so it must fit first
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: `u16` count, 2 padding bytes, then the concatenated block roots
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

    /// Create an owned header from a borrowed one by re-encoding it (including the seal)
    #[inline]
    pub fn from_header(
        header: IntermediateShardHeader<'_>,
    ) -> Result<Self, OwnedIntermediateShardHeaderError> {
        let unsealed = Self::from_parts(
            header.shared.prefix,
            header.shared.result,
            header.shared.consensus_info,
            header.beacon_chain_info,
            &header.child_shard_blocks,
        )?;

        Ok(unsealed.with_seal(header.shared.seal))
    }

    /// Reconstruct an owned header from a previously-stored buffer.
    ///
    /// The buffer is returned back as the error if it does not contain exactly one valid header
    /// with no trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let Some((_header, extra_bytes)) =
            IntermediateShardHeader::try_from_bytes(buffer.as_slice())
        else {
            return Err(buffer);
        };
        if !extra_bytes.is_empty() {
            return Err(buffer);
        }

        Ok(Self { buffer })
    }

    /// Inner buffer with the serialized header contents
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        &self.buffer
    }

    /// Get a borrowed header view out of the owned buffer
    pub fn header(&self) -> IntermediateShardHeader<'_> {
        // Unchecked decoding is fine: all constructors validated the buffer
        IntermediateShardHeader::try_from_bytes_unchecked(self.buffer.as_slice())
            .expect("Constructor ensures validity; qed")
            .0
    }
}
439
/// Intermediate shard block header that is serialized but not yet sealed; call
/// [`Self::with_seal()`] to produce an [`OwnedIntermediateShardHeader`]
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardBlockHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}
445
446impl OwnedIntermediateShardBlockHeaderUnsealed {
447 #[inline(always)]
449 pub fn pre_seal_hash(&self) -> Blake3Hash {
450 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
452 }
453
454 pub fn with_seal(self, seal: BlockHeaderSealRef<'_>) -> OwnedIntermediateShardHeader {
456 let Self { mut buffer } = self;
457 append_seal(&mut buffer, seal);
458
459 OwnedIntermediateShardHeader {
460 buffer: buffer.into_shared(),
461 }
462 }
463}
464
/// Owned leaf shard block header stored in a shared aligned buffer.
///
/// The buffer is validated on construction, so [`Self::header()`] can decode it unchecked.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    buffer: SharedAlignedBuffer,
}
473
474impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
475 type Header<'a> = LeafShardHeader<'a>;
476
477 #[inline(always)]
478 fn header(&self) -> Self::Header<'_> {
479 self.header()
480 }
481}
482
impl OwnedLeafShardHeader {
    /// Max buffer allocation (in bytes) needed for a leaf shard header, assuming a worst-case
    /// seal size; constant because leaf shard headers have no variable-length parts
    pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
        + BlockHeaderResult::SIZE
        + BlockHeaderConsensusInfo::SIZE
        + BlockHeaderBeaconChainInfo::SIZE
        + BlockHeaderSealRef::MAX_SIZE;

    /// Create an unsealed header from parts.
    ///
    /// Allocates a buffer sized by [`Self::MAX_ALLOCATION`] and serializes all parts into it; the
    /// seal is appended later via [`OwnedLeafShardBlockHeaderUnsealed::with_seal()`]. Infallible
    /// because all parts are fixed-size.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
    ) -> OwnedLeafShardBlockHeaderUnsealed {
        let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            &mut buffer,
        );

        OwnedLeafShardBlockHeaderUnsealed { buffer }
    }

    /// Serialize header parts (without seal) into the provided `buffer`.
    ///
    /// Byte layout: prefix, result, consensus info, beacon chain info.
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        buffer: &mut OwnedAlignedBuffer,
    ) {
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
    }

    /// Create an owned header from a borrowed one by re-encoding it (including the seal)
    #[inline]
    pub fn from_header(header: LeafShardHeader<'_>) -> Self {
        let unsealed = Self::from_parts(
            header.shared.prefix,
            header.shared.result,
            header.shared.consensus_info,
            header.beacon_chain_info,
        );

        unsealed.with_seal(header.shared.seal)
    }

    /// Reconstruct an owned header from a previously-stored buffer.
    ///
    /// The buffer is returned back as the error if it does not contain exactly one valid header
    /// with no trailing bytes.
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        let Some((_header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer.as_slice())
        else {
            return Err(buffer);
        };
        if !extra_bytes.is_empty() {
            return Err(buffer);
        }

        Ok(Self { buffer })
    }

    /// Inner buffer with the serialized header contents
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        &self.buffer
    }

    /// Get a borrowed header view out of the owned buffer
    pub fn header(&self) -> LeafShardHeader<'_> {
        // Unchecked decoding is fine: all constructors validated the buffer
        LeafShardHeader::try_from_bytes_unchecked(self.buffer.as_slice())
            .expect("Constructor ensures validity; qed")
            .0
    }
}
572
/// Leaf shard block header that is serialized but not yet sealed; call [`Self::with_seal()`] to
/// produce an [`OwnedLeafShardHeader`]
#[derive(Debug, Clone)]
pub struct OwnedLeafShardBlockHeaderUnsealed {
    buffer: OwnedAlignedBuffer,
}
578
579impl OwnedLeafShardBlockHeaderUnsealed {
580 #[inline(always)]
582 pub fn pre_seal_hash(&self) -> Blake3Hash {
583 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
585 }
586
587 pub fn with_seal(self, seal: BlockHeaderSealRef<'_>) -> OwnedLeafShardHeader {
589 let Self { mut buffer } = self;
590 append_seal(&mut buffer, seal);
591
592 OwnedLeafShardHeader {
593 buffer: buffer.into_shared(),
594 }
595 }
596}
597
/// Errors for [`OwnedBlockHeader`] construction, wrapping the per-shard-kind errors
#[derive(Debug, thiserror::Error)]
pub enum OwnedBlockHeaderError {
    /// Beacon chain block header construction failed
    #[error("Beacon chain block header error: {0}")]
    BeaconChain(#[from] OwnedBeaconChainHeaderError),
    /// Intermediate shard block header construction failed
    #[error("Intermediate shard block header error: {0}")]
    IntermediateShard(#[from] OwnedIntermediateShardHeaderError),
}
608
/// Owned block header of any shard kind (leaf shard headers are infallible to construct and thus
/// have no corresponding error variant in [`OwnedBlockHeaderError`])
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Beacon chain block header
    BeaconChain(OwnedBeaconChainHeader),
    /// Intermediate shard block header
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Leaf shard block header
    LeafShard(OwnedLeafShardHeader),
}
622
623impl GenericOwnedBlockHeader for OwnedBlockHeader {
624 type Header<'a> = BlockHeader<'a>;
625
626 #[inline(always)]
627 fn header(&self) -> Self::Header<'_> {
628 self.header()
629 }
630}
631
632impl OwnedBlockHeader {
633 #[inline]
635 pub fn from_header(header: BlockHeader<'_>) -> Result<Self, OwnedBlockHeaderError> {
636 Ok(match header {
637 BlockHeader::BeaconChain(header) => {
638 Self::BeaconChain(OwnedBeaconChainHeader::from_header(header)?)
639 }
640 BlockHeader::IntermediateShard(header) => {
641 Self::IntermediateShard(OwnedIntermediateShardHeader::from_header(header)?)
642 }
643 BlockHeader::LeafShard(header) => {
644 Self::LeafShard(OwnedLeafShardHeader::from_header(header))
645 }
646 })
647 }
648
649 #[inline]
651 pub fn from_buffer(
652 buffer: SharedAlignedBuffer,
653 shard_kind: ShardKind,
654 ) -> Result<Self, SharedAlignedBuffer> {
655 let Some((_header, extra_bytes)) =
656 BlockHeader::try_from_bytes(buffer.as_slice(), shard_kind)
657 else {
658 return Err(buffer);
659 };
660 if !extra_bytes.is_empty() {
661 return Err(buffer);
662 }
663
664 Ok(match shard_kind {
665 ShardKind::BeaconChain => Self::BeaconChain(OwnedBeaconChainHeader { buffer }),
666 ShardKind::IntermediateShard => {
667 Self::IntermediateShard(OwnedIntermediateShardHeader { buffer })
668 }
669 ShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader { buffer }),
670 ShardKind::Phantom | ShardKind::Invalid => {
671 return Err(buffer);
673 }
674 })
675 }
676
677 pub fn buffer(&self) -> &SharedAlignedBuffer {
679 match self {
680 Self::BeaconChain(owned_header) => owned_header.buffer(),
681 Self::IntermediateShard(owned_header) => owned_header.buffer(),
682 Self::LeafShard(owned_header) => owned_header.buffer(),
683 }
684 }
685
686 pub fn header(&self) -> BlockHeader<'_> {
688 match self {
689 Self::BeaconChain(owned_header) => BlockHeader::BeaconChain(owned_header.header()),
690 Self::IntermediateShard(owned_header) => {
691 BlockHeader::IntermediateShard(owned_header.header())
692 }
693 Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header()),
694 }
695 }
696}