use crate::block::BlockRoot;
4use crate::block::header::{
5 BeaconChainHeader, BlockHeader, BlockHeaderBeaconChainInfo, BlockHeaderConsensusInfo,
6 BlockHeaderConsensusParameters, BlockHeaderPrefix, BlockHeaderResult, BlockHeaderSeal,
7 BlockHeaderSealType, GenericBlockHeader, IntermediateShardHeader, LeafShardHeader,
8};
9use crate::hashes::Blake3Hash;
10use crate::shard::ShardKind;
11use ab_aligned_buffer::{OwnedAlignedBuffer, SharedAlignedBuffer};
12use ab_io_type::trivial_type::TrivialType;
13use core::fmt;
14use derive_more::From;
15use rclite::Arc;
16use yoke::Yoke;
17
/// Common interface of owned block headers: a `Clone`-able handle that owns the
/// serialized header bytes and can hand out a borrowed, parsed view of them.
pub trait GenericOwnedBlockHeader: Clone + fmt::Debug + 'static {
    /// Borrowed, parsed header view whose lifetime is tied to `self`
    type Header<'a>: GenericBlockHeader<'a>
    where
        Self: 'a;

    /// Returns the parsed header view borrowed from the owned bytes
    fn header(&self) -> &Self::Header<'_>;
}
28
29fn append_seal(buffer: &mut OwnedAlignedBuffer, seal: BlockHeaderSeal<'_>) {
30 match seal {
31 BlockHeaderSeal::Ed25519(seal) => {
32 let true = buffer.append(&[BlockHeaderSealType::Ed25519 as u8]) else {
33 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
34 };
35 let true = buffer.append(seal.as_bytes()) else {
36 unreachable!("Fixed size data structures that are guaranteed to fit; qed");
37 };
38 }
39 }
40}
41
/// Errors that can occur when constructing [`OwnedBeaconChainHeader`] from parts
#[derive(Debug, thiserror::Error)]
pub enum OwnedBeaconChainHeaderError {
    /// The number of child shard blocks does not fit into `u16`
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks provided
        actual: usize,
    },
}
52
/// Owned beacon chain block header stored as serialized bytes.
///
/// Clones are cheap: the parsed header is yoked to a reference-counted shared
/// buffer, so cloning only bumps the `Arc` refcount.
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeader {
    // Parsed header borrowing from the shared backing buffer it is yoked to
    inner: Arc<Yoke<BeaconChainHeader<'static>, SharedAlignedBuffer>>,
}
61
impl GenericOwnedBlockHeader for OwnedBeaconChainHeader {
    type Header<'a> = BeaconChainHeader<'a>;

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        // Delegates to the inherent `header()` method below
        self.header()
    }
}
70
impl OwnedBeaconChainHeader {
    /// Upper bound (in bytes) on the serialized size of a beacon chain header
    /// with the given child shard blocks, including the largest possible
    /// consensus parameters and seal
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + (
                // Child shard blocks: `u16` count + 2 padding bytes + the roots themselves
                u16::SIZE
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderConsensusParameters::MAX_SIZE
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Serializes header parts into a freshly allocated buffer, returning an
    /// unsealed header that still needs a seal appended via
    /// [`OwnedBeaconChainHeaderUnsealed::with_seal()`].
    ///
    /// Returns [`OwnedBeaconChainHeaderError::TooManyChildShardBlocks`] if the
    /// number of child shard blocks does not fit into `u16`.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
    ) -> Result<OwnedBeaconChainHeaderUnsealed, OwnedBeaconChainHeaderError> {
        // Capacity covers the worst case, so appends below cannot fail
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            child_shard_blocks,
            consensus_parameters,
            &mut buffer,
        )?;

        Ok(OwnedBeaconChainHeaderUnsealed { buffer })
    }

    /// Serializes header parts into the provided `buffer` (append-only).
    ///
    /// Byte layout written, in order: prefix, result, consensus info, child
    /// shard blocks (`u16` LE count, 2 zero padding bytes, the roots), then
    /// consensus parameters (fixed parameters, a bitflags byte describing which
    /// optional fields follow, then the present optional fields in mask order).
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        child_shard_blocks: &[BlockRoot],
        consensus_parameters: BlockHeaderConsensusParameters<'_>,
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedBeaconChainHeaderError> {
        // Child shard block count is stored as `u16`, reject anything larger
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedBeaconChainHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: count, padding, then the roots as one flat byte slice
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // 2 bytes of padding after the `u16` count
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }
        // Consensus parameters: fixed part first, then bitflags + optional fields
        {
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .solution_range
                    .to_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(
                &consensus_parameters
                    .fixed_parameters
                    .slot_iterations
                    .get()
                    .to_le_bytes(),
            ) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // One bit per optional field that follows; masks are defined on
            // `BlockHeaderConsensusParameters`
            let bitflags = {
                let mut bitflags = 0u8;

                if consensus_parameters.super_segment_root.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::SUPER_SEGMENT_ROOT_MASK;
                }
                if consensus_parameters.next_solution_range.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::NEXT_SOLUTION_RANGE_MASK;
                }
                if consensus_parameters.pot_parameters_change.is_some() {
                    bitflags |= BlockHeaderConsensusParameters::POT_PARAMETERS_CHANGE_MASK;
                }

                bitflags
            };

            let true = buffer.append(&[bitflags]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };

            // Optional fields are written only when present, in the same order
            // as the bitflag masks above
            if let Some(super_segment_root) = consensus_parameters.super_segment_root {
                let true = buffer.append(super_segment_root.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(next_solution_range) = consensus_parameters.next_solution_range {
                let true = buffer.append(&next_solution_range.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }

            if let Some(pot_parameters_change) = consensus_parameters.pot_parameters_change {
                let true = buffer.append(&pot_parameters_change.slot.to_bytes()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true =
                    buffer.append(&pot_parameters_change.slot_iterations.get().to_le_bytes())
                else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
                let true = buffer.append(pot_parameters_change.entropy.as_ref()) else {
                    unreachable!("Fixed size data structures that are guaranteed to fit; qed");
                };
            }
        }

        Ok(())
    }

    /// Creates an owned header from a shared buffer of serialized bytes.
    ///
    /// Returns `Err(buffer)` if the buffer does not parse as exactly one
    /// beacon chain header (parse failure or trailing bytes).
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // Yoke pairs the parsed header (which borrows from the buffer) with the
        // buffer itself, so the two can be moved around together
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = BeaconChainHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            // Trailing bytes mean the buffer is not exactly one header
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with serialized header bytes
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Parsed header view borrowing from the underlying buffer
    #[inline(always)]
    pub fn header(&self) -> &BeaconChainHeader<'_> {
        self.inner.get()
    }
}
252
/// Beacon chain header that has been serialized but not yet sealed; call
/// [`Self::with_seal()`] to turn it into an [`OwnedBeaconChainHeader`]
#[derive(Debug, Clone)]
pub struct OwnedBeaconChainHeaderUnsealed {
    // Serialized header bytes without the trailing seal
    buffer: OwnedAlignedBuffer,
}
258
259impl OwnedBeaconChainHeaderUnsealed {
260 #[inline(always)]
262 pub fn pre_seal_hash(&self) -> Blake3Hash {
263 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
265 }
266
267 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedBeaconChainHeader {
269 let Self { mut buffer } = self;
270 append_seal(&mut buffer, seal);
271
272 OwnedBeaconChainHeader::from_buffer(buffer.into_shared())
275 .expect("Known to be created correctly; qed")
276 }
277}
278
/// Errors that can occur when constructing [`OwnedIntermediateShardHeader`] from parts
#[derive(Debug, thiserror::Error)]
pub enum OwnedIntermediateShardHeaderError {
    /// The number of child shard blocks does not fit into `u16`
    #[error("Too many child shard blocks: {actual}")]
    TooManyChildShardBlocks {
        /// Actual number of child shard blocks provided
        actual: usize,
    },
}
289
/// Owned intermediate shard block header stored as serialized bytes.
///
/// Clones are cheap: the parsed header is yoked to a reference-counted shared
/// buffer, so cloning only bumps the `Arc` refcount.
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeader {
    // Parsed header borrowing from the shared backing buffer it is yoked to
    inner: Arc<Yoke<IntermediateShardHeader<'static>, SharedAlignedBuffer>>,
}
298
impl GenericOwnedBlockHeader for OwnedIntermediateShardHeader {
    type Header<'a> = IntermediateShardHeader<'a>;

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        // Delegates to the inherent `header()` method below
        self.header()
    }
}
307
impl OwnedIntermediateShardHeader {
    /// Upper bound (in bytes) on the serialized size of an intermediate shard
    /// header with the given child shard blocks, including the largest
    /// possible seal
    #[inline(always)]
    pub const fn max_allocation_for(child_shard_blocks: &[BlockRoot]) -> u32 {
        BlockHeaderPrefix::SIZE
            + BlockHeaderResult::SIZE
            + BlockHeaderConsensusInfo::SIZE
            + BlockHeaderBeaconChainInfo::SIZE
            + (
                // Child shard blocks: `u16` count + 2 padding bytes + the roots themselves
                u16::SIZE
                + <[u8; 2]>::SIZE
                + size_of_val(child_shard_blocks) as u32
            )
            + BlockHeaderSeal::MAX_SIZE
    }

    /// Serializes header parts into a freshly allocated buffer, returning an
    /// unsealed header that still needs a seal appended via
    /// [`OwnedIntermediateShardHeaderUnsealed::with_seal()`].
    ///
    /// Returns [`OwnedIntermediateShardHeaderError::TooManyChildShardBlocks`]
    /// if the number of child shard blocks does not fit into `u16`.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
    ) -> Result<OwnedIntermediateShardHeaderUnsealed, OwnedIntermediateShardHeaderError> {
        // Capacity covers the worst case, so appends below cannot fail
        let mut buffer =
            OwnedAlignedBuffer::with_capacity(Self::max_allocation_for(child_shard_blocks));

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            child_shard_blocks,
            &mut buffer,
        )?;

        Ok(OwnedIntermediateShardHeaderUnsealed { buffer })
    }

    /// Serializes header parts into the provided `buffer` (append-only).
    ///
    /// Byte layout written, in order: prefix, result, consensus info, beacon
    /// chain info, then child shard blocks (`u16` LE count, 2 zero padding
    /// bytes, the roots).
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        child_shard_blocks: &[BlockRoot],
        buffer: &mut OwnedAlignedBuffer,
    ) -> Result<(), OwnedIntermediateShardHeaderError> {
        // Child shard block count is stored as `u16`, reject anything larger
        let num_blocks = child_shard_blocks.len();
        let num_blocks = u16::try_from(num_blocks).map_err(|_error| {
            OwnedIntermediateShardHeaderError::TooManyChildShardBlocks { actual: num_blocks }
        })?;
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        // Child shard blocks: count, padding, then the roots as one flat byte slice
        {
            let true = buffer.append(&num_blocks.to_le_bytes()) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            // 2 bytes of padding after the `u16` count
            let true = buffer.append(&[0; 2]) else {
                unreachable!("Fixed size data structures that are guaranteed to fit; qed");
            };
            let true = buffer.append(BlockRoot::repr_from_slice(child_shard_blocks).as_flattened())
            else {
                unreachable!("Checked size above; qed");
            };
        }

        Ok(())
    }

    /// Creates an owned header from a shared buffer of serialized bytes.
    ///
    /// Returns `Err(buffer)` if the buffer does not parse as exactly one
    /// intermediate shard header (parse failure or trailing bytes).
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // Yoke pairs the parsed header (which borrows from the buffer) with the
        // buffer itself, so the two can be moved around together
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = IntermediateShardHeader::try_from_bytes(buffer)
            else {
                return Err(());
            };
            // Trailing bytes mean the buffer is not exactly one header
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with serialized header bytes
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Parsed header view borrowing from the underlying buffer
    #[inline(always)]
    pub fn header(&self) -> &IntermediateShardHeader<'_> {
        self.inner.get()
    }
}
425
/// Intermediate shard header that has been serialized but not yet sealed; call
/// [`Self::with_seal()`] to turn it into an [`OwnedIntermediateShardHeader`]
#[derive(Debug, Clone)]
pub struct OwnedIntermediateShardHeaderUnsealed {
    // Serialized header bytes without the trailing seal
    buffer: OwnedAlignedBuffer,
}
431
432impl OwnedIntermediateShardHeaderUnsealed {
433 #[inline(always)]
435 pub fn pre_seal_hash(&self) -> Blake3Hash {
436 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
438 }
439
440 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedIntermediateShardHeader {
442 let Self { mut buffer } = self;
443 append_seal(&mut buffer, seal);
444
445 OwnedIntermediateShardHeader::from_buffer(buffer.into_shared())
449 .expect("Known to be created correctly; qed")
450 }
451}
452
/// Owned leaf shard block header stored as serialized bytes.
///
/// Clones are cheap: the parsed header is yoked to a reference-counted shared
/// buffer, so cloning only bumps the `Arc` refcount.
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeader {
    // Parsed header borrowing from the shared backing buffer it is yoked to
    inner: Arc<Yoke<LeafShardHeader<'static>, SharedAlignedBuffer>>,
}
461
impl GenericOwnedBlockHeader for OwnedLeafShardHeader {
    type Header<'a> = LeafShardHeader<'a>;

    #[inline(always)]
    fn header(&self) -> &Self::Header<'_> {
        // Delegates to the inherent `header()` method below
        self.header()
    }
}
470
impl OwnedLeafShardHeader {
    /// Upper bound (in bytes) on the serialized size of a leaf shard header;
    /// a constant because every part, including the largest seal, is fixed-size
    pub const MAX_ALLOCATION: u32 = BlockHeaderPrefix::SIZE
        + BlockHeaderResult::SIZE
        + BlockHeaderConsensusInfo::SIZE
        + BlockHeaderBeaconChainInfo::SIZE
        + BlockHeaderSeal::MAX_SIZE;

    /// Serializes header parts into a freshly allocated buffer, returning an
    /// unsealed header that still needs a seal appended via
    /// [`OwnedLeafShardHeaderUnsealed::with_seal()`].
    ///
    /// Infallible: unlike the other shard kinds there is no variable-length
    /// child shard block list to validate.
    pub fn from_parts(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
    ) -> OwnedLeafShardHeaderUnsealed {
        // Capacity covers the worst case, so appends below cannot fail
        let mut buffer = OwnedAlignedBuffer::with_capacity(Self::MAX_ALLOCATION);

        Self::from_parts_into(
            prefix,
            result,
            consensus_info,
            beacon_chain_info,
            &mut buffer,
        );

        OwnedLeafShardHeaderUnsealed { buffer }
    }

    /// Serializes header parts into the provided `buffer` (append-only), in
    /// order: prefix, result, consensus info, beacon chain info
    pub fn from_parts_into(
        prefix: &BlockHeaderPrefix,
        result: &BlockHeaderResult,
        consensus_info: &BlockHeaderConsensusInfo,
        beacon_chain_info: &BlockHeaderBeaconChainInfo,
        buffer: &mut OwnedAlignedBuffer,
    ) {
        let true = buffer.append(prefix.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(result.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(consensus_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
        let true = buffer.append(beacon_chain_info.as_bytes()) else {
            unreachable!("Fixed size data structures that are guaranteed to fit; qed");
        };
    }

    /// Creates an owned header from a shared buffer of serialized bytes.
    ///
    /// Returns `Err(buffer)` if the buffer does not parse as exactly one leaf
    /// shard header (parse failure or trailing bytes).
    #[inline]
    pub fn from_buffer(buffer: SharedAlignedBuffer) -> Result<Self, SharedAlignedBuffer> {
        // Yoke pairs the parsed header (which borrows from the buffer) with the
        // buffer itself, so the two can be moved around together
        let inner = Yoke::try_attach_to_cart(buffer.clone(), |buffer| {
            let Some((header, extra_bytes)) = LeafShardHeader::try_from_bytes(buffer) else {
                return Err(());
            };
            // Trailing bytes mean the buffer is not exactly one header
            if !extra_bytes.is_empty() {
                return Err(());
            }

            Ok(header)
        })
        .map_err(move |()| buffer)?;

        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Underlying buffer with serialized header bytes
    #[inline(always)]
    pub fn buffer(&self) -> &SharedAlignedBuffer {
        self.inner.backing_cart()
    }

    /// Parsed header view borrowing from the underlying buffer
    #[inline(always)]
    pub fn header(&self) -> &LeafShardHeader<'_> {
        self.inner.get()
    }
}
554
/// Leaf shard header that has been serialized but not yet sealed; call
/// [`Self::with_seal()`] to turn it into an [`OwnedLeafShardHeader`]
#[derive(Debug, Clone)]
pub struct OwnedLeafShardHeaderUnsealed {
    // Serialized header bytes without the trailing seal
    buffer: OwnedAlignedBuffer,
}
560
561impl OwnedLeafShardHeaderUnsealed {
562 #[inline(always)]
564 pub fn pre_seal_hash(&self) -> Blake3Hash {
565 Blake3Hash::from(blake3::hash(self.buffer.as_slice()))
567 }
568
569 pub fn with_seal(self, seal: BlockHeaderSeal<'_>) -> OwnedLeafShardHeader {
571 let Self { mut buffer } = self;
572 append_seal(&mut buffer, seal);
573
574 OwnedLeafShardHeader::from_buffer(buffer.into_shared())
577 .expect("Known to be created correctly; qed")
578 }
579}
580
/// Owned block header for any shard kind
#[derive(Debug, Clone, From)]
pub enum OwnedBlockHeader {
    /// Beacon chain header
    BeaconChain(OwnedBeaconChainHeader),
    /// Intermediate shard header
    IntermediateShard(OwnedIntermediateShardHeader),
    /// Leaf shard header
    LeafShard(OwnedLeafShardHeader),
}
594
595impl OwnedBlockHeader {
596 #[inline]
598 pub fn from_buffer(
599 buffer: SharedAlignedBuffer,
600 shard_kind: ShardKind,
601 ) -> Result<Self, SharedAlignedBuffer> {
602 Ok(match shard_kind {
603 ShardKind::BeaconChain => {
604 Self::BeaconChain(OwnedBeaconChainHeader::from_buffer(buffer)?)
605 }
606 ShardKind::IntermediateShard => {
607 Self::IntermediateShard(OwnedIntermediateShardHeader::from_buffer(buffer)?)
608 }
609 ShardKind::LeafShard => Self::LeafShard(OwnedLeafShardHeader::from_buffer(buffer)?),
610 ShardKind::Phantom | ShardKind::Invalid => {
611 return Err(buffer);
613 }
614 })
615 }
616
617 #[inline]
619 pub fn buffer(&self) -> &SharedAlignedBuffer {
620 match self {
621 Self::BeaconChain(owned_header) => owned_header.buffer(),
622 Self::IntermediateShard(owned_header) => owned_header.buffer(),
623 Self::LeafShard(owned_header) => owned_header.buffer(),
624 }
625 }
626
627 #[inline]
629 pub fn header(&self) -> BlockHeader<'_> {
630 match self {
631 Self::BeaconChain(owned_header) => {
632 BlockHeader::BeaconChain(owned_header.header().clone())
633 }
634 Self::IntermediateShard(owned_header) => {
635 BlockHeader::IntermediateShard(owned_header.header().clone())
636 }
637 Self::LeafShard(owned_header) => BlockHeader::LeafShard(owned_header.header().clone()),
638 }
639 }
640}