// ab_core_primitives/solutions.rs
1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::ed25519::Ed25519PublicKey;
5use crate::hashes::Blake3Hash;
6use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot, SegmentProof};
7use crate::pos::{PosProof, PosSeed};
8use crate::pot::{PotOutput, SlotNumber};
9use crate::sectors::{SBucket, SectorId, SectorIndex, SectorSlotChallenge};
10use crate::segments::{
11    HistorySize, LocalSegmentIndex, SegmentIndex, SegmentPosition, SegmentRoot, SuperSegmentIndex,
12    SuperSegmentRoot,
13};
14use crate::shard::{NumShards, RealShardKind, ShardIndex, ShardKind};
15use ab_blake3::single_block_keyed_hash;
16use ab_io_type::trivial_type::TrivialType;
17use ab_merkle_tree::balanced::BalancedMerkleTree;
18use blake3::{Hash, OUT_LEN};
19use core::simd::Simd;
20use core::{fmt, mem};
21use derive_more::{
22    Add, AddAssign, AsMut, AsRef, Deref, DerefMut, Display, From, Into, Sub, SubAssign,
23};
24#[cfg(feature = "scale-codec")]
25use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
26#[cfg(feature = "serde")]
27use serde::{Deserialize, Serialize};
28#[cfg(feature = "serde")]
29use serde::{Deserializer, Serializer};
30#[cfg(feature = "serde")]
31use serde_big_array::BigArray;
32
/// Solution distance.
///
/// Produced by [`SolutionDistance::calculate()`] as the bidirectional distance between the global
/// challenge and an audit chunk, then compared against a solution range via
/// [`SolutionDistance::is_within()`].
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
41
42impl SolutionDistance {
43    /// Maximum value
44    pub const MAX: Self = Self(u64::MAX / 2);
45
46    // TODO: Remove once `From` is stable
47    /// Create a new instance
48    #[inline(always)]
49    pub const fn from_u64(n: u64) -> Self {
50        Self(n)
51    }
52
53    /// Calculate solution distance for given parameters.
54    ///
55    /// Typically used as a primitive to check whether solution distance is within solution range
56    /// (see [`Self::is_within()`]).
57    pub fn calculate(
58        global_challenge: &Blake3Hash,
59        chunk: &[u8; 32],
60        sector_slot_challenge: &SectorSlotChallenge,
61    ) -> Self {
62        // TODO: Is keyed hash really needed here?
63        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
64            .expect("Less than a single block worth of bytes; qed");
65        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
66            audit_chunk[0],
67            audit_chunk[1],
68            audit_chunk[2],
69            audit_chunk[3],
70            audit_chunk[4],
71            audit_chunk[5],
72            audit_chunk[6],
73            audit_chunk[7],
74        ]);
75        let global_challenge_as_solution_range: SolutionRange =
76            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
77
78        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
79    }
80
81    /// Check if solution distance is within the provided solution range
82    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
83        self.0 <= u64::from(solution_range) / 2
84    }
85}
86
/// Solution range.
///
/// Newtype over a `u64` with little-endian byte conversions (see
/// [`SolutionRange::from_bytes()`]/[`SolutionRange::to_bytes()`]); a solution is valid when its
/// [`SolutionDistance`] is within this range.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
109
// Const conversion from the raw `u64` representation (usable in `const fn` such as
// `SolutionDistance::is_within()`)
impl const From<u64> for SolutionRange {
    #[inline(always)]
    fn from(value: u64) -> Self {
        Self(value)
    }
}
116
// Const conversion back to the raw `u64` representation
impl const From<SolutionRange> for u64 {
    #[inline(always)]
    fn from(value: SolutionRange) -> Self {
        value.0
    }
}
123
124impl SolutionRange {
125    /// Size in bytes
126    pub const SIZE: usize = size_of::<u64>();
127    /// Minimum value
128    pub const MIN: Self = Self(u64::MIN);
129    /// Maximum value
130    pub const MAX: Self = Self(u64::MAX);
131
132    /// Create a new instance from bytes
133    #[inline(always)]
134    pub fn to_bytes(self) -> [u8; 8] {
135        self.0.to_le_bytes()
136    }
137
138    /// Create a new instance from bytes
139    #[inline(always)]
140    pub fn from_bytes(bytes: [u8; 8]) -> Self {
141        Self(u64::from_le_bytes(bytes))
142    }
143
144    /// Computes the following:
145    /// ```text
146    /// MAX * slot_probability / chunks * s_buckets / pieces
147    /// ```
148    #[inline]
149    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
150        let solution_range = u64::MAX
151            // Account for slot probability
152            / slot_probability.1 * slot_probability.0
153            // Now take the probability of hitting occupied s-bucket in a piece into account
154            / Record::NUM_CHUNKS as u64
155            * Record::NUM_S_BUCKETS as u64;
156
157        // Take the number of pieces into account
158        Self(solution_range / pieces)
159    }
160
161    /// Computes the following:
162    /// ```text
163    /// MAX * slot_probability / chunks * s_buckets / solution_range
164    /// ```
165    #[inline]
166    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
167        let pieces = u64::MAX
168            // Account for slot probability
169            / slot_probability.1 * slot_probability.0
170            // Now take the probability of hitting occupied s-bucket in sector into account
171            / Record::NUM_CHUNKS as u64
172            * Record::NUM_S_BUCKETS as u64;
173
174        // Take solution range into account
175        pieces / self.0
176    }
177
178    /// Expands the global solution range to a solution range that corresponds to a leaf shard.
179    ///
180    /// Global solution range is updated based on the beacon chain information, while a farmer also
181    /// creates intermediate shard and leaf shard solutions with a wider solution range.
182    #[inline]
183    pub const fn to_leaf_shard(self, num_shards: NumShards) -> Self {
184        Self(
185            self.0
186                .saturating_mul(u64::from(num_shards.leaf_shards().get())),
187        )
188    }
189
190    /// Expands the global solution range to a solution range that corresponds to an intermediate
191    /// shard
192    #[inline]
193    pub const fn to_intermediate_shard(self, num_shards: NumShards) -> Self {
194        Self(
195            self.0
196                .saturating_mul(u64::from(num_shards.intermediate_shards().get())),
197        )
198    }
199
200    /// Bidirectional distance between two solution ranges
201    #[inline]
202    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
203        let a = self.0;
204        let b = other.0;
205        let diff = a.wrapping_sub(b);
206        let diff2 = b.wrapping_sub(a);
207        // Find smaller diff between 2 directions
208        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
209    }
210
211    /// Derives next solution range
212    #[inline]
213    pub fn derive_next(
214        self,
215        slots_in_last_interval: SlotNumber,
216        slot_probability: (u64, u64),
217        retarget_interval: BlockNumber,
218    ) -> Self {
219        // The idea here is to keep block production at the same pace while space pledged on the
220        // network changes. For this, we adjust the previous solution range according to actual and
221        // expected number of blocks per retarget interval.
222        //
223        // Below is code analogous to the following, but without using floats:
224        // ```rust
225        // let actual_slots_per_block = slots_in_last_interval as f64 / retarget_interval as f64;
226        // let expected_slots_per_block =
227        //     slot_probability.1 as f64 / slot_probability.0 as f64;
228        // let adjustment_factor =
229        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
230        //
231        // next_solution_range =
232        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
233        // ```
234        let current_solution_range = self.0;
235        let next_solution_range = u64::try_from(
236            u128::from(current_solution_range)
237                .saturating_mul(u128::from(slots_in_last_interval))
238                .saturating_mul(u128::from(slot_probability.0))
239                / u128::from(u64::from(retarget_interval))
240                / u128::from(slot_probability.1),
241        );
242
243        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
244            current_solution_range / 4,
245            current_solution_range.saturating_mul(4),
246        ))
247    }
248}
249
// Quick test to ensure the functions above are the inverse of each other; evaluated at compile
// time, so any violation fails the build
const {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
}
256
/// Proof for chunk contained within a record.
///
/// A sequence of `NUM_HASHES` BLAKE3 hashes (one per Merkle tree level, see
/// `ChunkProof::NUM_HASHES`).
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
262
263impl fmt::Debug for ChunkProof {
264    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
265        write!(f, "[")?;
266        for hash in self.0 {
267            for byte in hash {
268                write!(f, "{byte:02x}")?;
269            }
270            write!(f, ", ")?;
271        }
272        write!(f, "]")?;
273        Ok(())
274    }
275}
276
// Binary wire form of a chunk proof (array is too long for serde's built-in array support, hence
// `BigArray`)
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);

// Single hex-encoded hash of a chunk proof.
//
// `#[repr(C)]` is required for soundness: `Serialize`/`Deserialize` for `ChunkProof` transmute
// between `[[u8; OUT_LEN]; _]` and `[ChunkProofHexHash; _]`, and the layout of a default-repr
// struct is not guaranteed. This makes the existing SAFETY comments on those transmutes actually
// hold.
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);

// Human-readable (hex) wire form of a chunk proof
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
291
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    /// Serializes as hex strings for human-readable formats and as raw bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            // NOTE(review): confirm `ChunkProofHexHash` actually carries an explicit repr — the
            // transmute's soundness relies on it
            ChunkProofHex(unsafe {
                mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
314
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    /// Deserializes from hex strings for human-readable formats and from raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            // NOTE(review): confirm `ChunkProofHexHash` actually carries an explicit repr — the
            // transmute's soundness relies on it
            unsafe {
                mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
336
337impl Default for ChunkProof {
338    #[inline]
339    fn default() -> Self {
340        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
341    }
342}
343
344impl AsRef<[u8]> for ChunkProof {
345    #[inline]
346    fn as_ref(&self) -> &[u8] {
347        self.0.as_flattened()
348    }
349}
350
351impl AsMut<[u8]> for ChunkProof {
352    #[inline]
353    fn as_mut(&mut self) -> &mut [u8] {
354        self.0.as_flattened_mut()
355    }
356}
357
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // Merkle proof length for a balanced tree with `Record::NUM_S_BUCKETS` leaves (log2 of the
    // leaf count)
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
363
/// Solution verification errors
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum SolutionVerifyError {
    /// Invalid piece offset
    #[error("Piece verification failed")]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// History size is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error("Sector expired")]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Record does not belong to the segment
    #[error("Record does not belong to the segment")]
    RecordNotInSegment,
    /// Segment doesn't belong to the super segment
    #[error("Segment doesn't belong to the super segment")]
    SegmentNotInSuperSegment,
    /// Solution is outside the solution range
    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
    OutsideSolutionRange {
        /// Solution range
        solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionDistance,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid shard commitment
    #[error("Invalid shard commitment")]
    InvalidShardCommitment,
    /// Invalid input shard
    #[error("Invalid input shard {shard_index} ({shard_kind:?})")]
    InvalidInputShard {
        /// Input shard index
        shard_index: ShardIndex,
        /// Input shard kind
        shard_kind: Option<ShardKind>,
    },
    /// Invalid solution shard
    #[error(
        "Invalid solution shard {solution_shard_index} (parent {solution_parent_shard_index:?}), \
        expected shard {expected_shard_index} ({expected_shard_kind:?})"
    )]
    InvalidSolutionShard {
        /// Solution shard index
        solution_shard_index: ShardIndex,
        /// Parent shard index of the solution shard (if any)
        solution_parent_shard_index: Option<ShardIndex>,
        /// Expected shard index
        expected_shard_index: ShardIndex,
        /// Expected shard kind
        expected_shard_kind: RealShardKind,
    },
    /// Invalid chunk proof
    #[error("Invalid chunk proof")]
    InvalidChunkProof,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}
441
/// Parameters for stateless solution verification.
///
/// These only include the information already contained in the block itself, meaning verification
/// of different blocks can be done concurrently.
///
/// Used by [`Solution::verify_stateless()`] and as part of [`SolutionVerifyFullParams`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyStatelessParams {
    /// Shard for which the solution is built
    pub shard_index: ShardIndex,
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Shard membership entropy
    pub shard_membership_entropy: ShardMembershipEntropy,
    /// The number of shards in the network
    pub num_shards: NumShards,
}
460
/// Parameters for checking piece validity used in a solution.
///
/// Unlike [`SolutionVerifyStatelessParams`], these depend on chain state (current history size,
/// super segment roots).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Super segment root of the segment to which piece belongs
    pub super_segment_root: SuperSegmentRoot,
    /// Number of segments in the super segment
    pub num_segments: u32,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Super segment root that contains a segment at `min_sector_lifetime` from sector creation
    /// (if exists)
    pub sector_expiration_check_super_segment_root: Option<SuperSegmentRoot>,
}
483
/// Parameters for full solution verification, combining the stateless and stateful (piece) parts.
///
/// Used by [`Solution::verify_full()`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyFullParams {
    /// Parameters for stateless solution verification
    pub stateless: SolutionVerifyStatelessParams,
    /// Parameters for checking piece validity used in a solution
    pub piece: SolutionVerifyPieceParams,
}
493
/// Proof-of-time verifier to be used in [`Solution::verify_full()`]
///
/// NOTE(review): despite "Pot" in the name, the only method verifies a proof-of-space
/// ([`PosProof`] against a [`PosSeed`]) — confirm whether the trait name/doc should say
/// proof-of-space instead.
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, s_bucket: SBucket, proof: &PosProof) -> bool;
}
499
/// Entropy used for shard membership assignment.
///
/// Fixed-size byte array of [`PotOutput::SIZE`] bytes (see [`ShardMembershipEntropy::SIZE`]),
/// rendered as lowercase hex by `Display`/`Debug`.
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardMembershipEntropy([u8; ShardMembershipEntropy::SIZE]);
521
522impl fmt::Display for ShardMembershipEntropy {
523    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
524        for byte in self.0 {
525            write!(f, "{byte:02x}")?;
526        }
527        Ok(())
528    }
529}
530
// Binary wire form of shard membership entropy (raw bytes)
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyBinary([u8; ShardMembershipEntropy::SIZE]);

// Human-readable (hex) wire form of shard membership entropy; no transmutes involved, so no
// explicit repr is needed here
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyHex(#[serde(with = "hex")] [u8; ShardMembershipEntropy::SIZE]);
540
#[cfg(feature = "serde")]
impl Serialize for ShardMembershipEntropy {
    /// Hex string for human-readable formats, raw bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match serializer.is_human_readable() {
            true => ShardMembershipEntropyHex(self.0).serialize(serializer),
            false => ShardMembershipEntropyBinary(self.0).serialize(serializer),
        }
    }
}
555
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardMembershipEntropy {
    /// Hex string for human-readable formats, raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes = match deserializer.is_human_readable() {
            true => ShardMembershipEntropyHex::deserialize(deserializer)?.0,
            false => ShardMembershipEntropyBinary::deserialize(deserializer)?.0,
        };
        Ok(Self(bytes))
    }
}
570
571impl fmt::Debug for ShardMembershipEntropy {
572    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
573        for byte in self.0 {
574            write!(f, "{byte:02x}")?;
575        }
576        Ok(())
577    }
578}
579
580impl AsRef<[u8]> for ShardMembershipEntropy {
581    #[inline(always)]
582    fn as_ref(&self) -> &[u8] {
583        &self.0
584    }
585}
586
587impl AsMut<[u8]> for ShardMembershipEntropy {
588    #[inline(always)]
589    fn as_mut(&mut self) -> &mut [u8] {
590        &mut self.0
591    }
592}
593
impl ShardMembershipEntropy {
    /// Size in bytes
    pub const SIZE: usize = PotOutput::SIZE;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(bytes: [u8; Self::SIZE]) -> Self {
        Self(bytes)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardMembershipEntropy` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardMembershipEntropy` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }
}
626
/// Reduced hash used for shard assignment.
///
/// Currently 32 bytes (see [`ShardCommitmentHash::SIZE`] and its TODO about a future reduction);
/// rendered as lowercase hex by `Display`/`Debug`.
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardCommitmentHash([u8; ShardCommitmentHash::SIZE]);
648
649impl fmt::Display for ShardCommitmentHash {
650    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
651        for byte in self.0 {
652            write!(f, "{byte:02x}")?;
653        }
654        Ok(())
655    }
656}
657
// Binary wire form of a shard commitment hash (raw bytes)
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashBinary([u8; ShardCommitmentHash::SIZE]);

// Human-readable (hex) wire form of a shard commitment hash; no transmutes involved, so no
// explicit repr is needed here
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashHex(#[serde(with = "hex")] [u8; ShardCommitmentHash::SIZE]);
667
#[cfg(feature = "serde")]
impl Serialize for ShardCommitmentHash {
    /// Hex string for human-readable formats, raw bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match serializer.is_human_readable() {
            true => ShardCommitmentHashHex(self.0).serialize(serializer),
            false => ShardCommitmentHashBinary(self.0).serialize(serializer),
        }
    }
}
682
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardCommitmentHash {
    /// Hex string for human-readable formats, raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes = match deserializer.is_human_readable() {
            true => ShardCommitmentHashHex::deserialize(deserializer)?.0,
            false => ShardCommitmentHashBinary::deserialize(deserializer)?.0,
        };
        Ok(Self(bytes))
    }
}
697
698impl fmt::Debug for ShardCommitmentHash {
699    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
700        for byte in self.0 {
701            write!(f, "{byte:02x}")?;
702        }
703        Ok(())
704    }
705}
706
707impl AsRef<[u8]> for ShardCommitmentHash {
708    #[inline(always)]
709    fn as_ref(&self) -> &[u8] {
710        &self.0
711    }
712}
713
714impl AsMut<[u8]> for ShardCommitmentHash {
715    #[inline(always)]
716    fn as_mut(&mut self) -> &mut [u8] {
717        &mut self.0
718    }
719}
720
721impl From<Hash> for ShardCommitmentHash {
722    #[inline(always)]
723    fn from(value: Hash) -> Self {
724        let bytes = value.as_bytes();
725        Self(*bytes)
726        // Self([
727        //     bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
728        // ])
729    }
730}
731
impl ShardCommitmentHash {
    // TODO: Reduce to 8 bytes once Merkle Tree implementation exists that produces such hashes
    /// Size in bytes
    pub const SIZE: usize = 32;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(hash: [u8; Self::SIZE]) -> Self {
        Self(hash)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion from array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn array_from_repr<const N: usize>(value: [[u8; Self::SIZE]; N]) -> [Self; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/152507
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout (`transmute_copy` is a workaround for transmute between const-generic-sized
        // arrays; source and destination have identical size)
        unsafe { mem::transmute_copy(&value) }
    }

    /// Convenient conversion to a slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to an array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_array<const N: usize>(value: [Self; N]) -> [[u8; Self::SIZE]; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/152507
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout (`transmute_copy` is a workaround for transmute between const-generic-sized
        // arrays; source and destination have identical size)
        unsafe { mem::transmute_copy(&value) }
    }
}
783
/// Information about shard commitments in the solution.
///
/// The proof length is `log2(NUM_LEAVES)` hashes, one per level of the balanced Merkle tree.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct SolutionShardCommitment {
    /// Root of the Merkle Tree of shard commitments
    pub root: ShardCommitmentHash,
    /// Proof for the shard commitment used in the solution
    pub proof: [ShardCommitmentHash; SolutionShardCommitment::NUM_LEAVES.ilog2() as usize],
    /// Shard commitment leaf used for the solution
    pub leaf: ShardCommitmentHash,
}
798
799impl SolutionShardCommitment {
800    /// Number of leaves in a Merkle Tree of shard commitments
801    pub const NUM_LEAVES: usize = 2_u32.pow(20) as usize;
802}
803
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Farmer's shard commitment
    pub shard_commitment: SolutionShardCommitment,
    /// Local segment index of the piece
    pub piece_local_segment_index: LocalSegmentIndex,
    /// Super segment index
    pub piece_super_segment_index: SuperSegmentIndex,
    /// Segment root
    pub segment_root: SegmentRoot,
    /// Segment proof
    pub segment_proof: SegmentProof,
    /// Record root that can be used to verify that the piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof that the record (root) belongs to a segment
    pub record_proof: RecordProof,
    /// Chunk at the below piece offset
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Position of the segment in the super segment
    pub segment_position: SegmentPosition,
    /// Shard index on which the piece was archived
    pub piece_shard_index: ShardIndex,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
846
impl Solution {
    /// Fake solution for the genesis block
    ///
    /// Every field is a placeholder/default value (default public key hash, zeroed commitments,
    /// proofs and indices). This is only meant to give the genesis block a structurally valid
    /// solution; it is not expected to pass verification.
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Ed25519PublicKey::default().hash(),
            shard_commitment: SolutionShardCommitment {
                root: Default::default(),
                // `_` array length is inferred from the field type (nightly feature)
                proof: [Default::default(); _],
                leaf: Default::default(),
            },
            piece_local_segment_index: LocalSegmentIndex::ZERO,
            piece_super_segment_index: SuperSegmentIndex::ZERO,
            segment_root: SegmentRoot::default(),
            segment_proof: SegmentProof::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            segment_position: SegmentPosition::default(),
            piece_shard_index: ShardIndex::BEACON_CHAIN,
            padding: [0; _],
        }
    }

    /// Check solution validity
    ///
    /// Runs both stateless verification (everything in [`Self::verify_stateless()`]) and piece
    /// verification (everything in [`Self::verify_piece()`]), deriving the sector ID once and
    /// sharing it between the two, which is cheaper than calling them separately.
    ///
    /// Returns `Ok(())` for a fully valid solution, otherwise the first
    /// [`SolutionVerifyError`] encountered (stateless checks run before piece checks).
    pub fn verify_full<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyFullParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        // Sector ID is a commitment to who plotted the sector and when (history size)
        let sector_id = SectorId::new(
            &self.public_key_hash,
            &self.shard_commitment.root,
            self.sector_index,
            self.history_size,
        );

        self.verify_stateless_inner::<PotVerifier>(&sector_id, slot, &params.stateless)?;

        self.verify_piece_inner(&sector_id, &params.piece)
    }

    /// Stateless solution verification.
    ///
    /// Checks most things, except checking that the piece belongs to the global history.
    ///
    /// For piece verification use [`Self::verify_piece()`] or call [`Self::verify_full()`] for more
    /// efficient verification of both at once.
    pub fn verify_stateless<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyStatelessParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        // Same derivation as in `verify_full()`; only the sector ID is needed here
        let sector_id = SectorId::new(
            &self.public_key_hash,
            &self.shard_commitment.root,
            self.sector_index,
            self.history_size,
        );

        self.verify_stateless_inner::<PotVerifier>(&sector_id, slot, params)
    }

    /// Shared implementation of the stateless checks, with the sector ID already derived.
    ///
    /// Checks, in order (the order determines which error is reported first):
    /// shard index validity, shard membership + solution-range adjustment, shard commitment
    /// Merkle proof, proof of space, solution distance against the (adjusted) solution range,
    /// and finally the chunk-in-record Merkle proof.
    fn verify_stateless_inner<PotVerifier>(
        &self,
        sector_id: &SectorId,
        slot: SlotNumber,
        params: &SolutionVerifyStatelessParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyStatelessParams {
            shard_index,
            proof_of_time,
            solution_range,
            shard_membership_entropy,
            num_shards,
        } = params;

        // The input shard index must map to a "real" shard kind; anything else is rejected
        // up front
        let shard_kind = shard_index
            .shard_kind()
            .and_then(|shard_kind| shard_kind.to_real())
            .ok_or(SolutionVerifyError::InvalidInputShard {
                shard_index: *shard_index,
                shard_kind: shard_index.shard_kind(),
            })?;

        // Derive which shard this farmer's plot belongs to and the index of its leaf within
        // the shard commitment tree (deterministic from public key hash, commitment root,
        // membership entropy and history size)
        let (solution_shard_index, shard_commitment_index) = num_shards
            .derive_shard_index_and_shard_commitment_index(
                &self.public_key_hash,
                &self.shard_commitment.root,
                shard_membership_entropy,
                self.history_size,
            );

        // Adjust solution range according to shard kind
        let solution_range = match shard_kind {
            // Beacon chain uses the global solution range as-is
            RealShardKind::BeaconChain => *solution_range,
            RealShardKind::IntermediateShard => {
                // For an intermediate shard the solution must come from one of its child
                // shards, i.e. the derived shard's parent must be the input shard
                if solution_shard_index.parent_shard() != Some(*shard_index) {
                    return Err(SolutionVerifyError::InvalidSolutionShard {
                        solution_shard_index,
                        solution_parent_shard_index: solution_shard_index.parent_shard(),
                        expected_shard_index: *shard_index,
                        expected_shard_kind: RealShardKind::IntermediateShard,
                    });
                }

                solution_range.to_intermediate_shard(*num_shards)
            }
            RealShardKind::LeafShard => {
                // For a leaf shard the derived shard must be exactly the input shard
                if solution_shard_index != *shard_index {
                    return Err(SolutionVerifyError::InvalidSolutionShard {
                        solution_shard_index,
                        solution_parent_shard_index: solution_shard_index.parent_shard(),
                        expected_shard_index: *shard_index,
                        expected_shard_kind: RealShardKind::LeafShard,
                    });
                }

                solution_range.to_leaf_shard(*num_shards)
            }
        };

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 1048576 is hardcoded here and below for
        //  compilation to succeed.
        const {
            assert!(SolutionShardCommitment::NUM_LEAVES == 1048576);
        }
        // Check that the claimed shard commitment leaf is present in the commitment tree at
        // the derived index
        if !BalancedMerkleTree::<1048576>::verify(
            &self.shard_commitment.root,
            &ShardCommitmentHash::repr_from_array(self.shard_commitment.proof),
            shard_commitment_index as usize,
            *self.shard_commitment.leaf,
        ) {
            return Err(SolutionVerifyError::InvalidShardCommitment);
        }

        // Per-slot audit challenge derived from proof of time, then specialized to this sector
        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index,
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        };

        // Un-mask the audited chunk: the plotted chunk is XORed with the proof-of-space hash
        // (lane-wise SIMD XOR over the chunk bytes)
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        // `solution_range` here is already adjusted for the shard kind above
        if !solution_distance.is_within(solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
        //  to succeed.
        const {
            assert!(Record::NUM_S_BUCKETS == 65536);
        }
        // Check that chunk belongs to the record
        if !BalancedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        Ok(())
    }

    /// Verify the piece details of the solution
    ///
    /// Checks that the audited piece belongs to the archived global history (history size,
    /// piece offset, sector expiration, record-in-segment and segment-in-super-segment proofs).
    pub fn verify_piece(
        &self,
        piece_check_params: &SolutionVerifyPieceParams,
    ) -> Result<(), SolutionVerifyError> {
        // Same derivation as in `verify_full()`
        let sector_id = SectorId::new(
            &self.public_key_hash,
            &self.shard_commitment.root,
            self.sector_index,
            self.history_size,
        );

        self.verify_piece_inner(&sector_id, piece_check_params)
    }

    /// Shared implementation of the piece checks, with the sector ID already derived.
    fn verify_piece_inner(
        &self,
        sector_id: &SectorId,
        piece_check_params: &SolutionVerifyPieceParams,
    ) -> Result<(), SolutionVerifyError> {
        let SolutionVerifyPieceParams {
            max_pieces_in_sector,
            super_segment_root,
            num_segments,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_super_segment_root,
        } = piece_check_params;

        // The sector cannot have been plotted against history that doesn't exist yet
        if &self.history_size > current_history_size {
            return Err(SolutionVerifyError::FutureHistorySize {
                current: *current_history_size,
                solution: self.history_size,
            });
        }

        // Piece offset must be within the sector's capacity
        if u16::from(self.piece_offset) >= *max_pieces_in_sector {
            return Err(SolutionVerifyError::InvalidPieceOffset {
                piece_offset: u16::from(self.piece_offset),
                max_pieces_in_sector: *max_pieces_in_sector,
            });
        }

        // Expiration check is optional: only performed when the caller supplies the super
        // segment root used as expiration entropy
        if let Some(sector_expiration_check_super_segment_root) =
            sector_expiration_check_super_segment_root
        {
            let expiration_history_size = match sector_id.derive_expiration_history_size(
                self.history_size,
                sector_expiration_check_super_segment_root,
                *min_sector_lifetime,
            ) {
                Some(expiration_history_size) => expiration_history_size,
                None => {
                    return Err(SolutionVerifyError::InvalidHistorySize);
                }
            };

            // Sector expires once history reaches its derived expiration size; expired
            // sectors must be re-plotted and are not eligible for solutions
            if expiration_history_size <= *current_history_size {
                return Err(SolutionVerifyError::SectorExpired {
                    expiration_history_size,
                    current_history_size: *current_history_size,
                });
            }
        }

        // Recompute which piece of history this (sector, offset) pair must contain; the
        // position locates the record within its segment
        let position = sector_id
            .derive_piece_index(
                self.piece_offset,
                self.history_size,
                *max_pieces_in_sector,
                *recent_segments,
                *recent_history_fraction,
            )
            .position();

        // Check that record belongs to the segment
        if !self
            .record_root
            .is_valid(&self.segment_root, &self.record_proof, position)
        {
            return Err(SolutionVerifyError::RecordNotInSegment);
        }

        // Check that segment belongs to the super segment (global history)
        if !self.segment_root.is_valid(
            self.piece_shard_index,
            self.piece_local_segment_index,
            self.segment_position,
            &self.segment_proof,
            *num_segments,
            super_segment_root,
        ) {
            return Err(SolutionVerifyError::SegmentNotInSuperSegment);
        }

        Ok(())
    }
}