// ab_core_primitives/solutions.rs
1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::ed25519::Ed25519PublicKey;
5use crate::hashes::Blake3Hash;
6use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot, SegmentProof};
7use crate::pos::{PosProof, PosSeed};
8use crate::pot::{PotOutput, SlotNumber};
9use crate::sectors::{SBucket, SectorId, SectorIndex, SectorSlotChallenge};
10use crate::segments::{
11    HistorySize, LocalSegmentIndex, SegmentIndex, SegmentPosition, SegmentRoot, SuperSegmentIndex,
12    SuperSegmentRoot,
13};
14use crate::shard::{NumShards, RealShardKind, ShardIndex, ShardKind};
15use ab_blake3::single_block_keyed_hash;
16use ab_io_type::trivial_type::TrivialType;
17use ab_merkle_tree::balanced::BalancedMerkleTree;
18use blake3::{Hash, OUT_LEN};
19use core::simd::Simd;
20use core::{fmt, mem};
21use derive_more::{
22    Add, AddAssign, AsMut, AsRef, Deref, DerefMut, Display, From, Into, Sub, SubAssign,
23};
24#[cfg(feature = "scale-codec")]
25use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
26#[cfg(feature = "serde")]
27use serde::{Deserialize, Serialize};
28#[cfg(feature = "serde")]
29use serde::{Deserializer, Serializer};
30#[cfg(feature = "serde")]
31use serde_big_array::BigArray;
32
/// Solution distance.
///
/// Absolute (bidirectional, wrapping) distance between the global challenge and an audit chunk,
/// see [`SolutionDistance::calculate()`]; compared against half of a [`SolutionRange`] in
/// [`SolutionDistance::is_within()`].
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
41
42impl SolutionDistance {
43    /// Maximum value
44    pub const MAX: Self = Self(u64::MAX / 2);
45
46    // TODO: Remove once `From` is stable
47    /// Create a new instance
48    #[inline(always)]
49    pub const fn from_u64(n: u64) -> Self {
50        Self(n)
51    }
52
53    /// Calculate solution distance for given parameters.
54    ///
55    /// Typically used as a primitive to check whether solution distance is within solution range
56    /// (see [`Self::is_within()`]).
57    pub fn calculate(
58        global_challenge: &Blake3Hash,
59        chunk: &[u8; 32],
60        sector_slot_challenge: &SectorSlotChallenge,
61    ) -> Self {
62        // TODO: Is keyed hash really needed here?
63        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
64            .expect("Less than a single block worth of bytes; qed");
65        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
66            audit_chunk[0],
67            audit_chunk[1],
68            audit_chunk[2],
69            audit_chunk[3],
70            audit_chunk[4],
71            audit_chunk[5],
72            audit_chunk[6],
73            audit_chunk[7],
74        ]);
75        let global_challenge_as_solution_range: SolutionRange =
76            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
77
78        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
79    }
80
81    /// Check if solution distance is within the provided solution range
82    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
83        self.0 <= u64::from(solution_range) / 2
84    }
85}
86
/// Solution range.
///
/// Defines a window around the global challenge: an audit chunk is a valid solution when its
/// [`SolutionDistance`] from the challenge is within half of this range.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
109
// Const conversion from the raw `u64` representation, usable in const contexts
impl const From<u64> for SolutionRange {
    #[inline(always)]
    fn from(value: u64) -> Self {
        Self(value)
    }
}
116
// Const conversion back to the raw `u64` representation, usable in const contexts
impl const From<SolutionRange> for u64 {
    #[inline(always)]
    fn from(value: SolutionRange) -> Self {
        value.0
    }
}
123
124impl SolutionRange {
125    /// Size in bytes
126    pub const SIZE: usize = size_of::<u64>();
127    /// Minimum value
128    pub const MIN: Self = Self(u64::MIN);
129    /// Maximum value
130    pub const MAX: Self = Self(u64::MAX);
131
132    /// Create a new instance from bytes
133    #[inline(always)]
134    pub fn to_bytes(self) -> [u8; 8] {
135        self.0.to_le_bytes()
136    }
137
138    /// Create a new instance from bytes
139    #[inline(always)]
140    pub fn from_bytes(bytes: [u8; 8]) -> Self {
141        Self(u64::from_le_bytes(bytes))
142    }
143
144    /// Computes the following:
145    /// ```text
146    /// MAX * slot_probability / chunks * s_buckets / pieces
147    /// ```
148    #[inline]
149    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
150        let solution_range = u64::MAX
151            // Account for slot probability
152            / slot_probability.1 * slot_probability.0
153            // Now take the probability of hitting occupied s-bucket in a piece into account
154            / Record::NUM_CHUNKS as u64
155            * Record::NUM_S_BUCKETS as u64;
156
157        // Take the number of pieces into account
158        Self(solution_range / pieces)
159    }
160
161    /// Computes the following:
162    /// ```text
163    /// MAX * slot_probability / chunks * s_buckets / solution_range
164    /// ```
165    #[inline]
166    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
167        let pieces = u64::MAX
168            // Account for slot probability
169            / slot_probability.1 * slot_probability.0
170            // Now take the probability of hitting occupied s-bucket in sector into account
171            / Record::NUM_CHUNKS as u64
172            * Record::NUM_S_BUCKETS as u64;
173
174        // Take solution range into account
175        pieces / self.0
176    }
177
178    /// Expands the global solution range to a solution range that corresponds to a leaf shard.
179    ///
180    /// Global solution range is updated based on the beacon chain information, while a farmer also
181    /// creates intermediate shard and leaf shard solutions with a wider solution range.
182    #[inline]
183    pub const fn to_leaf_shard(self, num_shards: NumShards) -> Self {
184        Self(
185            self.0
186                .saturating_mul(u64::from(num_shards.leaf_shards().get())),
187        )
188    }
189
190    /// Expands the global solution range to a solution range that corresponds to an intermediate
191    /// shard
192    #[inline]
193    pub const fn to_intermediate_shard(self, num_shards: NumShards) -> Self {
194        Self(
195            self.0
196                .saturating_mul(u64::from(num_shards.intermediate_shards().get())),
197        )
198    }
199
200    /// Bidirectional distance between two solution ranges
201    #[inline]
202    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
203        let a = self.0;
204        let b = other.0;
205        let diff = a.wrapping_sub(b);
206        let diff2 = b.wrapping_sub(a);
207        // Find smaller diff between 2 directions
208        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
209    }
210
211    /// Derives next solution range
212    #[inline]
213    pub fn derive_next(
214        self,
215        slots_in_last_interval: SlotNumber,
216        slot_probability: (u64, u64),
217        retarget_interval: BlockNumber,
218    ) -> Self {
219        // The idea here is to keep block production at the same pace while space pledged on the
220        // network changes. For this, we adjust the previous solution range according to actual and
221        // expected number of blocks per retarget interval.
222        //
223        // Below is code analogous to the following, but without using floats:
224        // ```rust
225        // let actual_slots_per_block = slots_in_last_interval as f64 / retarget_interval as f64;
226        // let expected_slots_per_block =
227        //     slot_probability.1 as f64 / slot_probability.0 as f64;
228        // let adjustment_factor =
229        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
230        //
231        // next_solution_range =
232        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
233        // ```
234        let current_solution_range = self.0;
235        let next_solution_range = u64::try_from(
236            u128::from(current_solution_range)
237                .saturating_mul(u128::from(slots_in_last_interval))
238                .saturating_mul(u128::from(slot_probability.0))
239                / u128::from(u64::from(retarget_interval))
240                / u128::from(slot_probability.1),
241        );
242
243        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
244            current_solution_range / 4,
245            current_solution_range.saturating_mul(4),
246        ))
247    }
248}
249
// Compile-time check that `from_pieces()` and `to_pieces()` are inverses of each other for a few
// representative inputs
const {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
}
256
/// Proof for chunk contained within a record.
///
/// A path of `NUM_HASHES` BLAKE3 hashes proving a chunk's inclusion in the record.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
262
263impl fmt::Debug for ChunkProof {
264    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
265        write!(f, "[")?;
266        for hash in self.0 {
267            for byte in hash {
268                write!(f, "{byte:02x}")?;
269            }
270            write!(f, ", ")?;
271        }
272        write!(f, "]")?;
273        Ok(())
274    }
275}
276
// Binary (non-human-readable) serde representation of `ChunkProof`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
281
// Hex-encoded serde representation of a single hash within `ChunkProof`.
//
// `#[repr(C)]` is required for the layout guarantee that the `unsafe` transmutes in
// `ChunkProof`'s `Serialize`/`Deserialize` impls rely on; their SAFETY comments already claimed
// it, but the attribute was missing, leaving the layout unspecified (`repr(Rust)`).
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
286
// Hex-encoded (human-readable) serde representation of `ChunkProof`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
291
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    // Hex via `ChunkProofHex` for human-readable formats (e.g. JSON), raw bytes via
    // `ChunkProofBinary` otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // Wrap each hash into the hex-encoding newtype; `array::map` replaces the previous
            // `unsafe` transmute with safe code at the cost of a copy of the same size
            ChunkProofHex(self.0.map(ChunkProofHexHash)).serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
314
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    // Mirror of `Serialize`: hex for human-readable formats, raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // Unwrap each hex-encoding newtype; `array::map` replaces the previous `unsafe`
            // transmute with safe code at the cost of a copy of the same size
            ChunkProofHex::deserialize(deserializer)?.0.map(|hash| hash.0)
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
336
337impl Default for ChunkProof {
338    #[inline]
339    fn default() -> Self {
340        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
341    }
342}
343
344impl AsRef<[u8]> for ChunkProof {
345    #[inline]
346    fn as_ref(&self) -> &[u8] {
347        self.0.as_flattened()
348    }
349}
350
351impl AsMut<[u8]> for ChunkProof {
352    #[inline]
353    fn as_mut(&mut self) -> &mut [u8] {
354        self.0.as_flattened_mut()
355    }
356}
357
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // Number of hashes in a proof path over `Record::NUM_S_BUCKETS` leaves (`log2` of the leaf
    // count)
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
363
/// Solution verification errors
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum SolutionVerifyError {
    /// Invalid piece offset
    // NOTE(review): the display message reads more generically than the variant name and does not
    // include the field values — confirm the intended wording
    #[error("Piece verification failed")]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// History size is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error("Sector expired")]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Record does not belong to the segment
    #[error("Record does not belong to the segment")]
    RecordNotInSegment,
    /// Segment doesn't belong to the super segment
    #[error("Segment doesn't belong to the super segment")]
    SegmentNotInSuperSegment,
    /// Solution is outside the solution range
    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
    OutsideSolutionRange {
        /// Solution range
        solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionDistance,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid shard commitment
    #[error("Invalid shard commitment")]
    InvalidShardCommitment,
    /// Invalid input shard
    #[error("Invalid input shard {shard_index} ({shard_kind:?})")]
    InvalidInputShard {
        /// Input shard index
        shard_index: ShardIndex,
        /// Input shard kind
        shard_kind: Option<ShardKind>,
    },
    /// Invalid solution shard
    #[error(
        "Invalid solution shard {solution_shard_index} (parent {solution_parent_shard_index:?}), \
        expected shard {expected_shard_index} ({expected_shard_kind:?})"
    )]
    InvalidSolutionShard {
        /// Solution shard index
        solution_shard_index: ShardIndex,
        /// Parent shard index of the solution shard (if any)
        solution_parent_shard_index: Option<ShardIndex>,
        /// Expected shard index
        expected_shard_index: ShardIndex,
        /// Expected shard kind
        expected_shard_kind: RealShardKind,
    },
    /// Invalid chunk proof
    #[error("Invalid chunk proof")]
    InvalidChunkProof,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}
441
/// Parameters for stateless solution verification.
///
/// These only include the information already contained in the block itself, meaning verification
/// of different blocks can be done concurrently.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyStatelessParams {
    /// Shard for which the solution is built
    pub shard_index: ShardIndex,
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Shard membership entropy
    pub shard_membership_entropy: ShardMembershipEntropy,
    /// The number of shards in the network
    pub num_shards: NumShards,
}
460
/// Parameters for checking piece validity used in a solution
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Super segment root of the segment to which piece belongs
    pub super_segment_root: SuperSegmentRoot,
    /// Number of segments in the super segment
    pub num_segments: u32,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
482
/// Parameters for full solution verification.
///
/// Combines the stateless parameters with the piece-validity parameters, matching the two steps
/// performed by [`Solution::verify_full()`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyFullParams {
    /// Parameters for stateless solution verification
    pub stateless: SolutionVerifyStatelessParams,
    /// Parameters for checking piece validity used in a solution
    pub piece: SolutionVerifyPieceParams,
}
492
/// Proof-of-time verifier to be used in [`Solution::verify_full()`]
// NOTE(review): despite the "Pot" name, the signature verifies a proof of space (`PosSeed`/
// `PosProof`) — confirm whether the name or the doc should be adjusted
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, s_bucket: SBucket, proof: &PosProof) -> bool;
}
498
/// Entropy used for shard membership assignment.
///
/// A fixed-size byte array the same size as [`PotOutput`] (see [`ShardMembershipEntropy::SIZE`]).
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardMembershipEntropy([u8; ShardMembershipEntropy::SIZE]);
520
521impl fmt::Display for ShardMembershipEntropy {
522    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
523        for byte in self.0 {
524            write!(f, "{byte:02x}")?;
525        }
526        Ok(())
527    }
528}
529
// Binary (non-human-readable) serde representation of `ShardMembershipEntropy`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyBinary([u8; ShardMembershipEntropy::SIZE]);
534
// Hex-encoded (human-readable) serde representation of `ShardMembershipEntropy`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyHex(#[serde(with = "hex")] [u8; ShardMembershipEntropy::SIZE]);
539
#[cfg(feature = "serde")]
impl Serialize for ShardMembershipEntropy {
    // Hex for human-readable formats (e.g. JSON), raw bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match serializer.is_human_readable() {
            true => ShardMembershipEntropyHex(self.0).serialize(serializer),
            false => ShardMembershipEntropyBinary(self.0).serialize(serializer),
        }
    }
}
554
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardMembershipEntropy {
    // Mirror of `Serialize`: hex for human-readable formats, raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes = match deserializer.is_human_readable() {
            true => ShardMembershipEntropyHex::deserialize(deserializer)?.0,
            false => ShardMembershipEntropyBinary::deserialize(deserializer)?.0,
        };
        Ok(Self(bytes))
    }
}
569
570impl fmt::Debug for ShardMembershipEntropy {
571    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
572        for byte in self.0 {
573            write!(f, "{byte:02x}")?;
574        }
575        Ok(())
576    }
577}
578
579impl AsRef<[u8]> for ShardMembershipEntropy {
580    #[inline(always)]
581    fn as_ref(&self) -> &[u8] {
582        &self.0
583    }
584}
585
586impl AsMut<[u8]> for ShardMembershipEntropy {
587    #[inline(always)]
588    fn as_mut(&mut self) -> &mut [u8] {
589        &mut self.0
590    }
591}
592
impl ShardMembershipEntropy {
    /// Size in bytes
    pub const SIZE: usize = PotOutput::SIZE;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(bytes: [u8; Self::SIZE]) -> Self {
        Self(bytes)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardMembershipEntropy` is a `#[repr(C)]` newtype around `[u8; Self::SIZE]`
        // and guaranteed to have the same memory layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardMembershipEntropy` is a `#[repr(C)]` newtype around `[u8; Self::SIZE]`
        // and guaranteed to have the same memory layout
        unsafe { mem::transmute(value) }
    }
}
625
/// Reduced hash used for shard assignment.
///
/// Currently a full 32-byte hash; see the TODO on [`ShardCommitmentHash::SIZE`] about eventually
/// reducing it.
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardCommitmentHash([u8; ShardCommitmentHash::SIZE]);
647
648impl fmt::Display for ShardCommitmentHash {
649    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
650        for byte in self.0 {
651            write!(f, "{byte:02x}")?;
652        }
653        Ok(())
654    }
655}
656
// Binary (non-human-readable) serde representation of `ShardCommitmentHash`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashBinary([u8; ShardCommitmentHash::SIZE]);
661
// Hex-encoded (human-readable) serde representation of `ShardCommitmentHash`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashHex(#[serde(with = "hex")] [u8; ShardCommitmentHash::SIZE]);
666
#[cfg(feature = "serde")]
impl Serialize for ShardCommitmentHash {
    // Hex for human-readable formats (e.g. JSON), raw bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match serializer.is_human_readable() {
            true => ShardCommitmentHashHex(self.0).serialize(serializer),
            false => ShardCommitmentHashBinary(self.0).serialize(serializer),
        }
    }
}
681
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardCommitmentHash {
    // Mirror of `Serialize`: hex for human-readable formats, raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes = match deserializer.is_human_readable() {
            true => ShardCommitmentHashHex::deserialize(deserializer)?.0,
            false => ShardCommitmentHashBinary::deserialize(deserializer)?.0,
        };
        Ok(Self(bytes))
    }
}
696
697impl fmt::Debug for ShardCommitmentHash {
698    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
699        for byte in self.0 {
700            write!(f, "{byte:02x}")?;
701        }
702        Ok(())
703    }
704}
705
706impl AsRef<[u8]> for ShardCommitmentHash {
707    #[inline(always)]
708    fn as_ref(&self) -> &[u8] {
709        &self.0
710    }
711}
712
713impl AsMut<[u8]> for ShardCommitmentHash {
714    #[inline(always)]
715    fn as_mut(&mut self) -> &mut [u8] {
716        &mut self.0
717    }
718}
719
720impl From<Hash> for ShardCommitmentHash {
721    #[inline(always)]
722    fn from(value: Hash) -> Self {
723        let bytes = value.as_bytes();
724        Self(*bytes)
725        // Self([
726        //     bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
727        // ])
728    }
729}
730
impl ShardCommitmentHash {
    // TODO: Reduce to 8 bytes once Merkle Tree implementation exists that produces such hashes
    /// Size in bytes
    pub const SIZE: usize = 32;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(hash: [u8; Self::SIZE]) -> Self {
        Self(hash)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardCommitmentHash` is a `#[repr(C)]` newtype around `[u8; Self::SIZE]` and
        // guaranteed to have the same memory layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion from array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn array_from_repr<const N: usize>(value: [[u8; Self::SIZE]; N]) -> [Self; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/152507
        // SAFETY: `ShardCommitmentHash` is a `#[repr(C)]` newtype around `[u8; Self::SIZE]` and
        // guaranteed to have the same memory layout
        unsafe { mem::transmute_copy(&value) }
    }

    /// Convenient conversion to a slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardCommitmentHash` is a `#[repr(C)]` newtype around `[u8; Self::SIZE]` and
        // guaranteed to have the same memory layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to an array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_array<const N: usize>(value: [Self; N]) -> [[u8; Self::SIZE]; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/152507
        // SAFETY: `ShardCommitmentHash` is a `#[repr(C)]` newtype around `[u8; Self::SIZE]` and
        // guaranteed to have the same memory layout
        unsafe { mem::transmute_copy(&value) }
    }
}
782
/// Information about shard commitments in the solution
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct SolutionShardCommitment {
    /// Root of the Merkle Tree of shard commitments
    pub root: ShardCommitmentHash,
    /// Proof for the shard commitment used in the solution (path length is `log2(NUM_LEAVES)`)
    pub proof: [ShardCommitmentHash; SolutionShardCommitment::NUM_LEAVES.ilog2() as usize],
    /// Shard commitment leaf used for the solution
    pub leaf: ShardCommitmentHash,
}
797
798impl SolutionShardCommitment {
799    /// Number of leaves in a Merkle Tree of shard commitments
800    pub const NUM_LEAVES: usize = 2_u32.pow(20) as usize;
801}
802
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Farmer's shard commitment
    pub shard_commitment: SolutionShardCommitment,
    /// Local segment index of the piece
    pub piece_local_segment_index: LocalSegmentIndex,
    /// Super segment index
    pub piece_super_segment_index: SuperSegmentIndex,
    /// Segment root
    pub segment_root: SegmentRoot,
    /// Segment proof
    pub segment_proof: SegmentProof,
    /// Record root that can be used to verify that the piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof that the record (root) belongs to a segment
    pub record_proof: RecordProof,
    /// Chunk at the below piece offset
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within the sector
    pub piece_offset: PieceOffset,
    /// Position of the segment in the super segment
    pub segment_position: SegmentPosition,
    /// Shard index on which the piece was archived
    pub piece_shard_index: ShardIndex,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
845
846impl Solution {
    /// Fake solution for the genesis block.
    ///
    /// All fields are zero/default placeholders on the beacon chain shard; this solution is not
    /// expected to pass verification.
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Ed25519PublicKey::default().hash(),
            shard_commitment: SolutionShardCommitment {
                root: Default::default(),
                // Array length is inferred from the field type
                proof: [Default::default(); _],
                leaf: Default::default(),
            },
            piece_local_segment_index: LocalSegmentIndex::ZERO,
            piece_super_segment_index: SuperSegmentIndex::ZERO,
            segment_root: SegmentRoot::default(),
            segment_proof: SegmentProof::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            segment_position: SegmentPosition::default(),
            piece_shard_index: ShardIndex::BEACON_CHAIN,
            padding: [0; _],
        }
    }
873
    /// Check solution validity.
    ///
    /// Performs stateless verification followed by piece verification, returning the first
    /// [`SolutionVerifyError`] encountered.
    pub fn verify_full<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyFullParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        // Sector ID is derived from the farmer identity and sector/history parameters once and
        // shared by both verification steps below
        let sector_id = SectorId::new(
            &self.public_key_hash,
            &self.shard_commitment.root,
            self.sector_index,
            self.history_size,
        );

        self.verify_stateless_inner::<PotVerifier>(&sector_id, slot, &params.stateless)?;

        self.verify_piece_inner(&sector_id, &params.piece)
    }
894
    /// Stateless solution verification.
    ///
    /// Checks most things, except checking that the piece belongs to the global history.
    ///
    /// For piece verification use [`Self::verify_piece()`] or call [`Self::verify_full()`] for more
    /// efficient verification of both at once.
    pub fn verify_stateless<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyStatelessParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        // Same sector ID derivation as in `verify_full()`
        let sector_id = SectorId::new(
            &self.public_key_hash,
            &self.shard_commitment.root,
            self.sector_index,
            self.history_size,
        );

        self.verify_stateless_inner::<PotVerifier>(&sector_id, slot, params)
    }
918
919    fn verify_stateless_inner<PotVerifier>(
920        &self,
921        sector_id: &SectorId,
922        slot: SlotNumber,
923        params: &SolutionVerifyStatelessParams,
924    ) -> Result<(), SolutionVerifyError>
925    where
926        PotVerifier: SolutionPotVerifier,
927    {
928        let SolutionVerifyStatelessParams {
929            shard_index,
930            proof_of_time,
931            solution_range,
932            shard_membership_entropy,
933            num_shards,
934        } = params;
935
936        let shard_kind = shard_index
937            .shard_kind()
938            .and_then(|shard_kind| shard_kind.to_real())
939            .ok_or(SolutionVerifyError::InvalidInputShard {
940                shard_index: *shard_index,
941                shard_kind: shard_index.shard_kind(),
942            })?;
943
944        let (solution_shard_index, shard_commitment_index) = num_shards
945            .derive_shard_index_and_shard_commitment_index(
946                &self.public_key_hash,
947                &self.shard_commitment.root,
948                shard_membership_entropy,
949                self.history_size,
950            );
951
952        // Adjust solution range according to shard kind
953        let solution_range = match shard_kind {
954            RealShardKind::BeaconChain => *solution_range,
955            RealShardKind::IntermediateShard => {
956                if solution_shard_index.parent_shard() != Some(*shard_index) {
957                    return Err(SolutionVerifyError::InvalidSolutionShard {
958                        solution_shard_index,
959                        solution_parent_shard_index: solution_shard_index.parent_shard(),
960                        expected_shard_index: *shard_index,
961                        expected_shard_kind: RealShardKind::IntermediateShard,
962                    });
963                }
964
965                solution_range.to_intermediate_shard(*num_shards)
966            }
967            RealShardKind::LeafShard => {
968                if solution_shard_index != *shard_index {
969                    return Err(SolutionVerifyError::InvalidSolutionShard {
970                        solution_shard_index,
971                        solution_parent_shard_index: solution_shard_index.parent_shard(),
972                        expected_shard_index: *shard_index,
973                        expected_shard_kind: RealShardKind::LeafShard,
974                    });
975                }
976
977                solution_range.to_leaf_shard(*num_shards)
978            }
979        };
980
981        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
982        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
983        //  to succeed.
984        const {
985            assert!(SolutionShardCommitment::NUM_LEAVES == 1048576);
986        }
987        if !BalancedMerkleTree::<1048576>::verify(
988            &self.shard_commitment.root,
989            &ShardCommitmentHash::repr_from_array(self.shard_commitment.proof),
990            shard_commitment_index as usize,
991            *self.shard_commitment.leaf,
992        ) {
993            return Err(SolutionVerifyError::InvalidShardCommitment);
994        }
995
996        let global_challenge = proof_of_time.derive_global_challenge(slot);
997        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
998        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();
999
1000        // Check that proof of space is valid
1001        if !PotVerifier::is_proof_valid(
1002            &sector_id.derive_evaluation_seed(self.piece_offset),
1003            s_bucket_audit_index,
1004            &self.proof_of_space,
1005        ) {
1006            return Err(SolutionVerifyError::InvalidProofOfSpace);
1007        };
1008
1009        let masked_chunk =
1010            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();
1011
1012        let solution_distance =
1013            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);
1014
1015        if !solution_distance.is_within(solution_range) {
1016            return Err(SolutionVerifyError::OutsideSolutionRange {
1017                solution_range,
1018                solution_distance,
1019            });
1020        }
1021
1022        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
1023        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
1024        //  to succeed.
1025        const {
1026            assert!(Record::NUM_S_BUCKETS == 65536);
1027        }
1028        // Check that chunk belongs to the record
1029        if !BalancedMerkleTree::<65536>::verify(
1030            &self.record_root,
1031            &self.chunk_proof,
1032            usize::from(s_bucket_audit_index),
1033            *self.chunk,
1034        ) {
1035            return Err(SolutionVerifyError::InvalidChunkProof);
1036        }
1037
1038        Ok(())
1039    }
1040
1041    /// Verify the piece details of the solution
1042    pub fn verify_piece(
1043        &self,
1044        piece_check_params: &SolutionVerifyPieceParams,
1045    ) -> Result<(), SolutionVerifyError> {
1046        let sector_id = SectorId::new(
1047            &self.public_key_hash,
1048            &self.shard_commitment.root,
1049            self.sector_index,
1050            self.history_size,
1051        );
1052
1053        self.verify_piece_inner(&sector_id, piece_check_params)
1054    }
1055
1056    fn verify_piece_inner(
1057        &self,
1058        sector_id: &SectorId,
1059        piece_check_params: &SolutionVerifyPieceParams,
1060    ) -> Result<(), SolutionVerifyError> {
1061        let SolutionVerifyPieceParams {
1062            max_pieces_in_sector,
1063            super_segment_root,
1064            num_segments,
1065            recent_segments,
1066            recent_history_fraction,
1067            min_sector_lifetime,
1068            current_history_size,
1069            sector_expiration_check_segment_root,
1070        } = piece_check_params;
1071
1072        if &self.history_size > current_history_size {
1073            return Err(SolutionVerifyError::FutureHistorySize {
1074                current: *current_history_size,
1075                solution: self.history_size,
1076            });
1077        }
1078
1079        if u16::from(self.piece_offset) >= *max_pieces_in_sector {
1080            return Err(SolutionVerifyError::InvalidPieceOffset {
1081                piece_offset: u16::from(self.piece_offset),
1082                max_pieces_in_sector: *max_pieces_in_sector,
1083            });
1084        }
1085
1086        if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root {
1087            let expiration_history_size = match sector_id.derive_expiration_history_size(
1088                self.history_size,
1089                sector_expiration_check_segment_root,
1090                *min_sector_lifetime,
1091            ) {
1092                Some(expiration_history_size) => expiration_history_size,
1093                None => {
1094                    return Err(SolutionVerifyError::InvalidHistorySize);
1095                }
1096            };
1097
1098            if expiration_history_size <= *current_history_size {
1099                return Err(SolutionVerifyError::SectorExpired {
1100                    expiration_history_size,
1101                    current_history_size: *current_history_size,
1102                });
1103            }
1104        }
1105
1106        let position = sector_id
1107            .derive_piece_index(
1108                self.piece_offset,
1109                self.history_size,
1110                *max_pieces_in_sector,
1111                *recent_segments,
1112                *recent_history_fraction,
1113            )
1114            .position();
1115
1116        // Check that record belongs to the segment
1117        if !self
1118            .record_root
1119            .is_valid(&self.segment_root, &self.record_proof, position)
1120        {
1121            return Err(SolutionVerifyError::RecordNotInSegment);
1122        }
1123
1124        // Check that segment belongs to the super segment (global history)
1125        if !self.segment_root.is_valid(
1126            self.piece_shard_index,
1127            self.piece_local_segment_index,
1128            self.segment_position,
1129            &self.segment_proof,
1130            *num_segments,
1131            super_segment_root,
1132        ) {
1133            return Err(SolutionVerifyError::SegmentNotInSuperSegment);
1134        }
1135
1136        Ok(())
1137    }
1138}