// ab_core_primitives/solutions.rs

//! Solutions-related data structures and functions.

use crate::block::BlockNumber;
use crate::ed25519::Ed25519PublicKey;
use crate::hashes::Blake3Hash;
use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
use crate::pos::{PosProof, PosSeed};
use crate::pot::{PotOutput, SlotNumber};
use crate::sectors::{SBucket, SectorId, SectorIndex, SectorSlotChallenge};
use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
use crate::shard::{NumShards, RealShardKind, ShardIndex, ShardKind};
use ab_blake3::single_block_keyed_hash;
use ab_io_type::trivial_type::TrivialType;
use ab_merkle_tree::balanced::BalancedMerkleTree;
use blake3::{Hash, OUT_LEN};
use core::simd::Simd;
use core::{fmt, mem};
use derive_more::{
    Add, AddAssign, AsMut, AsRef, Deref, DerefMut, Display, From, Into, Sub, SubAssign,
};
#[cfg(feature = "scale-codec")]
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde::{Deserializer, Serializer};
#[cfg(feature = "serde")]
use serde_big_array::BigArray;

/// Solution distance
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);

impl SolutionDistance {
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX / 2);

    // TODO: Remove once `From` is stable
    /// Create a new instance
    #[inline(always)]
    pub const fn from_u64(n: u64) -> Self {
        Self(n)
    }

    /// Calculate solution distance for given parameters.
    ///
    /// Typically used as a primitive to check whether solution distance is within solution range
    /// (see [`Self::is_within()`]).
    pub fn calculate(
        global_challenge: &Blake3Hash,
        chunk: &[u8; 32],
        sector_slot_challenge: &SectorSlotChallenge,
    ) -> Self {
        // TODO: Is keyed hash really needed here?
        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
            .expect("Less than a single block worth of bytes; qed");
        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
            audit_chunk[0],
            audit_chunk[1],
            audit_chunk[2],
            audit_chunk[3],
            audit_chunk[4],
            audit_chunk[5],
            audit_chunk[6],
            audit_chunk[7],
        ]);
        let global_challenge_as_solution_range: SolutionRange =
            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);

        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
    }

    /// Check if solution distance is within the provided solution range
    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
        self.0 <= u64::from(solution_range) / 2
    }
}
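
// Quick check of the `is_within` semantics: the solution range extends in both directions from
// the challenge, so the largest valid distance is exactly half of the range
const {
    assert!(SolutionDistance::MAX.is_within(SolutionRange::MAX));
    assert!(!SolutionDistance::from_u64(u64::MAX / 2 + 1).is_within(SolutionRange::MAX));
}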

/// Solution range
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);

impl const From<u64> for SolutionRange {
    #[inline(always)]
    fn from(value: u64) -> Self {
        Self(value)
    }
}

impl const From<SolutionRange> for u64 {
    #[inline(always)]
    fn from(value: SolutionRange) -> Self {
        value.0
    }
}

impl SolutionRange {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u64>();
    /// Minimum value
    pub const MIN: Self = Self(u64::MIN);
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX);

    /// Convert to little-endian bytes
    #[inline(always)]
    pub fn to_bytes(self) -> [u8; 8] {
        self.0.to_le_bytes()
    }

    /// Create a new instance from little-endian bytes
    #[inline(always)]
    pub fn from_bytes(bytes: [u8; 8]) -> Self {
        Self(u64::from_le_bytes(bytes))
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / pieces
    /// ```
    #[inline]
    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
        let solution_range = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take the number of pieces into account
        Self(solution_range / pieces)
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / solution_range
    /// ```
    #[inline]
    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
        let pieces = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take solution range into account
        pieces / self.0
    }

    /// Expands the global solution range to a solution range that corresponds to a leaf shard.
    ///
    /// The global solution range is updated based on the beacon chain information, while a farmer
    /// also creates intermediate shard and leaf shard solutions with a wider solution range.
    #[inline]
    pub const fn to_leaf_shard(self, num_shards: NumShards) -> Self {
        Self(
            self.0
                .saturating_mul(u64::from(num_shards.leaf_shards().get())),
        )
    }

    /// Expands the global solution range to a solution range that corresponds to an intermediate
    /// shard
    #[inline]
    pub const fn to_intermediate_shard(self, num_shards: NumShards) -> Self {
        Self(
            self.0
                .saturating_mul(u64::from(num_shards.intermediate_shards().get())),
        )
    }

    /// Bidirectional distance between two solution ranges
    #[inline]
    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
        let a = self.0;
        let b = other.0;
        let diff = a.wrapping_sub(b);
        let diff2 = b.wrapping_sub(a);
        // Find smaller diff between 2 directions
        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
    }

    /// Derives next solution range
    #[inline]
    pub fn derive_next(
        self,
        slots_in_last_interval: SlotNumber,
        slot_probability: (u64, u64),
        retarget_interval: BlockNumber,
    ) -> Self {
        // The idea here is to keep block production at the same pace while space pledged on the
        // network changes. For this, we adjust the previous solution range according to actual and
        // expected number of blocks per retarget interval.
        //
        // Below is code analogous to the following, but without using floats:
        // ```rust
        // let actual_slots_per_block = slots_in_last_interval as f64 / retarget_interval as f64;
        // let expected_slots_per_block =
        //     slot_probability.1 as f64 / slot_probability.0 as f64;
        // let adjustment_factor =
        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
        //
        // next_solution_range =
        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
        // ```
        let current_solution_range = self.0;
        let next_solution_range = u64::try_from(
            u128::from(current_solution_range)
                .saturating_mul(u128::from(slots_in_last_interval))
                .saturating_mul(u128::from(slot_probability.0))
                / u128::from(u64::from(retarget_interval))
                / u128::from(slot_probability.1),
        );

        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
            current_solution_range / 4,
            current_solution_range.saturating_mul(4),
        ))
    }
}

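// A quick test of the wrap-around behavior of `bidirectional_distance`: values at the opposite
// ends of the `u64` range are only distance 1 apart, measured through the wrap-around point
// rather than across the whole range
#[cfg(test)]
mod bidirectional_distance_sanity {
    use super::*;

    #[test]
    fn distance_wraps_around() {
        let distance =
            SolutionRange::from(u64::MAX).bidirectional_distance(SolutionRange::from(0));
        assert_eq!(distance, SolutionDistance::from_u64(1));
        // The distance is symmetric
        assert_eq!(
            SolutionRange::from(0).bidirectional_distance(SolutionRange::from(u64::MAX)),
            SolutionDistance::from_u64(1),
        );
    }
}
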
// Quick test to ensure the functions above are the inverse of each other
const {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
}
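
// A small sanity test for `derive_next` with hypothetical numbers; it assumes `new` constructors
// on `SlotNumber` and `BlockNumber`, adjust to the actual constructors if they differ
#[cfg(test)]
mod derive_next_sanity {
    use super::*;

    #[test]
    fn scales_with_actual_slots_and_clamps() {
        let current = SolutionRange::from(1000_u64);
        // With slot probability 1/6, 100 blocks are expected to take 600 slots; 1200 actual
        // slots means blocks came twice as slowly, so the solution range doubles
        let next = current.derive_next(SlotNumber::new(1200), (1, 6), BlockNumber::new(100));
        assert_eq!(u64::from(next), 2000);

        // No matter how slow blocks were, the adjustment factor is clamped at 4x
        let next = current.derive_next(SlotNumber::new(60_000), (1, 6), BlockNumber::new(100));
        assert_eq!(u64::from(next), 4000);
    }
}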

/// Proof for chunk contained within a record.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);

impl fmt::Debug for ChunkProof {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[")?;
        for hash in self.0 {
            for byte in hash {
                write!(f, "{byte:02x}")?;
            }
            write!(f, ", ")?;
        }
        write!(f, "]")?;
        Ok(())
    }
}

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
// `#[repr(C)]` backs the layout guarantee that the `transmute`s in the `Serialize`/`Deserialize`
// implementations below rely on
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);

#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            ChunkProofHex(unsafe {
                mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            unsafe {
                mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}

impl Default for ChunkProof {
    #[inline]
    fn default() -> Self {
        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
    }
}

impl AsRef<[u8]> for ChunkProof {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.0.as_flattened()
    }
}

impl AsMut<[u8]> for ChunkProof {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.0.as_flattened_mut()
    }
}

impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
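
// Quick check that the proof length matches the depth of a Merkle tree over
// `Record::NUM_S_BUCKETS` (65536) leaves: log2(65536) == 16 hashes
const {
    assert!(ChunkProof::NUM_HASHES == 16);
    assert!(ChunkProof::SIZE == OUT_LEN * 16);
}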

/// Solution verification errors
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum SolutionVerifyError {
    /// Invalid piece offset
    #[error("Invalid piece offset {piece_offset}, max pieces in sector is {max_pieces_in_sector}")]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// History size is in the future
    #[error("History size {solution} is in the future, current is {current}")]
    FutureHistorySize {
        /// Current history size
        current: HistorySize,
        /// History size solution was created for
        solution: HistorySize,
    },
    /// Sector expired
    #[error("Sector expired")]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside the solution range
    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
    OutsideSolutionRange {
        /// Solution range
        solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionDistance,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid shard commitment
    #[error("Invalid shard commitment")]
    InvalidShardCommitment,
    /// Invalid input shard
    #[error("Invalid input shard {shard_index} ({shard_kind:?})")]
    InvalidInputShard {
        /// Input shard index
        shard_index: ShardIndex,
        /// Input shard kind
        shard_kind: Option<ShardKind>,
    },
    /// Invalid solution shard
    #[error(
        "Invalid solution shard {solution_shard_index} (parent {solution_parent_shard_index:?}), \
        expected shard {expected_shard_index} ({expected_shard_kind:?})"
    )]
    InvalidSolutionShard {
        /// Solution shard index
        solution_shard_index: ShardIndex,
        /// Parent shard index of the solution shard
        solution_parent_shard_index: Option<ShardIndex>,
        /// Expected shard index
        expected_shard_index: ShardIndex,
        /// Expected shard kind
        expected_shard_kind: RealShardKind,
    },
    /// Invalid chunk proof
    #[error("Invalid chunk proof")]
    InvalidChunkProof,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}

/// Parameters for checking piece validity
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which the piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}

/// Parameters for solution verification
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Shard for which the solution is built
    pub shard_index: ShardIndex,
    /// Proof of time for which the solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Shard membership entropy
    pub shard_membership_entropy: ShardMembershipEntropy,
    /// The number of shards in the network
    pub num_shards: NumShards,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}

/// Proof-of-time verifier to be used in [`Solution::verify()`]
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, s_bucket: SBucket, proof: &PosProof) -> bool;
}
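
// A minimal sketch of a verifier implementation, for illustration only: a real implementation is
// expected to run actual proof-of-space verification for the given seed, s-bucket and proof
// instead of unconditionally accepting
#[cfg(test)]
struct AcceptAllPotVerifier;

#[cfg(test)]
impl SolutionPotVerifier for AcceptAllPotVerifier {
    fn is_proof_valid(_seed: &PosSeed, _s_bucket: SBucket, _proof: &PosProof) -> bool {
        // Accept every proof; only suitable for tests
        true
    }
}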

/// Entropy used for shard membership assignment
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardMembershipEntropy([u8; ShardMembershipEntropy::SIZE]);

impl fmt::Display for ShardMembershipEntropy {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for byte in self.0 {
            write!(f, "{byte:02x}")?;
        }
        Ok(())
    }
}

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyBinary([u8; ShardMembershipEntropy::SIZE]);

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyHex(#[serde(with = "hex")] [u8; ShardMembershipEntropy::SIZE]);

#[cfg(feature = "serde")]
impl Serialize for ShardMembershipEntropy {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            ShardMembershipEntropyHex(self.0).serialize(serializer)
        } else {
            ShardMembershipEntropyBinary(self.0).serialize(serializer)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardMembershipEntropy {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            ShardMembershipEntropyHex::deserialize(deserializer)?.0
        } else {
            ShardMembershipEntropyBinary::deserialize(deserializer)?.0
        }))
    }
}

impl fmt::Debug for ShardMembershipEntropy {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for byte in self.0 {
            write!(f, "{byte:02x}")?;
        }
        Ok(())
    }
}

impl AsRef<[u8]> for ShardMembershipEntropy {
    #[inline(always)]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl AsMut<[u8]> for ShardMembershipEntropy {
    #[inline(always)]
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0
    }
}

impl ShardMembershipEntropy {
    /// Size in bytes
    pub const SIZE: usize = PotOutput::SIZE;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(bytes: [u8; Self::SIZE]) -> Self {
        Self(bytes)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardMembershipEntropy` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardMembershipEntropy` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }
}
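
// Quick check of the layout assumption behind `slice_from_repr`/`repr_from_slice`: the wrapper
// must have exactly the size and alignment of the inner byte array for the transmutes above to
// be sound
const {
    assert!(size_of::<ShardMembershipEntropy>() == ShardMembershipEntropy::SIZE);
    assert!(align_of::<ShardMembershipEntropy>() == 1);
}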

/// Reduced hash used for shard assignment
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardCommitmentHash([u8; ShardCommitmentHash::SIZE]);

impl fmt::Display for ShardCommitmentHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for byte in self.0 {
            write!(f, "{byte:02x}")?;
        }
        Ok(())
    }
}

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashBinary([u8; ShardCommitmentHash::SIZE]);

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashHex(#[serde(with = "hex")] [u8; ShardCommitmentHash::SIZE]);

#[cfg(feature = "serde")]
impl Serialize for ShardCommitmentHash {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            ShardCommitmentHashHex(self.0).serialize(serializer)
        } else {
            ShardCommitmentHashBinary(self.0).serialize(serializer)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardCommitmentHash {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            ShardCommitmentHashHex::deserialize(deserializer)?.0
        } else {
            ShardCommitmentHashBinary::deserialize(deserializer)?.0
        }))
    }
}

impl fmt::Debug for ShardCommitmentHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for byte in self.0 {
            write!(f, "{byte:02x}")?;
        }
        Ok(())
    }
}

impl AsRef<[u8]> for ShardCommitmentHash {
    #[inline(always)]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl AsMut<[u8]> for ShardCommitmentHash {
    #[inline(always)]
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0
    }
}

impl From<Hash> for ShardCommitmentHash {
    #[inline(always)]
    fn from(value: Hash) -> Self {
        let bytes = value.as_bytes();
        Self(*bytes)
        // Self([
        //     bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
        // ])
    }
}

impl ShardCommitmentHash {
    // TODO: Reduce to 8 bytes once Merkle Tree implementation exists that produces such hashes
    /// Size in bytes
    pub const SIZE: usize = 32;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(hash: [u8; Self::SIZE]) -> Self {
        Self(hash)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion from array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn array_from_repr<const N: usize>(value: [[u8; Self::SIZE]; N]) -> [Self; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/152507
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute_copy(&value) }
    }

    /// Convenient conversion to a slice of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to an array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_array<const N: usize>(value: [Self; N]) -> [[u8; Self::SIZE]; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/152507
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute_copy(&value) }
    }
}
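
// Quick check of the layout assumption behind the `repr` conversions above: the wrapper must
// have exactly the size and alignment of the inner byte array for the transmutes to be sound
const {
    assert!(size_of::<ShardCommitmentHash>() == ShardCommitmentHash::SIZE);
    assert!(align_of::<ShardCommitmentHash>() == 1);
}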

/// Information about shard commitments in the solution
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct SolutionShardCommitment {
    /// Root of the Merkle Tree of shard commitments
    pub root: ShardCommitmentHash,
    /// Proof for the shard commitment used in the solution
    pub proof: [ShardCommitmentHash; SolutionShardCommitment::NUM_LEAVES.ilog2() as usize],
    /// Shard commitment leaf used for the solution
    pub leaf: ShardCommitmentHash,
}

impl SolutionShardCommitment {
    /// Number of leaves in a Merkle Tree of shard commitments
    pub const NUM_LEAVES: usize = 2_u32.pow(20) as usize;
}
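
// Quick check that the number of leaves is the expected power of two and that the proof length
// used in `SolutionShardCommitment::proof` corresponds to a tree of that depth (2^20 -> 20)
const {
    assert!(SolutionShardCommitment::NUM_LEAVES == 1 << 20);
    assert!(SolutionShardCommitment::NUM_LEAVES.ilog2() == 20);
}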

/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Hash of the public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Farmer's shard commitment
    pub shard_commitment: SolutionShardCommitment,
    /// Record root that can be used to verify that the piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk at the piece offset below
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}

impl Solution {
    /// Fake solution for the genesis block
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Ed25519PublicKey::default().hash(),
            shard_commitment: SolutionShardCommitment {
                root: Default::default(),
                proof: [Default::default(); _],
                leaf: Default::default(),
            },
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            padding: [0; _],
        }
    }

    /// Check solution validity
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            shard_index,
            proof_of_time,
            solution_range,
            shard_membership_entropy,
            num_shards,
            piece_check_params,
        } = params;

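        // Resolve the kind of the input shard, rejecting shard indices that do not correspond to
        // a real shard kind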
        let shard_kind = shard_index
            .shard_kind()
            .and_then(|shard_kind| shard_kind.to_real())
            .ok_or(SolutionVerifyError::InvalidInputShard {
                shard_index: *shard_index,
                shard_kind: shard_index.shard_kind(),
            })?;

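        // Re-derive the shard and the shard commitment leaf index this farmer is assigned to from
        // its public key hash, shard commitment root, membership entropy, and history size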
        let (solution_shard_index, shard_commitment_index) = num_shards
            .derive_shard_index_and_shard_commitment_index(
                &self.public_key_hash,
                &self.shard_commitment.root,
                shard_membership_entropy,
                self.history_size,
            );

        // Adjust solution range according to shard kind
        let solution_range = match shard_kind {
            RealShardKind::BeaconChain => *solution_range,
            RealShardKind::IntermediateShard => {
                if solution_shard_index.parent_shard() != Some(*shard_index) {
                    return Err(SolutionVerifyError::InvalidSolutionShard {
                        solution_shard_index,
                        solution_parent_shard_index: solution_shard_index.parent_shard(),
                        expected_shard_index: *shard_index,
                        expected_shard_kind: RealShardKind::IntermediateShard,
                    });
                }

                solution_range.to_intermediate_shard(*num_shards)
            }
            RealShardKind::LeafShard => {
                if solution_shard_index != *shard_index {
                    return Err(SolutionVerifyError::InvalidSolutionShard {
                        solution_shard_index,
                        solution_parent_shard_index: solution_shard_index.parent_shard(),
                        expected_shard_index: *shard_index,
                        expected_shard_kind: RealShardKind::LeafShard,
                    });
                }

                solution_range.to_leaf_shard(*num_shards)
            }
        };

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 1048576 is hardcoded here and below for
        //  compilation to succeed.
        const {
            assert!(SolutionShardCommitment::NUM_LEAVES == 1048576);
        }
        if !BalancedMerkleTree::<1048576>::verify(
            &self.shard_commitment.root,
            &ShardCommitmentHash::repr_from_array(self.shard_commitment.proof),
            shard_commitment_index as usize,
            *self.shard_commitment.leaf,
        ) {
            return Err(SolutionVerifyError::InvalidShardCommitment);
        }

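        // Reconstruct the ID of the sector the solution claims to come from; subsequent challenge
        // and seed derivation is tied to this ID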
        let sector_id = SectorId::new(
            &self.public_key_hash,
            &self.shard_commitment.root,
            self.sector_index,
            self.history_size,
        );

        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index,
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        }

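        // The audited value is the solution's chunk masked with the hash of the proof of space;
        // the unmasked chunk is checked against the record root separately below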
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
        //  to succeed.
        const {
            assert!(Record::NUM_S_BUCKETS == 65536);
        }
        // Check that chunk belongs to the record
        if !BalancedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            if &self.history_size > current_history_size {
                return Err(SolutionVerifyError::FutureHistorySize {
                    current: *current_history_size,
                    solution: self.history_size,
                });
            }

            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }

            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}