ab_core_primitives/
solutions.rs

1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::ed25519::Ed25519PublicKey;
5use crate::hashes::Blake3Hash;
6use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
7use crate::pos::{PosProof, PosSeed};
8use crate::pot::{PotOutput, SlotNumber};
9use crate::sectors::{SBucket, SectorId, SectorIndex, SectorSlotChallenge};
10use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
11use crate::shard::{NumShards, RealShardKind, ShardIndex, ShardKind};
12use ab_blake3::single_block_keyed_hash;
13use ab_io_type::trivial_type::TrivialType;
14use ab_merkle_tree::balanced::BalancedMerkleTree;
15use blake3::{Hash, OUT_LEN};
16use core::simd::Simd;
17use core::{fmt, mem};
18use derive_more::{
19    Add, AddAssign, AsMut, AsRef, Deref, DerefMut, Display, From, Into, Sub, SubAssign,
20};
21#[cfg(feature = "scale-codec")]
22use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
23#[cfg(feature = "serde")]
24use serde::{Deserialize, Serialize};
25#[cfg(feature = "serde")]
26use serde::{Deserializer, Serializer};
27#[cfg(feature = "serde")]
28use serde_big_array::BigArray;
29
/// Solution distance.
///
/// Measures how far an audit chunk is from the global challenge (see
/// [`SolutionDistance::calculate()`]); a solution is valid when this distance falls within the
/// current solution range (see [`SolutionDistance::is_within()`]).
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
38
39impl SolutionDistance {
40    /// Maximum value
41    pub const MAX: Self = Self(u64::MAX / 2);
42
43    // TODO: Remove once `From` is stable
44    /// Create a new instance
45    #[inline(always)]
46    pub const fn from_u64(n: u64) -> Self {
47        Self(n)
48    }
49
50    /// Calculate solution distance for given parameters.
51    ///
52    /// Typically used as a primitive to check whether solution distance is within solution range
53    /// (see [`Self::is_within()`]).
54    pub fn calculate(
55        global_challenge: &Blake3Hash,
56        chunk: &[u8; 32],
57        sector_slot_challenge: &SectorSlotChallenge,
58    ) -> Self {
59        // TODO: Is keyed hash really needed here?
60        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
61            .expect("Less than a single block worth of bytes; qed");
62        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
63            audit_chunk[0],
64            audit_chunk[1],
65            audit_chunk[2],
66            audit_chunk[3],
67            audit_chunk[4],
68            audit_chunk[5],
69            audit_chunk[6],
70            audit_chunk[7],
71        ]);
72        let global_challenge_as_solution_range: SolutionRange =
73            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
74
75        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
76    }
77
78    /// Check if solution distance is within the provided solution range
79    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
80        self.0 <= solution_range.as_u64() / 2
81    }
82}
83
/// Solution range.
///
/// The range of accepted distances from the global challenge for a valid solution; it is
/// re-derived over time (see [`SolutionRange::derive_next()`]) to keep block production at the
/// same pace as space pledged on the network changes.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
108
109impl SolutionRange {
110    /// Size in bytes
111    pub const SIZE: usize = size_of::<u64>();
112    /// Minimum value
113    pub const MIN: Self = Self(u64::MIN);
114    /// Maximum value
115    pub const MAX: Self = Self(u64::MAX);
116
117    // TODO: Remove once `From` is stable
118    /// Create a new instance
119    #[inline(always)]
120    pub const fn new(n: u64) -> Self {
121        Self(n)
122    }
123
124    // TODO: Remove once `From` is stable
125    /// Get internal representation
126    #[inline(always)]
127    pub const fn as_u64(self) -> u64 {
128        self.0
129    }
130
131    /// Create a new instance from bytes
132    #[inline(always)]
133    pub fn to_bytes(self) -> [u8; 8] {
134        self.0.to_le_bytes()
135    }
136
137    /// Create a new instance from bytes
138    #[inline(always)]
139    pub fn from_bytes(bytes: [u8; 8]) -> Self {
140        Self(u64::from_le_bytes(bytes))
141    }
142
143    /// Computes the following:
144    /// ```text
145    /// MAX * slot_probability / chunks * s_buckets / pieces
146    /// ```
147    #[inline]
148    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
149        let solution_range = u64::MAX
150            // Account for slot probability
151            / slot_probability.1 * slot_probability.0
152            // Now take the probability of hitting occupied s-bucket in a piece into account
153            / Record::NUM_CHUNKS as u64
154            * Record::NUM_S_BUCKETS as u64;
155
156        // Take the number of pieces into account
157        Self(solution_range / pieces)
158    }
159
160    /// Computes the following:
161    /// ```text
162    /// MAX * slot_probability / chunks * s_buckets / solution_range
163    /// ```
164    #[inline]
165    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
166        let pieces = u64::MAX
167            // Account for slot probability
168            / slot_probability.1 * slot_probability.0
169            // Now take the probability of hitting occupied s-bucket in sector into account
170            / Record::NUM_CHUNKS as u64
171            * Record::NUM_S_BUCKETS as u64;
172
173        // Take solution range into account
174        pieces / self.0
175    }
176
177    /// Expands the global solution range to a solution range that corresponds to a leaf shard.
178    ///
179    /// Global solution range is updated based on the beacon chain information, while a farmer also
180    /// creates intermediate shard and leaf shard solutions with a wider solution range.
181    #[inline]
182    pub const fn to_leaf_shard(self, num_shards: NumShards) -> Self {
183        Self(
184            self.0
185                .saturating_mul(u64::from(num_shards.leaf_shards().get())),
186        )
187    }
188
189    /// Expands the global solution range to a solution range that corresponds to an intermediate
190    /// shard
191    #[inline]
192    pub const fn to_intermediate_shard(self, num_shards: NumShards) -> Self {
193        Self(
194            self.0
195                .saturating_mul(u64::from(num_shards.intermediate_shards().get())),
196        )
197    }
198
199    /// Bidirectional distance between two solution ranges
200    #[inline]
201    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
202        let a = self.0;
203        let b = other.0;
204        let diff = a.wrapping_sub(b);
205        let diff2 = b.wrapping_sub(a);
206        // Find smaller diff between 2 directions
207        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
208    }
209
210    /// Derives next solution range
211    #[inline]
212    pub fn derive_next(
213        self,
214        slots_in_last_interval: SlotNumber,
215        slot_probability: (u64, u64),
216        retarget_interval: BlockNumber,
217    ) -> Self {
218        // The idea here is to keep block production at the same pace while space pledged on the
219        // network changes. For this, we adjust the previous solution range according to actual and
220        // expected number of blocks per retarget interval.
221        //
222        // Below is code analogous to the following, but without using floats:
223        // ```rust
224        // let actual_slots_per_block = slots_in_last_interval as f64 / retarget_interval as f64;
225        // let expected_slots_per_block =
226        //     slot_probability.1 as f64 / slot_probability.0 as f64;
227        // let adjustment_factor =
228        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
229        //
230        // next_solution_range =
231        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
232        // ```
233        let current_solution_range = self.0;
234        let next_solution_range = u64::try_from(
235            u128::from(current_solution_range)
236                .saturating_mul(u128::from(slots_in_last_interval))
237                .saturating_mul(u128::from(slot_probability.0))
238                / u128::from(u64::from(retarget_interval))
239                / u128::from(slot_probability.1),
240        );
241
242        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
243            current_solution_range / 4,
244            current_solution_range.saturating_mul(4),
245        ))
246    }
247}
248
// Quick test to ensure the functions above are the inverse of each other (checked at compile
// time as part of constant evaluation, so a mismatch fails the build)
const _: () = {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
};
255
/// Proof for chunk contained within a record.
///
/// A sequence of `NUM_HASHES` BLAKE3 hashes forming a Merkle path (see `Self::NUM_HASHES`).
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
261
262impl fmt::Debug for ChunkProof {
263    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
264        write!(f, "[")?;
265        for hash in self.0 {
266            for byte in hash {
267                write!(f, "{byte:02x}")?;
268            }
269            write!(f, ", ")?;
270        }
271        write!(f, "]")?;
272        Ok(())
273    }
274}
275
// Compact (non-human-readable) serde representation of [`ChunkProof`]; `BigArray` is needed
// because the array length exceeds serde's built-in array support
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
280
// Hex-encoded serde representation of a single hash inside [`ChunkProof`].
//
// `#[repr(C)]` is required: the `Serialize`/`Deserialize` impls for `ChunkProof` transmute
// between `[[u8; OUT_LEN]; N]` and `[ChunkProofHexHash; N]`, which is only guaranteed to be
// layout-compatible with an explicit representation (matches the convention used by
// `ShardMembershipEntropy` and `ShardCommitmentHash` in this file).
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
285
// Human-readable serde representation of [`ChunkProof`]: an array of hex-encoded hashes
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
290
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Hex-encoded hashes for human-readable formats (e.g. JSON), raw bytes otherwise
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is a single-field newtype wrapper around
            // `[u8; OUT_LEN]` with the same memory layout
            ChunkProofHex(unsafe {
                mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
313
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirrors `Serialize`: hex for human-readable formats, raw bytes otherwise
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is a single-field newtype wrapper around
            // `[u8; OUT_LEN]` with the same memory layout
            unsafe {
                mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
335
336impl Default for ChunkProof {
337    #[inline]
338    fn default() -> Self {
339        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
340    }
341}
342
343impl AsRef<[u8]> for ChunkProof {
344    #[inline]
345    fn as_ref(&self) -> &[u8] {
346        self.0.as_flattened()
347    }
348}
349
350impl AsMut<[u8]> for ChunkProof {
351    #[inline]
352    fn as_mut(&mut self) -> &mut [u8] {
353        self.0.as_flattened_mut()
354    }
355}
356
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // Number of hashes in the proof: the depth of a balanced Merkle tree with
    // `Record::NUM_S_BUCKETS` leaves
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
362
363/// Solution verification errors
364#[derive(Debug, Eq, PartialEq, thiserror::Error)]
365pub enum SolutionVerifyError {
366    /// Invalid piece offset
367    #[error("Piece verification failed")]
368    InvalidPieceOffset {
369        /// Index of the piece that failed verification
370        piece_offset: u16,
371        /// How many pieces one sector is supposed to contain (max)
372        max_pieces_in_sector: u16,
373    },
374    /// History size is in the future
375    #[error("History size {solution} is in the future, current is {current}")]
376    FutureHistorySize {
377        /// Current history size
378        current: HistorySize,
379        /// History size solution was created for
380        solution: HistorySize,
381    },
382    /// Sector expired
383    #[error("Sector expired")]
384    SectorExpired {
385        /// Expiration history size
386        expiration_history_size: HistorySize,
387        /// Current history size
388        current_history_size: HistorySize,
389    },
390    /// Piece verification failed
391    #[error("Piece verification failed")]
392    InvalidPiece,
393    /// Solution is outside the solution range
394    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
395    OutsideSolutionRange {
396        /// Solution range
397        solution_range: SolutionRange,
398        /// Solution distance
399        solution_distance: SolutionDistance,
400    },
401    /// Invalid proof of space
402    #[error("Invalid proof of space")]
403    InvalidProofOfSpace,
404    /// Invalid shard commitment
405    #[error("Invalid shard commitment")]
406    InvalidShardCommitment,
407    /// Invalid input shard
408    #[error("Invalid input shard {shard_index} ({shard_kind:?})")]
409    InvalidInputShard {
410        /// Input shard index
411        shard_index: ShardIndex,
412        /// Input shard kind
413        shard_kind: Option<ShardKind>,
414    },
415    /// Invalid solution shard
416    #[error(
417        "Invalid solution shard {solution_shard_index} (parent {solution_parent_shard_index:?}), \
418        expected shard {expected_shard_index} ({expected_shard_kind:?})"
419    )]
420    InvalidSolutionShard {
421        /// Solution shard index
422        solution_shard_index: ShardIndex,
423        /// Solution shard index
424        solution_parent_shard_index: Option<ShardIndex>,
425        /// Expected shard index
426        expected_shard_index: ShardIndex,
427        /// Expected shard kind
428        expected_shard_kind: RealShardKind,
429    },
430    /// Invalid chunk proof
431    #[error("Invalid chunk proof")]
432    InvalidChunkProof,
433    /// Invalid history size
434    #[error("Invalid history size")]
435    InvalidHistorySize,
436}
437
/// Parameters for checking piece validity.
///
/// Supplied via [`SolutionVerifyParams::piece_check_params`]; when absent there, the piece
/// validity check is skipped entirely.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
457
/// Parameters for solution verification (see [`Solution::verify()`])
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Shard for which the solution is built
    pub shard_index: ShardIndex,
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range (the global one; it is widened internally for shard kinds as needed)
    pub solution_range: SolutionRange,
    /// Shard membership entropy
    pub shard_membership_entropy: ShardMembershipEntropy,
    /// The number of shards in the network
    pub num_shards: NumShards,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}
477
/// Proof-of-time verifier to be used in [`Solution::verify()`]
// NOTE(review): despite the "Pot" (proof-of-time) name, the method below takes
// `PosSeed`/`PosProof`, i.e. it verifies a proof-of-space — confirm the intended naming
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, s_bucket: SBucket, proof: &PosProof) -> bool;
}
483
/// Entropy used for shard membership assignment.
///
/// Same size as [`PotOutput`] (see [`ShardMembershipEntropy::SIZE`]).
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardMembershipEntropy([u8; ShardMembershipEntropy::SIZE]);
505
506impl fmt::Display for ShardMembershipEntropy {
507    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
508        for byte in self.0 {
509            write!(f, "{byte:02x}")?;
510        }
511        Ok(())
512    }
513}
514
// Compact (non-human-readable) serde representation of [`ShardMembershipEntropy`]
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyBinary([u8; ShardMembershipEntropy::SIZE]);
519
// Human-readable (hex-encoded) serde representation of [`ShardMembershipEntropy`]
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardMembershipEntropyHex(#[serde(with = "hex")] [u8; ShardMembershipEntropy::SIZE]);
524
#[cfg(feature = "serde")]
impl Serialize for ShardMembershipEntropy {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Hex for human-readable formats (e.g. JSON), raw bytes otherwise
        match serializer.is_human_readable() {
            true => ShardMembershipEntropyHex(self.0).serialize(serializer),
            false => ShardMembershipEntropyBinary(self.0).serialize(serializer),
        }
    }
}
539
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardMembershipEntropy {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirrors `Serialize`: hex for human-readable formats, raw bytes otherwise
        let bytes = match deserializer.is_human_readable() {
            true => ShardMembershipEntropyHex::deserialize(deserializer)?.0,
            false => ShardMembershipEntropyBinary::deserialize(deserializer)?.0,
        };
        Ok(Self(bytes))
    }
}
554
555impl fmt::Debug for ShardMembershipEntropy {
556    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
557        for byte in self.0 {
558            write!(f, "{byte:02x}")?;
559        }
560        Ok(())
561    }
562}
563
564impl AsRef<[u8]> for ShardMembershipEntropy {
565    #[inline(always)]
566    fn as_ref(&self) -> &[u8] {
567        &self.0
568    }
569}
570
571impl AsMut<[u8]> for ShardMembershipEntropy {
572    #[inline(always)]
573    fn as_mut(&mut self) -> &mut [u8] {
574        &mut self.0
575    }
576}
577
impl ShardMembershipEntropy {
    /// Size in bytes (matches [`PotOutput::SIZE`])
    pub const SIZE: usize = PotOutput::SIZE;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(bytes: [u8; Self::SIZE]) -> Self {
        Self(bytes)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    /// (zero-copy reinterpretation, no allocation)
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardMembershipEntropy` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to slice of underlying representation for efficiency purposes
    /// (zero-copy reinterpretation, no allocation)
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardMembershipEntropy` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }
}
610
/// Reduced hash used for shard assignment.
///
/// Currently 32 bytes (see the TODO on [`ShardCommitmentHash::SIZE`] about reducing to 8).
#[derive(
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    From,
    Into,
    AsRef,
    AsMut,
    Deref,
    DerefMut,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[repr(C)]
pub struct ShardCommitmentHash([u8; ShardCommitmentHash::SIZE]);
632
633impl fmt::Display for ShardCommitmentHash {
634    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
635        for byte in self.0 {
636            write!(f, "{byte:02x}")?;
637        }
638        Ok(())
639    }
640}
641
// Compact (non-human-readable) serde representation of [`ShardCommitmentHash`]
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashBinary([u8; ShardCommitmentHash::SIZE]);
646
// Human-readable (hex-encoded) serde representation of [`ShardCommitmentHash`]
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ShardCommitmentHashHex(#[serde(with = "hex")] [u8; ShardCommitmentHash::SIZE]);
651
#[cfg(feature = "serde")]
impl Serialize for ShardCommitmentHash {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Hex for human-readable formats (e.g. JSON), raw bytes otherwise
        match serializer.is_human_readable() {
            true => ShardCommitmentHashHex(self.0).serialize(serializer),
            false => ShardCommitmentHashBinary(self.0).serialize(serializer),
        }
    }
}
666
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ShardCommitmentHash {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirrors `Serialize`: hex for human-readable formats, raw bytes otherwise
        let bytes = match deserializer.is_human_readable() {
            true => ShardCommitmentHashHex::deserialize(deserializer)?.0,
            false => ShardCommitmentHashBinary::deserialize(deserializer)?.0,
        };
        Ok(Self(bytes))
    }
}
681
682impl fmt::Debug for ShardCommitmentHash {
683    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
684        for byte in self.0 {
685            write!(f, "{byte:02x}")?;
686        }
687        Ok(())
688    }
689}
690
691impl AsRef<[u8]> for ShardCommitmentHash {
692    #[inline(always)]
693    fn as_ref(&self) -> &[u8] {
694        &self.0
695    }
696}
697
698impl AsMut<[u8]> for ShardCommitmentHash {
699    #[inline(always)]
700    fn as_mut(&mut self) -> &mut [u8] {
701        &mut self.0
702    }
703}
704
705impl From<Hash> for ShardCommitmentHash {
706    #[inline(always)]
707    fn from(value: Hash) -> Self {
708        let bytes = value.as_bytes();
709        Self(*bytes)
710        // Self([
711        //     bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
712        // ])
713    }
714}
715
impl ShardCommitmentHash {
    // TODO: Reduce to 8 bytes once Merkle Tree implementation exists that produces such hashes
    /// Size in bytes
    pub const SIZE: usize = 32;

    /// Create a new instance
    #[inline(always)]
    pub const fn new(hash: [u8; Self::SIZE]) -> Self {
        Self(hash)
    }

    /// Get internal representation
    #[inline(always)]
    pub const fn as_bytes(&self) -> &[u8; Self::SIZE] {
        &self.0
    }

    /// Convenient conversion from slice of underlying representation for efficiency purposes
    /// (zero-copy reinterpretation, no allocation)
    #[inline(always)]
    pub const fn slice_from_repr(value: &[[u8; Self::SIZE]]) -> &[Self] {
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion from array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn array_from_repr<const N: usize>(value: [[u8; Self::SIZE]; N]) -> [Self; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/61956
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute_copy(&value) }
    }

    /// Convenient conversion to a slice of underlying representation for efficiency purposes
    /// (zero-copy reinterpretation, no allocation)
    #[inline(always)]
    pub const fn repr_from_slice(value: &[Self]) -> &[[u8; Self::SIZE]] {
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute(value) }
    }

    /// Convenient conversion to an array of underlying representation for efficiency purposes
    #[inline(always)]
    pub const fn repr_from_array<const N: usize>(value: [Self; N]) -> [[u8; Self::SIZE]; N] {
        // TODO: Should have been transmute, but https://github.com/rust-lang/rust/issues/61956
        // SAFETY: `ShardCommitmentHash` is `#[repr(C)]` and guaranteed to have the same memory
        // layout
        unsafe { mem::transmute_copy(&value) }
    }
}
767
/// Information about shard commitments in the solution
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct SolutionShardCommitment {
    /// Root of the Merkle Tree of shard commitments
    pub root: ShardCommitmentHash,
    /// Proof for the shard commitment used in the solution.
    ///
    /// Length is the tree depth: `log2(NUM_LEAVES)` hashes.
    pub proof: [ShardCommitmentHash; SolutionShardCommitment::NUM_LEAVES.ilog2() as usize],
    /// Shard commitment leaf used for the solution
    pub leaf: ShardCommitmentHash,
}
782
783impl SolutionShardCommitment {
784    /// Number of leaves in a Merkle Tree of shard commitments
785    pub const NUM_LEAVES: usize = 2_u32.pow(20) as usize;
786}
787
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Farmer's shard commitment
    pub shard_commitment: SolutionShardCommitment,
    /// Record root that can be used to verify that piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk of the record (at `piece_offset` below)
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Pieces offset within sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
818
819impl Solution {
820    /// Fake solution for the genesis block
821    pub fn genesis_solution() -> Self {
822        Self {
823            public_key_hash: Ed25519PublicKey::default().hash(),
824            shard_commitment: SolutionShardCommitment {
825                root: Default::default(),
826                proof: [Default::default(); _],
827                leaf: Default::default(),
828            },
829            record_root: RecordRoot::default(),
830            record_proof: RecordProof::default(),
831            chunk: RecordChunk::default(),
832            chunk_proof: ChunkProof::default(),
833            proof_of_space: PosProof::default(),
834            history_size: HistorySize::from(SegmentIndex::ZERO),
835            sector_index: SectorIndex::ZERO,
836            piece_offset: PieceOffset::default(),
837            padding: [0; _],
838        }
839    }
840
841    /// Check solution validity
842    pub fn verify<PotVerifier>(
843        &self,
844        slot: SlotNumber,
845        params: &SolutionVerifyParams,
846    ) -> Result<(), SolutionVerifyError>
847    where
848        PotVerifier: SolutionPotVerifier,
849    {
850        let SolutionVerifyParams {
851            shard_index,
852            proof_of_time,
853            solution_range,
854            shard_membership_entropy,
855            num_shards,
856            piece_check_params,
857        } = params;
858
859        let shard_kind = shard_index
860            .shard_kind()
861            .and_then(|shard_kind| shard_kind.to_real())
862            .ok_or(SolutionVerifyError::InvalidInputShard {
863                shard_index: *shard_index,
864                shard_kind: shard_index.shard_kind(),
865            })?;
866
867        let (solution_shard_index, shard_commitment_index) = num_shards
868            .derive_shard_index_and_shard_commitment_index(
869                &self.public_key_hash,
870                &self.shard_commitment.root,
871                shard_membership_entropy,
872                self.history_size,
873            );
874
875        // Adjust solution range according to shard kind
876        let solution_range = match shard_kind {
877            RealShardKind::BeaconChain => *solution_range,
878            RealShardKind::IntermediateShard => {
879                if solution_shard_index.parent_shard() != Some(*shard_index) {
880                    return Err(SolutionVerifyError::InvalidSolutionShard {
881                        solution_shard_index,
882                        solution_parent_shard_index: solution_shard_index.parent_shard(),
883                        expected_shard_index: *shard_index,
884                        expected_shard_kind: RealShardKind::IntermediateShard,
885                    });
886                }
887
888                solution_range.to_intermediate_shard(*num_shards)
889            }
890            RealShardKind::LeafShard => {
891                if solution_shard_index != *shard_index {
892                    return Err(SolutionVerifyError::InvalidSolutionShard {
893                        solution_shard_index,
894                        solution_parent_shard_index: solution_shard_index.parent_shard(),
895                        expected_shard_index: *shard_index,
896                        expected_shard_kind: RealShardKind::LeafShard,
897                    });
898                }
899
900                solution_range.to_leaf_shard(*num_shards)
901            }
902        };
903
        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 1048576 is hardcoded here and below for
        //  compilation to succeed.
907        const {
908            assert!(SolutionShardCommitment::NUM_LEAVES == 1048576);
909        }
910        if !BalancedMerkleTree::<1048576>::verify(
911            &self.shard_commitment.root,
912            &ShardCommitmentHash::repr_from_array(self.shard_commitment.proof),
913            shard_commitment_index as usize,
914            *self.shard_commitment.leaf,
915        ) {
916            return Err(SolutionVerifyError::InvalidShardCommitment);
917        }
918
919        let sector_id = SectorId::new(
920            &self.public_key_hash,
921            &self.shard_commitment.root,
922            self.sector_index,
923            self.history_size,
924        );
925
926        let global_challenge = proof_of_time.derive_global_challenge(slot);
927        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
928        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();
929
930        // Check that proof of space is valid
931        if !PotVerifier::is_proof_valid(
932            &sector_id.derive_evaluation_seed(self.piece_offset),
933            s_bucket_audit_index,
934            &self.proof_of_space,
935        ) {
936            return Err(SolutionVerifyError::InvalidProofOfSpace);
937        };
938
939        let masked_chunk =
940            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();
941
942        let solution_distance =
943            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);
944
945        if !solution_distance.is_within(solution_range) {
946            return Err(SolutionVerifyError::OutsideSolutionRange {
947                solution_range,
948                solution_distance,
949            });
950        }
951
952        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
953        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
954        //  to succeed.
955        const {
956            assert!(Record::NUM_S_BUCKETS == 65536);
957        }
958        // Check that chunk belongs to the record
959        if !BalancedMerkleTree::<65536>::verify(
960            &self.record_root,
961            &self.chunk_proof,
962            usize::from(s_bucket_audit_index),
963            *self.chunk,
964        ) {
965            return Err(SolutionVerifyError::InvalidChunkProof);
966        }
967
968        if let Some(SolutionVerifyPieceCheckParams {
969            max_pieces_in_sector,
970            segment_root,
971            recent_segments,
972            recent_history_fraction,
973            min_sector_lifetime,
974            current_history_size,
975            sector_expiration_check_segment_root,
976        }) = piece_check_params
977        {
978            if &self.history_size > current_history_size {
979                return Err(SolutionVerifyError::FutureHistorySize {
980                    current: *current_history_size,
981                    solution: self.history_size,
982                });
983            }
984
985            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
986                return Err(SolutionVerifyError::InvalidPieceOffset {
987                    piece_offset: u16::from(self.piece_offset),
988                    max_pieces_in_sector: *max_pieces_in_sector,
989                });
990            }
991
992            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
993            {
994                let expiration_history_size = match sector_id.derive_expiration_history_size(
995                    self.history_size,
996                    sector_expiration_check_segment_root,
997                    *min_sector_lifetime,
998                ) {
999                    Some(expiration_history_size) => expiration_history_size,
1000                    None => {
1001                        return Err(SolutionVerifyError::InvalidHistorySize);
1002                    }
1003                };
1004
1005                if expiration_history_size <= *current_history_size {
1006                    return Err(SolutionVerifyError::SectorExpired {
1007                        expiration_history_size,
1008                        current_history_size: *current_history_size,
1009                    });
1010                }
1011            }
1012
1013            let position = sector_id
1014                .derive_piece_index(
1015                    self.piece_offset,
1016                    self.history_size,
1017                    *max_pieces_in_sector,
1018                    *recent_segments,
1019                    *recent_history_fraction,
1020                )
1021                .position();
1022
1023            // Check that piece is part of the blockchain history
1024            if !self
1025                .record_root
1026                .is_valid(segment_root, &self.record_proof, position)
1027            {
1028                return Err(SolutionVerifyError::InvalidPiece);
1029            }
1030        }
1031
1032        Ok(())
1033    }
1034}