// ab_core_primitives/solutions.rs

1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::hashes::{Blake3Hash, blake3_hash_with_key};
5use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
6use crate::pos::{PosProof, PosSeed};
7use crate::pot::{PotOutput, SlotNumber};
8use crate::sectors::{SectorId, SectorIndex, SectorSlotChallenge};
9use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
10use ab_merkle_tree::balanced_hashed::BalancedHashedMerkleTree;
11use blake3::OUT_LEN;
12use core::fmt;
13use core::simd::Simd;
14use derive_more::{Add, AddAssign, Deref, DerefMut, Display, From, Into, Sub, SubAssign};
15#[cfg(feature = "scale-codec")]
16use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
17#[cfg(feature = "scale-codec")]
18use scale_info::TypeInfo;
19#[cfg(feature = "serde")]
20use serde::{Deserialize, Serialize};
21#[cfg(feature = "serde")]
22use serde::{Deserializer, Serializer};
23#[cfg(feature = "serde")]
24use serde_big_array::BigArray;
25
/// Solution distance.
///
/// The absolute distance between the global challenge and an audit chunk, computed by
/// [`SolutionDistance::calculate()`] as the smaller of the two wrapping differences between the
/// values. A solution is valid when this distance falls within the current solution range, see
/// [`SolutionDistance::is_within()`].
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct SolutionDistance(u64);
37
38impl SolutionDistance {
39    /// Maximum value
40    pub const MAX: Self = Self(u64::MAX / 2);
41
42    // TODO: Remove once `From` is stable
43    /// Create new instance
44    #[inline(always)]
45    pub const fn from_u64(n: u64) -> Self {
46        Self(n)
47    }
48
49    /// Calculate solution distance for given parameters.
50    ///
51    /// Typically used as a primitive to check whether solution distance is within solution range
52    /// (see [`Self::is_within()`]).
53    pub fn calculate(
54        global_challenge: &Blake3Hash,
55        chunk: &[u8; 32],
56        sector_slot_challenge: &SectorSlotChallenge,
57    ) -> Self {
58        let audit_chunk = blake3_hash_with_key(sector_slot_challenge, chunk);
59        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes(
60            *audit_chunk
61                .array_chunks::<{ SolutionRange::SIZE }>()
62                .next()
63                .expect("Solution range is smaller in size than global challenge; qed"),
64        );
65        let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_bytes(
66            *global_challenge
67                .array_chunks::<{ SolutionRange::SIZE }>()
68                .next()
69                .expect("Solution range is smaller in size than global challenge; qed"),
70        );
71
72        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
73    }
74
75    /// Check if solution distance is within the provided solution range
76    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
77        self.0 <= solution_range.to_u64() / 2
78    }
79}
80
/// Solution range.
///
/// Determines how far from the global challenge an audit chunk may be and still produce a valid
/// solution (a distance up to half the range on either side passes, see
/// [`SolutionDistance::is_within()`]). The range is re-targeted every era based on actual block
/// production, see [`SolutionRange::derive_next()`].
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct SolutionRange(u64);
107
108impl SolutionRange {
109    /// Size in bytes
110    pub const SIZE: usize = size_of::<u64>();
111    /// Minimum value
112    pub const MIN: Self = Self(u64::MIN);
113    /// Maximum value
114    pub const MAX: Self = Self(u64::MAX);
115
116    // TODO: Remove once `From` is stable
117    /// Create new instance
118    #[inline(always)]
119    pub const fn from_u64(n: u64) -> Self {
120        Self(n)
121    }
122
123    // TODO: Remove once `From` is stable
124    /// Get internal representation
125    #[inline(always)]
126    pub const fn to_u64(self) -> u64 {
127        self.0
128    }
129
130    /// Create a new instance from bytes
131    #[inline(always)]
132    pub fn from_bytes(bytes: [u8; 8]) -> Self {
133        Self(u64::from_le_bytes(bytes))
134    }
135
136    /// Computes the following:
137    /// ```text
138    /// MAX * slot_probability / chunks * s_buckets / pieces
139    /// ```
140    #[inline]
141    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
142        let solution_range = u64::MAX
143            // Account for slot probability
144            / slot_probability.1 * slot_probability.0
145            // Now take the probability of hitting occupied s-bucket in a piece into account
146            / Record::NUM_CHUNKS as u64
147            * Record::NUM_S_BUCKETS as u64;
148
149        // Take the number of pieces into account
150        Self(solution_range / pieces)
151    }
152
153    /// Computes the following:
154    /// ```text
155    /// MAX * slot_probability / chunks * s_buckets / solution_range
156    /// ```
157    #[inline]
158    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
159        let pieces = u64::MAX
160            // Account for slot probability
161            / slot_probability.1 * slot_probability.0
162            // Now take the probability of hitting occupied s-bucket in sector into account
163            / Record::NUM_CHUNKS as u64
164            * Record::NUM_S_BUCKETS as u64;
165
166        // Take solution range into account
167        pieces / self.0
168    }
169
170    /// Bidirectional distance between two solution ranges
171    #[inline]
172    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
173        let a = self.0;
174        let b = other.0;
175        let diff = a.wrapping_sub(b);
176        let diff2 = b.wrapping_sub(a);
177        // Find smaller diff between 2 directions
178        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
179    }
180
181    /// Derives next solution range based on the total era slots and slot probability
182    pub fn derive_next(
183        self,
184        start_slot: SlotNumber,
185        current_slot: SlotNumber,
186        slot_probability: (u64, u64),
187        era_duration: BlockNumber,
188    ) -> Self {
189        // calculate total slots within this era
190        let era_slot_count = current_slot - start_slot;
191
192        // The idea here is to keep block production at the same pace while space pledged on the
193        // network changes. For this, we adjust the previous solution range according to actual and
194        // expected number of blocks per era.
195        //
196        // Below is code analogous to the following, but without using floats:
197        // ```rust
198        // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
199        // let expected_slots_per_block =
200        //     slot_probability.1 as f64 / slot_probability.0 as f64;
201        // let adjustment_factor =
202        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
203        //
204        // next_solution_range =
205        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
206        // ```
207        let current_solution_range = self.0;
208        let next_solution_range = u64::try_from(
209            u128::from(current_solution_range)
210                .saturating_mul(u128::from(era_slot_count))
211                .saturating_mul(u128::from(slot_probability.0))
212                / u128::from(u64::from(era_duration))
213                / u128::from(slot_probability.1),
214        );
215
216        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
217            current_solution_range / 4,
218            current_solution_range.saturating_mul(4),
219        ))
220    }
221}
222
// Quick test to ensure the functions above are the inverse of each other.
// Evaluated at compile time, so a regression here fails the build.
const _: () = {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
};
229
/// Proof for chunk contained within a record.
///
/// A sequence of `Self::NUM_HASHES` 32-byte (`OUT_LEN`) hashes — one per level of the balanced
/// Merkle tree over a record's s-buckets (see its use with `BalancedHashedMerkleTree::verify()`
/// in [`Solution::verify()`]).
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[repr(transparent)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
238
239impl fmt::Debug for ChunkProof {
240    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
241        write!(f, "[")?;
242        for hash in self.0 {
243            for byte in hash {
244                write!(f, "{byte:02x}")?;
245            }
246            write!(f, ", ")?;
247        }
248        write!(f, "]")?;
249        Ok(())
250    }
251}
252
/// Helper for compact binary serde representation of [`ChunkProof`] (raw byte arrays)
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
257
/// Helper for hex-encoding a single hash of a [`ChunkProof`] in human-readable serde formats
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
262
/// Helper for hex-encoded human-readable serde representation of [`ChunkProof`].
///
/// Must have the same memory layout as the array inside `ChunkProof`, the (de)serialization
/// impls below transmute between the two.
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
267
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    /// Serializes as hex strings for human-readable formats (e.g. JSON) and as raw bytes
    /// otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(transparent)]` and guaranteed to have the
            // same memory layout
            ChunkProofHex(unsafe {
                core::mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
290
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    /// Deserializes from hex strings for human-readable formats and from raw bytes otherwise,
    /// mirroring the `Serialize` impl above
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(transparent)]` and guaranteed to have the
            // same memory layout
            unsafe {
                core::mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
312
313impl Default for ChunkProof {
314    #[inline]
315    fn default() -> Self {
316        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
317    }
318}
319
320impl AsRef<[u8]> for ChunkProof {
321    #[inline]
322    fn as_ref(&self) -> &[u8] {
323        self.0.as_flattened()
324    }
325}
326
327impl AsMut<[u8]> for ChunkProof {
328    #[inline]
329    fn as_mut(&mut self) -> &mut [u8] {
330        self.0.as_flattened_mut()
331    }
332}
333
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // One hash per level of the balanced Merkle tree over `Record::NUM_S_BUCKETS` leaves
    // (`ilog2` of the leaf count = tree depth)
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
339
340/// Solution verification errors
341#[derive(Debug, Eq, PartialEq, thiserror::Error)]
342pub enum SolutionVerifyError {
343    /// Invalid piece offset
344    #[error("Piece verification failed")]
345    InvalidPieceOffset {
346        /// Index of the piece that failed verification
347        piece_offset: u16,
348        /// How many pieces one sector is supposed to contain (max)
349        max_pieces_in_sector: u16,
350    },
351    /// Sector expired
352    #[error("Sector expired")]
353    SectorExpired {
354        /// Expiration history size
355        expiration_history_size: HistorySize,
356        /// Current history size
357        current_history_size: HistorySize,
358    },
359    /// Piece verification failed
360    #[error("Piece verification failed")]
361    InvalidPiece,
362    /// Solution is outside the solution range
363    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
364    OutsideSolutionRange {
365        /// Solution range
366        solution_range: SolutionRange,
367        /// Solution distance
368        solution_distance: SolutionDistance,
369    },
370    /// Invalid proof of space
371    #[error("Invalid proof of space")]
372    InvalidProofOfSpace,
373    /// Invalid audit chunk offset
374    #[error("Invalid audit chunk offset")]
375    InvalidAuditChunkOffset,
376    /// Invalid chunk proof
377    #[error("Invalid chunk proof")]
378    InvalidChunkProof,
379    /// Invalid history size
380    #[error("Invalid history size")]
381    InvalidHistorySize,
382}
383
/// Parameters for checking piece validity.
///
/// Supplied (optionally) inside [`SolutionVerifyParams`]; when present, [`Solution::verify()`]
/// additionally checks piece offset bounds, sector expiration and piece membership in
/// blockchain history.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
403
/// Parameters for solution verification, consumed by [`Solution::verify()`]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}
417
/// Proof-of-time verifier to be used in [`Solution::verify()`].
///
/// NOTE(review): despite the "Pot" in the name, the method signature deals in proof-of-space
/// artifacts ([`PosSeed`], [`PosProof`]) and `Solution::verify()` uses it to validate the
/// solution's proof of space — confirm whether the name/doc should say "proof-of-space".
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, challenge_index: u32, proof: &PosProof) -> bool;
}
423
/// Farmer solution for slot challenge.
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, TypeInfo))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct Solution {
    /// Hash of the public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Record root that can be used to verify that piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk at the above offset
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
}
449
impl Solution {
    /// Fake solution for the genesis block.
    ///
    /// All fields are filled with default/zero values.
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Blake3Hash::default(),
            sector_index: SectorIndex::ZERO,
            history_size: HistorySize::from(SegmentIndex::ZERO),
            piece_offset: PieceOffset::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
        }
    }

    /// Check solution validity.
    ///
    /// Verifies, in order: the proof of space (via the provided `PotVerifier`), that the masked
    /// chunk falls within the solution range, that the chunk belongs to the record, and — if
    /// `params.piece_check_params` is present — piece offset bounds, sector expiration and piece
    /// membership in blockchain history.
    ///
    /// # Errors
    ///
    /// Returns a [`SolutionVerifyError`] describing the first check that failed.
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            proof_of_time,
            solution_range,
            piece_check_params,
        } = params;

        // Reconstruct the sector identity and the per-sector slot challenge from the solution's
        // own fields and the proof of time
        let sector_id = SectorId::new(&self.public_key_hash, self.sector_index, self.history_size);

        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index.into(),
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        };

        // Unmask the audited value: XOR the chunk with the hash of the proof of space
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(*solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range: *solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that allows
        //  the code to compile. Constant 16 is hardcoded here and in `if` branch below for compilation
        //  to succeed
        const _: () = {
            assert!(Record::NUM_S_BUCKETS == 65536);
        };
        // Check that chunk belongs to the record
        if !BalancedHashedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        // Optional deeper checks that tie the piece itself to blockchain history
        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            // Piece offset must fit in the sector
            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }
            // Sector must not have expired relative to the current history size
            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            // Recompute which piece of history this sector slot refers to
            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}
583}