ab_core_primitives/
solutions.rs

//! Solutions-related data structures and functions.

use crate::block::BlockNumber;
use crate::hashes::Blake3Hash;
use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
use crate::pos::{PosProof, PosSeed};
use crate::pot::{PotOutput, SlotNumber};
use crate::sectors::{SectorId, SectorIndex, SectorSlotChallenge};
use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
use ab_io_type::trivial_type::TrivialType;
use ab_merkle_tree::balanced_hashed::BalancedHashedMerkleTree;
use blake3::OUT_LEN;
use core::fmt;
use core::simd::Simd;
use derive_more::{Add, AddAssign, Deref, DerefMut, Display, From, Into, Sub, SubAssign};
#[cfg(feature = "scale-codec")]
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "scale-codec")]
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde::{Deserializer, Serializer};
#[cfg(feature = "serde")]
use serde_big_array::BigArray;

/// Solution distance
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);

impl SolutionDistance {
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX / 2);

    // TODO: Remove once `From` is stable
    /// Create new instance
    #[inline(always)]
    pub const fn from_u64(n: u64) -> Self {
        Self(n)
    }

    /// Calculate solution distance for given parameters.
    ///
    /// Typically used as a primitive to check whether solution distance is within solution range
    /// (see [`Self::is_within()`]).
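    ///
    /// A minimal usage sketch (the inputs here are placeholders rather than real consensus
    /// values):
    ///
    /// ```ignore
    /// let solution_distance =
    ///     SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);
    /// if solution_distance.is_within(solution_range) {
    ///     // The solution qualifies for this slot
    /// }
    /// ```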
    pub fn calculate(
        global_challenge: &Blake3Hash,
        chunk: &[u8; 32],
        sector_slot_challenge: &SectorSlotChallenge,
    ) -> Self {
        // TODO: Is keyed hash really needed here?
        let audit_chunk = *blake3::keyed_hash(sector_slot_challenge, chunk).as_bytes();
        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
            audit_chunk[0],
            audit_chunk[1],
            audit_chunk[2],
            audit_chunk[3],
            audit_chunk[4],
            audit_chunk[5],
            audit_chunk[6],
            audit_chunk[7],
        ]);
        let global_challenge_as_solution_range: SolutionRange = SolutionRange::from_bytes(
            *global_challenge
                .array_chunks::<{ SolutionRange::SIZE }>()
                .next()
                .expect("Solution range is smaller in size than global challenge; qed"),
        );

        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
    }

    /// Check if solution distance is within the provided solution range
    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
        self.0 <= solution_range.as_u64() / 2
    }
}

/// Solution range
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);

impl SolutionRange {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u64>();
    /// Minimum value
    pub const MIN: Self = Self(u64::MIN);
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX);

    // TODO: Remove once `From` is stable
    /// Create new instance
    #[inline(always)]
    pub const fn new(n: u64) -> Self {
        Self(n)
    }

    // TODO: Remove once `From` is stable
    /// Get internal representation
    #[inline(always)]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Convert to little-endian bytes
    #[inline(always)]
    pub fn to_bytes(self) -> [u8; 8] {
        self.0.to_le_bytes()
    }

    /// Create a new instance from little-endian bytes
    #[inline(always)]
    pub fn from_bytes(bytes: [u8; 8]) -> Self {
        Self(u64::from_le_bytes(bytes))
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / pieces
    /// ```
    #[inline]
    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
        let solution_range = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take the number of pieces into account
        Self(solution_range / pieces)
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / solution_range
    /// ```
    #[inline]
    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
        let pieces = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take solution range into account
        pieces / self.0
    }

    /// Bidirectional distance between two solution ranges
    #[inline]
    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
        let a = self.0;
        let b = other.0;
        let diff = a.wrapping_sub(b);
        let diff2 = b.wrapping_sub(a);
        // Find the smaller diff between the two directions
        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
    }

    /// Derives next solution range based on the total era slots and slot probability
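    ///
    /// A sketch with illustrative numbers: with slot probability `(1, 6)` and an era duration of
    /// 100 blocks, the expected era length is 600 slots. If the era actually took 900 slots, the
    /// solution range grows by 1.5x; the adjustment factor is always clamped to `[0.25, 4.0]`.
    ///
    /// ```ignore
    /// let next_solution_range =
    ///     current_solution_range.derive_next(start_slot, current_slot, (1, 6), era_duration);
    /// ```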
    pub fn derive_next(
        self,
        start_slot: SlotNumber,
        current_slot: SlotNumber,
        slot_probability: (u64, u64),
        era_duration: BlockNumber,
    ) -> Self {
        // Calculate total slots within this era
        let era_slot_count = current_slot - start_slot;

        // The idea here is to keep block production at the same pace while space pledged on the
        // network changes. For this, we adjust the previous solution range according to actual and
        // expected number of blocks per era.
        //
        // Below is code analogous to the following, but without using floats:
        // ```rust
        // let actual_slots_per_block = era_slot_count as f64 / era_duration as f64;
        // let expected_slots_per_block =
        //     slot_probability.1 as f64 / slot_probability.0 as f64;
        // let adjustment_factor =
        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
        //
        // next_solution_range =
        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
        // ```
        let current_solution_range = self.0;
        let next_solution_range = u64::try_from(
            u128::from(current_solution_range)
                .saturating_mul(u128::from(era_slot_count))
                .saturating_mul(u128::from(slot_probability.0))
                / u128::from(u64::from(era_duration))
                / u128::from(slot_probability.1),
        );

        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
            current_solution_range / 4,
            current_solution_range.saturating_mul(4),
        ))
    }
}

// Quick test to ensure the functions above are the inverse of each other
const _: () = {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
};
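
// Illustrative compile-time checks (a sketch following the pattern above): bidirectional
// distance takes the shorter of the two wrap-around directions, and a distance is within a
// solution range when it does not exceed half of that range
const _: () = {
    let a = SolutionRange::new(u64::MAX);
    let b = SolutionRange::new(1);
    // Wrapping through `u64::MAX` yields distance 2 rather than `u64::MAX - 1`
    assert!(a.bidirectional_distance(b).is_within(SolutionRange::new(4)));
    assert!(!a.bidirectional_distance(b).is_within(SolutionRange::new(2)));
};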

/// Proof for chunk contained within a record.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);

impl fmt::Debug for ChunkProof {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[")?;
        for hash in self.0 {
            for byte in hash {
                write!(f, "{byte:02x}")?;
            }
            write!(f, ", ")?;
        }
        write!(f, "]")?;
        Ok(())
    }
}

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);

#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);

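// Serialization format depends on the serializer: human-readable formats (e.g. JSON) get
// hex-encoded hashes, while binary formats get the raw byte arrays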
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            ChunkProofHex(unsafe {
                core::mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            unsafe {
                core::mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}

impl Default for ChunkProof {
    #[inline]
    fn default() -> Self {
        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
    }
}

impl AsRef<[u8]> for ChunkProof {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.0.as_flattened()
    }
}

impl AsMut<[u8]> for ChunkProof {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.0.as_flattened_mut()
    }
}

impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
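    // The number of hashes is the depth of the balanced Merkle tree over all s-buckets of a
    // record: `log2(65536) == 16`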
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}

/// Solution verification errors
#[derive(Debug, Eq, PartialEq, thiserror::Error)]
pub enum SolutionVerifyError {
    /// Invalid piece offset
    #[error("Invalid piece offset {piece_offset}, sector contains up to {max_pieces_in_sector} pieces")]
    InvalidPieceOffset {
        /// Index of the piece that failed verification
        piece_offset: u16,
        /// How many pieces one sector is supposed to contain (max)
        max_pieces_in_sector: u16,
    },
    /// Sector expired
    #[error("Sector expired")]
    SectorExpired {
        /// Expiration history size
        expiration_history_size: HistorySize,
        /// Current history size
        current_history_size: HistorySize,
    },
    /// Piece verification failed
    #[error("Piece verification failed")]
    InvalidPiece,
    /// Solution is outside the solution range
    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
    OutsideSolutionRange {
        /// Solution range
        solution_range: SolutionRange,
        /// Solution distance
        solution_distance: SolutionDistance,
    },
    /// Invalid proof of space
    #[error("Invalid proof of space")]
    InvalidProofOfSpace,
    /// Invalid audit chunk offset
    #[error("Invalid audit chunk offset")]
    InvalidAuditChunkOffset,
    /// Invalid chunk proof
    #[error("Invalid chunk proof")]
    InvalidChunkProof,
    /// Invalid history size
    #[error("Invalid history size")]
    InvalidHistorySize,
}

/// Parameters for checking piece validity
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which the piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if it exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}

/// Parameters for solution verification
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}

/// Proof-of-space verifier to be used in [`Solution::verify()`]
pub trait SolutionPotVerifier {
    /// Check whether a proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, challenge_index: u32, proof: &PosProof) -> bool;
}

/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Public key hash of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Record root that can be used to verify that the piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for the above record root
    pub record_proof: RecordProof,
    /// Chunk at the audited s-bucket index within the record
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within the sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}

impl Solution {
    /// Fake solution for the genesis block
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Blake3Hash::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            padding: [0; _],
        }
    }

    /// Check solution validity
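    ///
    /// A minimal usage sketch (`MyPotVerifier` stands in for whatever [`SolutionPotVerifier`]
    /// implementation the caller provides):
    ///
    /// ```ignore
    /// solution.verify::<MyPotVerifier>(slot, &params)?;
    /// ```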
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            proof_of_time,
            solution_range,
            piece_check_params,
        } = params;

        let sector_id = SectorId::new(&self.public_key_hash, self.sector_index, self.history_size);

        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index.into(),
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        }

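        // The solution distance is computed over the masked chunk (chunk XOR hash of the
        // proof of space), which is the form in which chunks are audited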
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(*solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range: *solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that allows
        //  the code to compile. Constant 65536 is hardcoded here and in the `if` condition below
        //  for compilation to succeed
        const _: () = {
            assert!(Record::NUM_S_BUCKETS == 65536);
        };
        // Check that chunk belongs to the record
        if !BalancedHashedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }
            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}