// ab_core_primitives/solutions.rs

1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::hashes::Blake3Hash;
5use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
6use crate::pos::{PosProof, PosSeed};
7use crate::pot::{PotOutput, SlotNumber};
8use crate::sectors::{SBucket, SectorId, SectorIndex, SectorSlotChallenge};
9use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
10use ab_blake3::single_block_keyed_hash;
11use ab_io_type::trivial_type::TrivialType;
12use ab_merkle_tree::balanced::BalancedMerkleTree;
13use blake3::OUT_LEN;
14use core::fmt;
15use core::simd::Simd;
16use derive_more::{Add, AddAssign, Deref, DerefMut, Display, From, Into, Sub, SubAssign};
17#[cfg(feature = "scale-codec")]
18use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
19#[cfg(feature = "scale-codec")]
20use scale_info::TypeInfo;
21#[cfg(feature = "serde")]
22use serde::{Deserialize, Serialize};
23#[cfg(feature = "serde")]
24use serde::{Deserializer, Serializer};
25#[cfg(feature = "serde")]
26use serde_big_array::BigArray;
27
/// Solution distance
///
/// Bidirectional distance between the global challenge and an audit chunk (see
/// [`SolutionDistance::calculate()`]), used to decide whether a solution falls within the
/// solution range via [`SolutionDistance::is_within()`].
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
39
40impl SolutionDistance {
41    /// Maximum value
42    pub const MAX: Self = Self(u64::MAX / 2);
43
44    // TODO: Remove once `From` is stable
45    /// Create a new instance
46    #[inline(always)]
47    pub const fn from_u64(n: u64) -> Self {
48        Self(n)
49    }
50
51    /// Calculate solution distance for given parameters.
52    ///
53    /// Typically used as a primitive to check whether solution distance is within solution range
54    /// (see [`Self::is_within()`]).
55    pub fn calculate(
56        global_challenge: &Blake3Hash,
57        chunk: &[u8; 32],
58        sector_slot_challenge: &SectorSlotChallenge,
59    ) -> Self {
60        // TODO: Is keyed hash really needed here?
61        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
62            .expect("Less than a single block worth of bytes; qed");
63        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
64            audit_chunk[0],
65            audit_chunk[1],
66            audit_chunk[2],
67            audit_chunk[3],
68            audit_chunk[4],
69            audit_chunk[5],
70            audit_chunk[6],
71            audit_chunk[7],
72        ]);
73        let global_challenge_as_solution_range: SolutionRange =
74            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
75
76        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
77    }
78
79    /// Check if solution distance is within the provided solution range
80    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
81        self.0 <= solution_range.as_u64() / 2
82    }
83}
84
/// Solution range
///
/// Defines how far a solution is allowed to be from the global challenge (see
/// [`SolutionDistance::is_within()`]); derived from the number of pieces on the network and
/// adjusted between eras (see [`SolutionRange::derive_next()`]).
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
112
impl SolutionRange {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u64>();
    /// Minimum value
    pub const MIN: Self = Self(u64::MIN);
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX);

    // TODO: Remove once `From` is stable
    /// Create a new instance
    #[inline(always)]
    pub const fn new(n: u64) -> Self {
        Self(n)
    }

    // TODO: Remove once `From` is stable
    /// Get internal representation
    #[inline(always)]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Convert to little-endian bytes
    #[inline(always)]
    pub fn to_bytes(self) -> [u8; 8] {
        self.0.to_le_bytes()
    }

    /// Create a new instance from little-endian bytes
    #[inline(always)]
    pub fn from_bytes(bytes: [u8; 8]) -> Self {
        Self(u64::from_le_bytes(bytes))
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / pieces
    /// ```
    ///
    /// Inverse of [`Self::to_pieces()`].
    #[inline]
    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
        // Divisions are interleaved with multiplications so intermediate values stay within
        // `u64` (starting from `u64::MAX`, multiplying first would overflow)
        let solution_range = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take the number of pieces into account
        Self(solution_range / pieces)
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / solution_range
    /// ```
    ///
    /// Inverse of [`Self::from_pieces()`].
    #[inline]
    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
        // Same interleaved division/multiplication as in `from_pieces()` to avoid `u64`
        // overflow on intermediate values
        let pieces = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in sector into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take solution range into account
        pieces / self.0
    }

    /// Bidirectional distance between two solution ranges
    #[inline]
    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
        let a = self.0;
        let b = other.0;
        // Wrapping subtraction measures the distance in each direction around the `u64` ring
        let diff = a.wrapping_sub(b);
        let diff2 = b.wrapping_sub(a);
        // Find smaller diff between 2 directions
        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
    }

    /// Derives next solution range from the number of slots the last era actually took,
    /// clamping the adjustment to `[1/4, 4]` of the current solution range
    #[inline]
    pub fn derive_next(
        self,
        slots_in_last_era: SlotNumber,
        slot_probability: (u64, u64),
        era_duration: BlockNumber,
    ) -> Self {
        // The idea here is to keep block production at the same pace while space pledged on the
        // network changes. For this, we adjust the previous solution range according to actual and
        // expected number of blocks per era.
        //
        // Below is code analogous to the following, but without using floats:
        // ```rust
        // let actual_slots_per_block = slots_in_last_era as f64 / era_duration as f64;
        // let expected_slots_per_block =
        //     slot_probability.1 as f64 / slot_probability.0 as f64;
        // let adjustment_factor =
        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
        //
        // next_solution_range =
        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
        // ```
        let current_solution_range = self.0;
        // Intermediate math is done in `u128` so the two multiplications can't overflow
        let next_solution_range = u64::try_from(
            u128::from(current_solution_range)
                .saturating_mul(u128::from(slots_in_last_era))
                .saturating_mul(u128::from(slot_probability.0))
                / u128::from(u64::from(era_duration))
                / u128::from(slot_probability.1),
        );

        // A result that doesn't fit `u64` saturates to the maximum before clamping
        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
            current_solution_range / 4,
            current_solution_range.saturating_mul(4),
        ))
    }
}
230
// Compile-time check that `from_pieces()` and `to_pieces()` are inverses of each other for
// representative piece counts (evaluated during constant evaluation, so a failure breaks the
// build rather than a test run)
const _: () = {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
};
237
/// Proof for chunk contained within a record.
///
/// A sequence of hashes forming a Merkle proof that a chunk belongs to a record root; checked
/// in [`Solution::verify()`] via [`BalancedMerkleTree`].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
246
247impl fmt::Debug for ChunkProof {
248    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
249        write!(f, "[")?;
250        for hash in self.0 {
251            for byte in hash {
252                write!(f, "{byte:02x}")?;
253            }
254            write!(f, ", ")?;
255        }
256        write!(f, "]")?;
257        Ok(())
258    }
259}
260
#[cfg(feature = "serde")]
// Helper for binary (non-human-readable) serde formats: the raw nested byte arrays, with
// `BigArray` providing the array (de)serialization impls
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
265
#[cfg(feature = "serde")]
// Helper that hex-encodes a single hash for human-readable serde formats.
//
// `#[repr(C)]` guarantees the layout matches the inner `[u8; OUT_LEN]`; the
// `Serialize`/`Deserialize` impls for `ChunkProof` rely on this (their SAFETY comments claim
// it) when transmuting between `[[u8; OUT_LEN]; N]` and `[ChunkProofHexHash; N]` — without an
// explicit repr attribute the default `repr(Rust)` layout is not guaranteed.
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
270
#[cfg(feature = "serde")]
// Helper for human-readable serde formats: an array of hex-encoded hashes (see
// `ChunkProofHexHash`)
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
275
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    /// Serializes as an array of hex strings for human-readable formats and as raw bytes
    /// otherwise.
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            // NOTE(review): this relies on `ChunkProofHexHash` having a guaranteed layout —
            // confirm its declaration actually carries `#[repr(C)]` (or `#[repr(transparent)]`)
            ChunkProofHex(unsafe {
                core::mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
298
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    /// Deserializes from an array of hex strings for human-readable formats and from raw bytes
    /// otherwise (the mirror of the `Serialize` impl).
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is `#[repr(C)]` and guaranteed to have the
            // same memory layout
            // NOTE(review): this relies on `ChunkProofHexHash` having a guaranteed layout —
            // confirm its declaration actually carries `#[repr(C)]` (or `#[repr(transparent)]`)
            unsafe {
                core::mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
320
321impl Default for ChunkProof {
322    #[inline]
323    fn default() -> Self {
324        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
325    }
326}
327
328impl AsRef<[u8]> for ChunkProof {
329    #[inline]
330    fn as_ref(&self) -> &[u8] {
331        self.0.as_flattened()
332    }
333}
334
335impl AsMut<[u8]> for ChunkProof {
336    #[inline]
337    fn as_mut(&mut self) -> &mut [u8] {
338        self.0.as_flattened_mut()
339    }
340}
341
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // One hash per level of the balanced Merkle tree over all s-buckets of a record, hence
    // log2 of the number of s-buckets (see the `BalancedMerkleTree::verify()` call in
    // `Solution::verify()`)
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
347
348/// Solution verification errors
349#[derive(Debug, Eq, PartialEq, thiserror::Error)]
350pub enum SolutionVerifyError {
351    /// Invalid piece offset
352    #[error("Piece verification failed")]
353    InvalidPieceOffset {
354        /// Index of the piece that failed verification
355        piece_offset: u16,
356        /// How many pieces one sector is supposed to contain (max)
357        max_pieces_in_sector: u16,
358    },
359    /// History size is in the future
360    #[error("History size {solution} is in the future, current is {current}")]
361    FutureHistorySize {
362        /// Current history size
363        current: HistorySize,
364        /// History size solution was created for
365        solution: HistorySize,
366    },
367    /// Sector expired
368    #[error("Sector expired")]
369    SectorExpired {
370        /// Expiration history size
371        expiration_history_size: HistorySize,
372        /// Current history size
373        current_history_size: HistorySize,
374    },
375    /// Piece verification failed
376    #[error("Piece verification failed")]
377    InvalidPiece,
378    /// Solution is outside the solution range
379    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
380    OutsideSolutionRange {
381        /// Solution range
382        solution_range: SolutionRange,
383        /// Solution distance
384        solution_distance: SolutionDistance,
385    },
386    /// Invalid proof of space
387    #[error("Invalid proof of space")]
388    InvalidProofOfSpace,
389    /// Invalid chunk proof
390    #[error("Invalid chunk proof")]
391    InvalidChunkProof,
392    /// Invalid history size
393    #[error("Invalid history size")]
394    InvalidHistorySize,
395}
396
/// Parameters for checking piece validity
///
/// Supplied as the optional piece-validity part of [`SolutionVerifyParams`]; when absent, the
/// piece checks in [`Solution::verify()`] are skipped.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
416
/// Parameters for solution verification
///
/// Passed to [`Solution::verify()`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Proof of time for which solution is built.
    ///
    /// The global challenge for the audited slot is derived from it.
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}
430
/// Proof-of-time verifier to be used in [`Solution::verify()`]
///
/// NOTE(review): despite "Pot" in the name, the method signature checks a proof of *space*
/// ([`PosSeed`]/[`PosProof`]) — confirm the intended naming against the wider codebase
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, s_bucket: SBucket, proof: &PosProof) -> bool;
}
436
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Hash of the public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Record root that can be used to verify that piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk from the record, located at the audited s-bucket index (see [`Solution::verify()`])
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
468
impl Solution {
    /// Fake solution for the genesis block.
    ///
    /// All fields are set to default/zero values.
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Blake3Hash::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            padding: [0; _],
        }
    }

    /// Check solution validity.
    ///
    /// Verification steps, in order:
    /// 1. The proof of space is valid for the audited s-bucket.
    /// 2. The solution distance of the masked chunk is within `solution_range`.
    /// 3. The chunk belongs to the record (Merkle proof against `record_root`).
    /// 4. Only when `piece_check_params` is `Some`: the history size is not in the future, the
    ///    piece offset is in bounds, the sector has not expired, and the piece is part of the
    ///    blockchain history.
    ///
    /// # Errors
    ///
    /// Returns the [`SolutionVerifyError`] corresponding to the first check that failed.
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            proof_of_time,
            solution_range,
            piece_check_params,
        } = params;

        let sector_id = SectorId::new(&self.public_key_hash, self.sector_index, self.history_size);

        // Challenges for this slot are derived from the proof of time
        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index,
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        };

        // Combine the chunk with the proof-of-space hash (SIMD XOR); the solution distance is
        // measured against this masked value
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(*solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range: *solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
        //  to succeed.
        const _: () = {
            assert!(Record::NUM_S_BUCKETS == 65536);
        };
        // Check that chunk belongs to the record
        if !BalancedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            // A sector can't have been created at a point in history that doesn't exist yet
            if &self.history_size > current_history_size {
                return Err(SolutionVerifyError::FutureHistorySize {
                    current: *current_history_size,
                    solution: self.history_size,
                });
            }

            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }

            // Expiration can only be checked when the corresponding segment root is available
            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}
611}