ab_core_primitives/
solutions.rs

1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::hashes::Blake3Hash;
5use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
6use crate::pos::{PosProof, PosSeed};
7use crate::pot::{PotOutput, SlotNumber};
8use crate::sectors::{SectorId, SectorIndex, SectorSlotChallenge};
9use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
10use ab_blake3::single_block_keyed_hash;
11use ab_io_type::trivial_type::TrivialType;
12use ab_merkle_tree::balanced::BalancedMerkleTree;
13use blake3::OUT_LEN;
14use core::fmt;
15use core::simd::Simd;
16use derive_more::{Add, AddAssign, Deref, DerefMut, Display, From, Into, Sub, SubAssign};
17#[cfg(feature = "scale-codec")]
18use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
19#[cfg(feature = "scale-codec")]
20use scale_info::TypeInfo;
21#[cfg(feature = "serde")]
22use serde::{Deserialize, Serialize};
23#[cfg(feature = "serde")]
24use serde::{Deserializer, Serializer};
25#[cfg(feature = "serde")]
26use serde_big_array::BigArray;
27
/// Solution distance
///
/// Bidirectional distance between the global challenge and an audit chunk (see
/// [`SolutionDistance::calculate()`]). A solution is considered valid when this distance is
/// within the solution range (see [`SolutionDistance::is_within()`]).
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
39
40impl SolutionDistance {
41    /// Maximum value
42    pub const MAX: Self = Self(u64::MAX / 2);
43
44    // TODO: Remove once `From` is stable
45    /// Create a new instance
46    #[inline(always)]
47    pub const fn from_u64(n: u64) -> Self {
48        Self(n)
49    }
50
51    /// Calculate solution distance for given parameters.
52    ///
53    /// Typically used as a primitive to check whether solution distance is within solution range
54    /// (see [`Self::is_within()`]).
55    pub fn calculate(
56        global_challenge: &Blake3Hash,
57        chunk: &[u8; 32],
58        sector_slot_challenge: &SectorSlotChallenge,
59    ) -> Self {
60        // TODO: Is keyed hash really needed here?
61        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
62            .expect("Less than a single block worth of bytes; qed");
63        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
64            audit_chunk[0],
65            audit_chunk[1],
66            audit_chunk[2],
67            audit_chunk[3],
68            audit_chunk[4],
69            audit_chunk[5],
70            audit_chunk[6],
71            audit_chunk[7],
72        ]);
73        let global_challenge_as_solution_range: SolutionRange =
74            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
75
76        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
77    }
78
79    /// Check if solution distance is within the provided solution range
80    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
81        self.0 <= solution_range.as_u64() / 2
82    }
83}
84
/// Solution range
///
/// Defines how close to the global challenge an audit chunk must be for a solution to be
/// valid: a solution is valid when its [`SolutionDistance`] is at most half of this range
/// (see [`SolutionDistance::is_within()`]).
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
112
impl SolutionRange {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u64>();
    /// Minimum value
    pub const MIN: Self = Self(u64::MIN);
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX);

    // TODO: Remove once `From` is stable
    /// Create a new instance
    #[inline(always)]
    pub const fn new(n: u64) -> Self {
        Self(n)
    }

    // TODO: Remove once `From` is stable
    /// Get internal representation
    #[inline(always)]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Convert to little-endian bytes
    #[inline(always)]
    pub fn to_bytes(self) -> [u8; 8] {
        self.0.to_le_bytes()
    }

    /// Create a new instance from little-endian bytes
    #[inline(always)]
    pub fn from_bytes(bytes: [u8; 8]) -> Self {
        Self(u64::from_le_bytes(bytes))
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / pieces
    /// ```
    ///
    /// Panics on division by zero if `pieces` or `slot_probability.1` is zero.
    #[inline]
    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
        // NOTE(review): division before multiplication keeps the intermediate value within
        // `u64`; assumes `slot_probability.0 <= slot_probability.1` — TODO confirm
        let solution_range = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take the number of pieces into account
        Self(solution_range / pieces)
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / solution_range
    /// ```
    ///
    /// Panics on division by zero if this solution range or `slot_probability.1` is zero.
    #[inline]
    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
        let pieces = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in sector into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take solution range into account
        pieces / self.0
    }

    /// Bidirectional distance between two solution ranges
    #[inline]
    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
        let a = self.0;
        let b = other.0;
        // Wrapping subtraction measures distance in each direction around the `u64` ring
        let diff = a.wrapping_sub(b);
        let diff2 = b.wrapping_sub(a);
        // Find smaller diff between 2 directions
        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
    }

    /// Derives next solution range
    ///
    /// Panics on division by zero if `era_duration` or `slot_probability.1` is zero.
    #[inline]
    pub fn derive_next(
        self,
        slots_in_last_era: SlotNumber,
        slot_probability: (u64, u64),
        era_duration: BlockNumber,
    ) -> Self {
        // The idea here is to keep block production at the same pace while space pledged on the
        // network changes. For this, we adjust the previous solution range according to actual and
        // expected number of blocks per era.
        //
        // Below is code analogous to the following, but without using floats:
        // ```rust
        // let actual_slots_per_block = slots_in_last_era as f64 / era_duration as f64;
        // let expected_slots_per_block =
        //     slot_probability.1 as f64 / slot_probability.0 as f64;
        // let adjustment_factor =
        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
        //
        // next_solution_range =
        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
        // ```
        let current_solution_range = self.0;
        // Intermediate math is done in `u128` so the products cannot overflow
        let next_solution_range = u64::try_from(
            u128::from(current_solution_range)
                .saturating_mul(u128::from(slots_in_last_era))
                .saturating_mul(u128::from(slot_probability.0))
                / u128::from(u64::from(era_duration))
                / u128::from(slot_probability.1),
        );

        // Clamp the adjustment to the [1/4, 4] factor range, mirroring the float version above
        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
            current_solution_range / 4,
            current_solution_range.saturating_mul(4),
        ))
    }
}
230
// Compile-time sanity check that `from_pieces()` and `to_pieces()` are inverses of each other
// for representative piece counts
const _: () = {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
};
237
/// Proof for chunk contained within a record.
///
/// A sequence of [`ChunkProof::NUM_HASHES`] hashes forming a Merkle path, used in
/// [`Solution::verify()`] to prove that a chunk belongs to a record.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
246
247impl fmt::Debug for ChunkProof {
248    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
249        write!(f, "[")?;
250        for hash in self.0 {
251            for byte in hash {
252                write!(f, "{byte:02x}")?;
253            }
254            write!(f, ", ")?;
255        }
256        write!(f, "]")?;
257        Ok(())
258    }
259}
260
// Helper for the binary (non-human-readable) serde representation of `ChunkProof`
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
265
// Helper for the hex-encoded serde representation of a single hash within `ChunkProof`.
//
// `#[repr(transparent)]` guarantees this wrapper has exactly the same memory layout as the
// inner `[u8; OUT_LEN]`, so arrays of it can be safely reinterpreted as arrays of the inner
// type (the default `repr(Rust)` gives no such layout guarantee)
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(transparent)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
270
// Helper for the human-readable serde representation of `ChunkProof`: an array of
// hex-encoded hashes
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
275
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    /// Serializes as an array of hex strings for human-readable formats and as raw bytes
    /// otherwise.
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // Wrap each hash so it is hex-encoded; `array::map` makes this safe without
            // relying on the memory layout of `ChunkProofHexHash` (the previous `transmute`
            // assumed a layout guarantee the type did not declare)
            ChunkProofHex(self.0.map(ChunkProofHexHash)).serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
298
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    /// Deserializes from an array of hex strings for human-readable formats and from raw
    /// bytes otherwise.
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // Unwrap each hex-decoded hash; `array::map` makes this safe without relying on
            // the memory layout of `ChunkProofHexHash` (the previous `transmute` assumed a
            // layout guarantee the type did not declare)
            ChunkProofHex::deserialize(deserializer)?.0.map(|hash| hash.0)
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
320
321impl Default for ChunkProof {
322    #[inline]
323    fn default() -> Self {
324        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
325    }
326}
327
328impl AsRef<[u8]> for ChunkProof {
329    #[inline]
330    fn as_ref(&self) -> &[u8] {
331        self.0.as_flattened()
332    }
333}
334
335impl AsMut<[u8]> for ChunkProof {
336    #[inline]
337    fn as_mut(&mut self) -> &mut [u8] {
338        self.0.as_flattened_mut()
339    }
340}
341
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // Depth of the Merkle path: log2 of the number of s-buckets (tree leaves) in a record
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
347
348/// Solution verification errors
349#[derive(Debug, Eq, PartialEq, thiserror::Error)]
350pub enum SolutionVerifyError {
351    /// Invalid piece offset
352    #[error("Piece verification failed")]
353    InvalidPieceOffset {
354        /// Index of the piece that failed verification
355        piece_offset: u16,
356        /// How many pieces one sector is supposed to contain (max)
357        max_pieces_in_sector: u16,
358    },
359    /// History size is in the future
360    #[error("History size {solution} is in the future, current is {current}")]
361    FutureHistorySize {
362        /// Current history size
363        current: HistorySize,
364        /// History size solution was created for
365        solution: HistorySize,
366    },
367    /// Sector expired
368    #[error("Sector expired")]
369    SectorExpired {
370        /// Expiration history size
371        expiration_history_size: HistorySize,
372        /// Current history size
373        current_history_size: HistorySize,
374    },
375    /// Piece verification failed
376    #[error("Piece verification failed")]
377    InvalidPiece,
378    /// Solution is outside the solution range
379    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
380    OutsideSolutionRange {
381        /// Solution range
382        solution_range: SolutionRange,
383        /// Solution distance
384        solution_distance: SolutionDistance,
385    },
386    /// Invalid proof of space
387    #[error("Invalid proof of space")]
388    InvalidProofOfSpace,
389    /// Invalid audit chunk offset
390    #[error("Invalid audit chunk offset")]
391    InvalidAuditChunkOffset,
392    /// Invalid chunk proof
393    #[error("Invalid chunk proof")]
394    InvalidChunkProof,
395    /// Invalid history size
396    #[error("Invalid history size")]
397    InvalidHistorySize,
398}
399
/// Parameters for checking piece validity
///
/// Consumed by [`Solution::verify()`] when piece validity checks are enabled (see
/// [`SolutionVerifyParams::piece_check_params`]).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
419
/// Parameters for solution verification
///
/// Consumed by [`Solution::verify()`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}
433
/// Proof verifier to be used in [`Solution::verify()`]
///
/// NOTE(review): previously documented as a "proof-of-time verifier", but the method
/// signature checks a proof-of-space proof ([`PosProof`]) against a [`PosSeed`] — confirm
/// the intended naming upstream.
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, challenge_index: u32, proof: &PosProof) -> bool;
}
439
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Record root that can be used to verify that piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk at the above offset
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
471
impl Solution {
    /// Fake solution for the genesis block
    ///
    /// Constructed entirely from zero/default values.
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Blake3Hash::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            padding: [0; _],
        }
    }

    /// Check solution validity
    ///
    /// Verification steps, in order:
    /// 1. Proof of space is valid (via the provided `PotVerifier` implementation)
    /// 2. Solution distance of the masked chunk is within `solution_range`
    /// 3. The chunk belongs to the record (Merkle proof against `record_root`)
    /// 4. Optionally, when `piece_check_params` is `Some`: history size is not in the future,
    ///    piece offset is within sector bounds, sector is not expired, and the piece is part
    ///    of blockchain history
    ///
    /// Returns `Ok(())` for a valid solution, otherwise a [`SolutionVerifyError`] describing
    /// the first check that failed.
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            proof_of_time,
            solution_range,
            piece_check_params,
        } = params;

        let sector_id = SectorId::new(&self.public_key_hash, self.sector_index, self.history_size);

        // Derive the per-slot challenge and the s-bucket this sector must audit
        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index.into(),
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        };

        // Masked chunk (chunk XOR proof-of-space hash) is what gets audited against the
        // challenge
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(*solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range: *solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
        //  to succeed.
        const _: () = {
            assert!(Record::NUM_S_BUCKETS == 65536);
        };
        // Check that chunk belongs to the record
        if !BalancedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            // A solution cannot be created for history that doesn't exist yet
            if &self.history_size > current_history_size {
                return Err(SolutionVerifyError::FutureHistorySize {
                    current: *current_history_size,
                    solution: self.history_size,
                });
            }

            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }

            // Expiration can only be checked when the corresponding segment root is available
            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}