// ab_core_primitives/solutions.rs

1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::hashes::Blake3Hash;
5use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
6use crate::pos::{PosProof, PosSeed};
7use crate::pot::{PotOutput, SlotNumber};
8use crate::sectors::{SectorId, SectorIndex, SectorSlotChallenge};
9use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
10use ab_io_type::trivial_type::TrivialType;
11use ab_merkle_tree::balanced::BalancedMerkleTree;
12use blake3::OUT_LEN;
13use core::fmt;
14use core::simd::Simd;
15use derive_more::{Add, AddAssign, Deref, DerefMut, Display, From, Into, Sub, SubAssign};
16#[cfg(feature = "scale-codec")]
17use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
18#[cfg(feature = "scale-codec")]
19use scale_info::TypeInfo;
20#[cfg(feature = "serde")]
21use serde::{Deserialize, Serialize};
22#[cfg(feature = "serde")]
23use serde::{Deserializer, Serializer};
24#[cfg(feature = "serde")]
25use serde_big_array::BigArray;
26
/// Solution distance.
///
/// Newtype over `u64` produced by [`SolutionDistance::calculate()`]: the wrap-around distance
/// between the global challenge and an audit chunk (both interpreted as little-endian `u64`s).
/// Smaller is better; eligibility is checked with [`SolutionDistance::is_within()`].
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
39impl SolutionDistance {
40    /// Maximum value
41    pub const MAX: Self = Self(u64::MAX / 2);
42
43    // TODO: Remove once `From` is stable
44    /// Create a new instance
45    #[inline(always)]
46    pub const fn from_u64(n: u64) -> Self {
47        Self(n)
48    }
49
50    /// Calculate solution distance for given parameters.
51    ///
52    /// Typically used as a primitive to check whether solution distance is within solution range
53    /// (see [`Self::is_within()`]).
54    pub fn calculate(
55        global_challenge: &Blake3Hash,
56        chunk: &[u8; 32],
57        sector_slot_challenge: &SectorSlotChallenge,
58    ) -> Self {
59        // TODO: Is keyed hash really needed here?
60        let audit_chunk = *blake3::keyed_hash(sector_slot_challenge, chunk).as_bytes();
61        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
62            audit_chunk[0],
63            audit_chunk[1],
64            audit_chunk[2],
65            audit_chunk[3],
66            audit_chunk[4],
67            audit_chunk[5],
68            audit_chunk[6],
69            audit_chunk[7],
70        ]);
71        let global_challenge_as_solution_range: SolutionRange =
72            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
73
74        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
75    }
76
77    /// Check if solution distance is within the provided solution range
78    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
79        self.0 <= solution_range.as_u64() / 2
80    }
81}
82
/// Solution range.
///
/// Newtype over `u64` defining how close a solution has to be to the challenge in order to be
/// eligible (see [`SolutionDistance::is_within()`]): a larger range admits more solutions.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
110
111impl SolutionRange {
112    /// Size in bytes
113    pub const SIZE: usize = size_of::<u64>();
114    /// Minimum value
115    pub const MIN: Self = Self(u64::MIN);
116    /// Maximum value
117    pub const MAX: Self = Self(u64::MAX);
118
119    // TODO: Remove once `From` is stable
120    /// Create a new instance
121    #[inline(always)]
122    pub const fn new(n: u64) -> Self {
123        Self(n)
124    }
125
126    // TODO: Remove once `From` is stable
127    /// Get internal representation
128    #[inline(always)]
129    pub const fn as_u64(self) -> u64 {
130        self.0
131    }
132
133    /// Create a new instance from bytes
134    #[inline(always)]
135    pub fn to_bytes(self) -> [u8; 8] {
136        self.0.to_le_bytes()
137    }
138
139    /// Create a new instance from bytes
140    #[inline(always)]
141    pub fn from_bytes(bytes: [u8; 8]) -> Self {
142        Self(u64::from_le_bytes(bytes))
143    }
144
145    /// Computes the following:
146    /// ```text
147    /// MAX * slot_probability / chunks * s_buckets / pieces
148    /// ```
149    #[inline]
150    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
151        let solution_range = u64::MAX
152            // Account for slot probability
153            / slot_probability.1 * slot_probability.0
154            // Now take the probability of hitting occupied s-bucket in a piece into account
155            / Record::NUM_CHUNKS as u64
156            * Record::NUM_S_BUCKETS as u64;
157
158        // Take the number of pieces into account
159        Self(solution_range / pieces)
160    }
161
162    /// Computes the following:
163    /// ```text
164    /// MAX * slot_probability / chunks * s_buckets / solution_range
165    /// ```
166    #[inline]
167    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
168        let pieces = u64::MAX
169            // Account for slot probability
170            / slot_probability.1 * slot_probability.0
171            // Now take the probability of hitting occupied s-bucket in sector into account
172            / Record::NUM_CHUNKS as u64
173            * Record::NUM_S_BUCKETS as u64;
174
175        // Take solution range into account
176        pieces / self.0
177    }
178
179    /// Bidirectional distance between two solution ranges
180    #[inline]
181    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
182        let a = self.0;
183        let b = other.0;
184        let diff = a.wrapping_sub(b);
185        let diff2 = b.wrapping_sub(a);
186        // Find smaller diff between 2 directions
187        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
188    }
189
190    /// Derives next solution range
191    #[inline]
192    pub fn derive_next(
193        self,
194        slots_in_last_era: SlotNumber,
195        slot_probability: (u64, u64),
196        era_duration: BlockNumber,
197    ) -> Self {
198        // The idea here is to keep block production at the same pace while space pledged on the
199        // network changes. For this, we adjust the previous solution range according to actual and
200        // expected number of blocks per era.
201        //
202        // Below is code analogous to the following, but without using floats:
203        // ```rust
204        // let actual_slots_per_block = slots_in_last_era as f64 / era_duration as f64;
205        // let expected_slots_per_block =
206        //     slot_probability.1 as f64 / slot_probability.0 as f64;
207        // let adjustment_factor =
208        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
209        //
210        // next_solution_range =
211        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
212        // ```
213        let current_solution_range = self.0;
214        let next_solution_range = u64::try_from(
215            u128::from(current_solution_range)
216                .saturating_mul(u128::from(slots_in_last_era))
217                .saturating_mul(u128::from(slot_probability.0))
218                / u128::from(u64::from(era_duration))
219                / u128::from(slot_probability.1),
220        );
221
222        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
223            current_solution_range / 4,
224            current_solution_range.saturating_mul(4),
225        ))
226    }
227}
228
// Quick compile-time test to ensure `from_pieces()` and `to_pieces()` are the inverse of each
// other for representative inputs
const _: () = {
    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
};
235
/// Proof for chunk contained within a record.
///
/// A sequence of BLAKE3 hashes (one per Merkle tree level) proving inclusion of a chunk in a
/// record; verified against the record root in [`Solution::verify()`].
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
244
245impl fmt::Debug for ChunkProof {
246    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
247        write!(f, "[")?;
248        for hash in self.0 {
249            for byte in hash {
250                write!(f, "{byte:02x}")?;
251            }
252            write!(f, ", ")?;
253        }
254        write!(f, "]")?;
255        Ok(())
256    }
257}
258
/// Serde helper carrying [`ChunkProof`]'s raw bytes for non-human-readable (binary) formats
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
263
/// Serde helper carrying a single hash of [`ChunkProof`] hex-encoded for human-readable formats
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
// `repr(transparent)` guarantees this newtype has exactly the same memory layout as the inner
// `[u8; OUT_LEN]`; the `unsafe` transmutes in `ChunkProof`'s `Serialize`/`Deserialize` impls rely
// on that, and without an explicit repr the layout of a Rust struct is not formally guaranteed
#[repr(transparent)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
268
/// Serde helper carrying [`ChunkProof`] as an array of hex-encoded hashes for human-readable
/// formats
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
273
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    /// Serializes as hex strings for human-readable formats, raw bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is a single-field newtype over `[u8; OUT_LEN]`, so both
            // array types have the same size; soundness of this transmute additionally requires
            // `ChunkProofHexHash` to be `#[repr(transparent)]`, because the layout of a struct
            // with the default repr is not formally guaranteed.
            // NOTE(review): verify that attribute is actually present on `ChunkProofHexHash`.
            ChunkProofHex(unsafe {
                core::mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
296
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    /// Deserializes from hex strings for human-readable formats, raw bytes otherwise
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is a single-field newtype over `[u8; OUT_LEN]`, so both
            // array types have the same size; soundness of this transmute additionally requires
            // `ChunkProofHexHash` to be `#[repr(transparent)]`, because the layout of a struct
            // with the default repr is not formally guaranteed.
            // NOTE(review): verify that attribute is actually present on `ChunkProofHexHash`.
            unsafe {
                core::mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
318
319impl Default for ChunkProof {
320    #[inline]
321    fn default() -> Self {
322        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
323    }
324}
325
326impl AsRef<[u8]> for ChunkProof {
327    #[inline]
328    fn as_ref(&self) -> &[u8] {
329        self.0.as_flattened()
330    }
331}
332
333impl AsMut<[u8]> for ChunkProof {
334    #[inline]
335    fn as_mut(&mut self) -> &mut [u8] {
336        self.0.as_flattened_mut()
337    }
338}
339
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // One hash per level of the balanced Merkle tree over `Record::NUM_S_BUCKETS` leaves,
    // i.e. log2 of the number of s-buckets
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
345
346/// Solution verification errors
347#[derive(Debug, Eq, PartialEq, thiserror::Error)]
348pub enum SolutionVerifyError {
349    /// Invalid piece offset
350    #[error("Piece verification failed")]
351    InvalidPieceOffset {
352        /// Index of the piece that failed verification
353        piece_offset: u16,
354        /// How many pieces one sector is supposed to contain (max)
355        max_pieces_in_sector: u16,
356    },
357    /// History size is in the future
358    #[error("History size {solution} is in the future, current is {current}")]
359    FutureHistorySize {
360        /// Current history size
361        current: HistorySize,
362        /// History size solution was created for
363        solution: HistorySize,
364    },
365    /// Sector expired
366    #[error("Sector expired")]
367    SectorExpired {
368        /// Expiration history size
369        expiration_history_size: HistorySize,
370        /// Current history size
371        current_history_size: HistorySize,
372    },
373    /// Piece verification failed
374    #[error("Piece verification failed")]
375    InvalidPiece,
376    /// Solution is outside the solution range
377    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
378    OutsideSolutionRange {
379        /// Solution range
380        solution_range: SolutionRange,
381        /// Solution distance
382        solution_distance: SolutionDistance,
383    },
384    /// Invalid proof of space
385    #[error("Invalid proof of space")]
386    InvalidProofOfSpace,
387    /// Invalid audit chunk offset
388    #[error("Invalid audit chunk offset")]
389    InvalidAuditChunkOffset,
390    /// Invalid chunk proof
391    #[error("Invalid chunk proof")]
392    InvalidChunkProof,
393    /// Invalid history size
394    #[error("Invalid history size")]
395    InvalidHistorySize,
396}
397
/// Parameters for checking piece validity during solution verification (see
/// [`SolutionVerifyParams::piece_check_params`])
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which the piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
417
/// Parameters for solution verification (see [`Solution::verify()`])
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}
431
/// Proof-of-time verifier to be used in [`Solution::verify()`]
// NOTE(review): despite the `Pot` in the name, the method takes `PosSeed`/`PosProof`, i.e. it
// checks a proof-of-*space* — confirm the naming is intentional
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, challenge_index: u32, proof: &PosProof) -> bool;
}
437
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Hash of the public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Record root that can be used to verify that the piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk at the above offset
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
469
impl Solution {
    /// Fake solution for the genesis block.
    ///
    /// All fields are zero/default placeholder values.
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Blake3Hash::default(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            padding: [0; _],
        }
    }

    /// Check solution validity.
    ///
    /// Verifies, in order: the proof of space, that the solution distance of the masked chunk is
    /// within `params.solution_range`, that the chunk is included in the record (Merkle proof
    /// against `record_root`), and — only when `params.piece_check_params` is provided — that the
    /// piece itself is a valid part of the blockchain history and the sector has not expired.
    ///
    /// Returns `Ok(())` for a valid solution, otherwise the first [`SolutionVerifyError`]
    /// encountered.
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            proof_of_time,
            solution_range,
            piece_check_params,
        } = params;

        // Sector identity is derived from the farmer, sector index and history size at plotting
        let sector_id = SectorId::new(&self.public_key_hash, self.sector_index, self.history_size);

        // Derive the per-slot challenge and the s-bucket this sector must audit for it
        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index.into(),
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        };

        // XOR the chunk with the proof-of-space hash (masking) before computing solution distance
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(*solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range: *solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
        //  to succeed.
        const _: () = {
            assert!(Record::NUM_S_BUCKETS == 65536);
        };
        // Check that chunk belongs to the record
        if !BalancedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        // Optional (but expensive) checks that tie the solution to actual blockchain history
        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            // A sector cannot have been plotted against history that doesn't exist yet
            if &self.history_size > current_history_size {
                return Err(SolutionVerifyError::FutureHistorySize {
                    current: *current_history_size,
                    solution: self.history_size,
                });
            }

            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }

            // Expiration can only be checked when the corresponding segment root is available
            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            // Position of the piece within its segment, used to verify the record proof
            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}
612}