// ab_core_primitives/solutions.rs

1//! Solutions-related data structures and functions.
2
3use crate::block::BlockNumber;
4use crate::ed25519::Ed25519PublicKey;
5use crate::hashes::Blake3Hash;
6use crate::pieces::{PieceOffset, Record, RecordChunk, RecordProof, RecordRoot};
7use crate::pos::{PosProof, PosSeed};
8use crate::pot::{PotOutput, SlotNumber};
9use crate::sectors::{SBucket, SectorId, SectorIndex, SectorSlotChallenge};
10use crate::segments::{HistorySize, SegmentIndex, SegmentRoot};
11use ab_blake3::single_block_keyed_hash;
12use ab_io_type::trivial_type::TrivialType;
13use ab_merkle_tree::balanced::BalancedMerkleTree;
14use blake3::OUT_LEN;
15use core::fmt;
16use core::simd::Simd;
17use derive_more::{Add, AddAssign, Deref, DerefMut, Display, From, Into, Sub, SubAssign};
18#[cfg(feature = "scale-codec")]
19use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
20#[cfg(feature = "scale-codec")]
21use scale_info::TypeInfo;
22#[cfg(feature = "serde")]
23use serde::{Deserialize, Serialize};
24#[cfg(feature = "serde")]
25use serde::{Deserializer, Serializer};
26#[cfg(feature = "serde")]
27use serde_big_array::BigArray;
28
/// Solution distance
///
/// Newtype over `u64` produced by [`SolutionDistance::calculate()`] as the bidirectional distance
/// between the global challenge and an audit chunk, i.e. the distance measured in the shorter
/// direction around the `u64` ring (see [`SolutionRange::bidirectional_distance()`]).
#[derive(
    Debug, Display, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, From, Into,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionDistance(u64);
40
41impl SolutionDistance {
42    /// Maximum value
43    pub const MAX: Self = Self(u64::MAX / 2);
44
45    // TODO: Remove once `From` is stable
46    /// Create a new instance
47    #[inline(always)]
48    pub const fn from_u64(n: u64) -> Self {
49        Self(n)
50    }
51
52    /// Calculate solution distance for given parameters.
53    ///
54    /// Typically used as a primitive to check whether solution distance is within solution range
55    /// (see [`Self::is_within()`]).
56    pub fn calculate(
57        global_challenge: &Blake3Hash,
58        chunk: &[u8; 32],
59        sector_slot_challenge: &SectorSlotChallenge,
60    ) -> Self {
61        // TODO: Is keyed hash really needed here?
62        let audit_chunk = single_block_keyed_hash(sector_slot_challenge, chunk)
63            .expect("Less than a single block worth of bytes; qed");
64        let audit_chunk_as_solution_range: SolutionRange = SolutionRange::from_bytes([
65            audit_chunk[0],
66            audit_chunk[1],
67            audit_chunk[2],
68            audit_chunk[3],
69            audit_chunk[4],
70            audit_chunk[5],
71            audit_chunk[6],
72            audit_chunk[7],
73        ]);
74        let global_challenge_as_solution_range: SolutionRange =
75            SolutionRange::from_bytes(global_challenge.as_chunks().0[0]);
76
77        global_challenge_as_solution_range.bidirectional_distance(audit_chunk_as_solution_range)
78    }
79
80    /// Check if solution distance is within the provided solution range
81    pub const fn is_within(self, solution_range: SolutionRange) -> bool {
82        self.0 <= solution_range.as_u64() / 2
83    }
84}
85
/// Solution range
///
/// Newtype over `u64`, converted to/from bytes in little-endian order (see
/// [`SolutionRange::to_bytes()`]/[`SolutionRange::from_bytes()`]).
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SolutionRange(u64);
113
impl SolutionRange {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u64>();
    /// Minimum value
    pub const MIN: Self = Self(u64::MIN);
    /// Maximum value
    pub const MAX: Self = Self(u64::MAX);

    // TODO: Remove once `From` is stable
    /// Create a new instance
    #[inline(always)]
    pub const fn new(n: u64) -> Self {
        Self(n)
    }

    // TODO: Remove once `From` is stable
    /// Get internal representation
    #[inline(always)]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Convert to little-endian bytes
    #[inline(always)]
    pub fn to_bytes(self) -> [u8; 8] {
        self.0.to_le_bytes()
    }

    /// Create a new instance from little-endian bytes
    #[inline(always)]
    pub fn from_bytes(bytes: [u8; 8]) -> Self {
        Self(u64::from_le_bytes(bytes))
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / pieces
    /// ```
    ///
    /// # Panics
    /// Divides by `pieces` and `slot_probability.1`, so both must be non-zero (division by zero
    /// fails const evaluation/panics at runtime).
    #[inline]
    pub const fn from_pieces(pieces: u64, slot_probability: (u64, u64)) -> Self {
        // NOTE: divisions are interleaved with multiplications on purpose, so that intermediate
        // values stay within `u64` for the expected parameter ranges; do not reorder
        let solution_range = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in a piece into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take the number of pieces into account
        Self(solution_range / pieces)
    }

    /// Computes the following:
    /// ```text
    /// MAX * slot_probability / chunks * s_buckets / solution_range
    /// ```
    ///
    /// # Panics
    /// Divides by the solution range itself and by `slot_probability.1`, so both must be non-zero.
    #[inline]
    pub const fn to_pieces(self, slot_probability: (u64, u64)) -> u64 {
        // NOTE: same deliberate interleaving of divisions and multiplications as in
        // [`Self::from_pieces()`] to avoid intermediate overflow
        let pieces = u64::MAX
            // Account for slot probability
            / slot_probability.1 * slot_probability.0
            // Now take the probability of hitting occupied s-bucket in sector into account
            / Record::NUM_CHUNKS as u64
            * Record::NUM_S_BUCKETS as u64;

        // Take solution range into account
        pieces / self.0
    }

    /// Bidirectional distance between two solution ranges
    ///
    /// Measures the distance in both directions around the `u64` ring (wrapping subtraction both
    /// ways) and returns the smaller of the two.
    #[inline]
    pub const fn bidirectional_distance(self, other: Self) -> SolutionDistance {
        let a = self.0;
        let b = other.0;
        let diff = a.wrapping_sub(b);
        let diff2 = b.wrapping_sub(a);
        // Find smaller diff between 2 directions
        SolutionDistance::from_u64(if diff < diff2 { diff } else { diff2 })
    }

    /// Derives next solution range
    #[inline]
    pub fn derive_next(
        self,
        slots_in_last_interval: SlotNumber,
        slot_probability: (u64, u64),
        retarget_interval: BlockNumber,
    ) -> Self {
        // The idea here is to keep block production at the same pace while space pledged on the
        // network changes. For this, we adjust the previous solution range according to actual and
        // expected number of blocks per retarget interval.
        //
        // Below is code analogous to the following, but without using floats:
        // ```rust
        // let actual_slots_per_block = slots_in_last_interval as f64 / retarget_interval as f64;
        // let expected_slots_per_block =
        //     slot_probability.1 as f64 / slot_probability.0 as f64;
        // let adjustment_factor =
        //     (actual_slots_per_block / expected_slots_per_block).clamp(0.25, 4.0);
        //
        // next_solution_range =
        //     (solution_ranges.current as f64 * adjustment_factor).round() as u64;
        // ```
        let current_solution_range = self.0;
        // Intermediate math is done in `u128` so the product cannot overflow before the divisions
        let next_solution_range = u64::try_from(
            u128::from(current_solution_range)
                .saturating_mul(u128::from(slots_in_last_interval))
                .saturating_mul(u128::from(slot_probability.0))
                / u128::from(u64::from(retarget_interval))
                / u128::from(slot_probability.1),
        );

        // On `u64` overflow fall back to `u64::MAX`, then clamp the adjustment to the
        // [1/4, 4x] band around the current solution range (matches the float pseudo-code above)
        Self(next_solution_range.unwrap_or(u64::MAX).clamp(
            current_solution_range / 4,
            current_solution_range.saturating_mul(4),
        ))
    }
}
231
232// Quick test to ensure the functions above are the inverse of each other
233const _: () = {
234    assert!(SolutionRange::from_pieces(1, (1, 6)).to_pieces((1, 6)) == 1);
235    assert!(SolutionRange::from_pieces(3, (1, 6)).to_pieces((1, 6)) == 3);
236    assert!(SolutionRange::from_pieces(5, (1, 6)).to_pieces((1, 6)) == 5);
237};
238
/// Proof for chunk contained within a record.
///
/// A Merkle proof consisting of [`ChunkProof::NUM_HASHES`] hashes of [`OUT_LEN`] bytes each, used
/// in [`Solution::verify()`] to prove that a chunk belongs to a record.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Deref, DerefMut, From, Into, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[repr(C)]
pub struct ChunkProof([[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
247
248impl fmt::Debug for ChunkProof {
249    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
250        write!(f, "[")?;
251        for hash in self.0 {
252            for byte in hash {
253                write!(f, "{byte:02x}")?;
254            }
255            write!(f, ", ")?;
256        }
257        write!(f, "]")?;
258        Ok(())
259    }
260}
261
/// Helper used to serialize/deserialize [`ChunkProof`] contents in binary (non-human-readable)
/// serde formats
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofBinary(#[serde(with = "BigArray")] [[u8; OUT_LEN]; ChunkProof::NUM_HASHES]);
266
/// Helper used to hex-encode a single hash of a [`ChunkProof`] in human-readable serde formats.
///
/// `#[repr(C)]` guarantees this newtype has the same memory layout as its inner `[u8; OUT_LEN]`
/// field — the `unsafe` transmutes in `ChunkProof`'s `Serialize`/`Deserialize` impls rely on this
/// guarantee, and the default (unspecified) representation would not provide it.
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
#[repr(C)]
struct ChunkProofHexHash(#[serde(with = "hex")] [u8; OUT_LEN]);
271
/// Helper used to serialize/deserialize [`ChunkProof`] contents as an array of hex strings in
/// human-readable serde formats
#[cfg(feature = "serde")]
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
struct ChunkProofHex([ChunkProofHexHash; ChunkProof::NUM_HASHES]);
276
#[cfg(feature = "serde")]
impl Serialize for ChunkProof {
    /// Serializes as an array of hex strings for human-readable formats (e.g. JSON) and as raw
    /// bytes otherwise
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is a newtype over `[u8; OUT_LEN]`; this transmute is
            // only sound if that struct carries `#[repr(C)]`/`#[repr(transparent)]` so its layout
            // matches the inner array — NOTE(review): confirm the repr attribute is actually
            // present on `ChunkProofHexHash`
            ChunkProofHex(unsafe {
                core::mem::transmute::<
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                >(self.0)
            })
            .serialize(serializer)
        } else {
            ChunkProofBinary(self.0).serialize(serializer)
        }
    }
}
299
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for ChunkProof {
    /// Deserializes from an array of hex strings for human-readable formats and from raw bytes
    /// otherwise, mirroring the `Serialize` impl
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(Self(if deserializer.is_human_readable() {
            // SAFETY: `ChunkProofHexHash` is a newtype over `[u8; OUT_LEN]`; this transmute is
            // only sound if that struct carries `#[repr(C)]`/`#[repr(transparent)]` so its layout
            // matches the inner array — NOTE(review): confirm the repr attribute is actually
            // present on `ChunkProofHexHash`
            unsafe {
                core::mem::transmute::<
                    [ChunkProofHexHash; ChunkProof::NUM_HASHES],
                    [[u8; OUT_LEN]; ChunkProof::NUM_HASHES],
                >(ChunkProofHex::deserialize(deserializer)?.0)
            }
        } else {
            ChunkProofBinary::deserialize(deserializer)?.0
        }))
    }
}
321
322impl Default for ChunkProof {
323    #[inline]
324    fn default() -> Self {
325        Self([[0; OUT_LEN]; ChunkProof::NUM_HASHES])
326    }
327}
328
329impl AsRef<[u8]> for ChunkProof {
330    #[inline]
331    fn as_ref(&self) -> &[u8] {
332        self.0.as_flattened()
333    }
334}
335
336impl AsMut<[u8]> for ChunkProof {
337    #[inline]
338    fn as_mut(&mut self) -> &mut [u8] {
339        self.0.as_flattened_mut()
340    }
341}
342
impl ChunkProof {
    /// Size of chunk proof in bytes.
    pub const SIZE: usize = OUT_LEN * Self::NUM_HASHES;
    // One hash per level of a balanced Merkle tree over `Record::NUM_S_BUCKETS` leaves
    const NUM_HASHES: usize = Record::NUM_S_BUCKETS.ilog2() as usize;
}
348
349/// Solution verification errors
350#[derive(Debug, Eq, PartialEq, thiserror::Error)]
351pub enum SolutionVerifyError {
352    /// Invalid piece offset
353    #[error("Piece verification failed")]
354    InvalidPieceOffset {
355        /// Index of the piece that failed verification
356        piece_offset: u16,
357        /// How many pieces one sector is supposed to contain (max)
358        max_pieces_in_sector: u16,
359    },
360    /// History size is in the future
361    #[error("History size {solution} is in the future, current is {current}")]
362    FutureHistorySize {
363        /// Current history size
364        current: HistorySize,
365        /// History size solution was created for
366        solution: HistorySize,
367    },
368    /// Sector expired
369    #[error("Sector expired")]
370    SectorExpired {
371        /// Expiration history size
372        expiration_history_size: HistorySize,
373        /// Current history size
374        current_history_size: HistorySize,
375    },
376    /// Piece verification failed
377    #[error("Piece verification failed")]
378    InvalidPiece,
379    /// Solution is outside the solution range
380    #[error("Solution distance {solution_distance} is outside of solution range {solution_range}")]
381    OutsideSolutionRange {
382        /// Solution range
383        solution_range: SolutionRange,
384        /// Solution distance
385        solution_distance: SolutionDistance,
386    },
387    /// Invalid proof of space
388    #[error("Invalid proof of space")]
389    InvalidProofOfSpace,
390    /// Invalid chunk proof
391    #[error("Invalid chunk proof")]
392    InvalidChunkProof,
393    /// Invalid history size
394    #[error("Invalid history size")]
395    InvalidHistorySize,
396}
397
/// Parameters for checking piece validity
///
/// Consumed by [`Solution::verify()`] via [`SolutionVerifyParams::piece_check_params`]; when that
/// field is `None` these checks are skipped entirely.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyPieceCheckParams {
    /// How many pieces one sector is supposed to contain (max)
    pub max_pieces_in_sector: u16,
    /// Segment root of the segment to which piece belongs
    pub segment_root: SegmentRoot,
    /// Number of latest archived segments that are considered "recent history"
    pub recent_segments: HistorySize,
    /// Fraction of pieces from the "recent history" (`recent_segments`) in each sector
    pub recent_history_fraction: (HistorySize, HistorySize),
    /// Minimum lifetime of a plotted sector, measured in archived segments
    pub min_sector_lifetime: HistorySize,
    /// Current size of the history
    pub current_history_size: HistorySize,
    /// Segment root at `min_sector_lifetime` from sector creation (if exists)
    ///
    /// When present, sector expiration is checked; when `None`, the expiration check is skipped
    pub sector_expiration_check_segment_root: Option<SegmentRoot>,
}
417
/// Parameters for solution verification
///
/// Inputs to [`Solution::verify()`] besides the solution itself and the slot number.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
pub struct SolutionVerifyParams {
    /// Proof of time for which solution is built
    pub proof_of_time: PotOutput,
    /// Solution range
    pub solution_range: SolutionRange,
    /// Parameters for checking piece validity.
    ///
    /// If `None`, piece validity check will be skipped.
    pub piece_check_params: Option<SolutionVerifyPieceCheckParams>,
}
431
/// Proof-of-time verifier to be used in [`Solution::verify()`]
///
/// NOTE(review): despite "Pot" in the name, the method signature verifies a proof-of-*space*
/// ([`PosSeed`]/[`PosProof`]) — confirm the intended naming/doc wording
pub trait SolutionPotVerifier {
    /// Check whether proof created earlier is valid
    fn is_proof_valid(seed: &PosSeed, s_bucket: SBucket, proof: &PosProof) -> bool;
}
437
/// Farmer solution for slot challenge.
#[derive(Clone, Copy, Debug, Eq, PartialEq, TrivialType)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
#[repr(C)]
pub struct Solution {
    /// Hash of the public key of the farmer that created the solution
    pub public_key_hash: Blake3Hash,
    /// Record root that can be used to verify that piece was included in blockchain history
    pub record_root: RecordRoot,
    /// Proof for above record root
    pub record_proof: RecordProof,
    /// Chunk at the above offset
    pub chunk: RecordChunk,
    /// Proof for the above chunk
    pub chunk_proof: ChunkProof,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
    /// Size of the blockchain history at the time of sector creation
    pub history_size: HistorySize,
    /// Index of the sector where the solution was found
    pub sector_index: SectorIndex,
    /// Piece offset within sector
    pub piece_offset: PieceOffset,
    /// Padding for data structure alignment
    pub padding: [u8; 4],
}
469
impl Solution {
    /// Fake solution for the genesis block
    ///
    /// All fields are default/zero values; it is not expected to pass [`Self::verify()`].
    pub fn genesis_solution() -> Self {
        Self {
            public_key_hash: Ed25519PublicKey::default().hash(),
            record_root: RecordRoot::default(),
            record_proof: RecordProof::default(),
            chunk: RecordChunk::default(),
            chunk_proof: ChunkProof::default(),
            proof_of_space: PosProof::default(),
            history_size: HistorySize::from(SegmentIndex::ZERO),
            sector_index: SectorIndex::ZERO,
            piece_offset: PieceOffset::default(),
            padding: [0; _],
        }
    }

    /// Check solution validity
    ///
    /// Verifies, in order: the proof of space, that the solution distance derived from the masked
    /// chunk is within `solution_range`, that the chunk belongs to the record (Merkle proof), and
    /// — only when `piece_check_params` is provided — piece validity itself.
    ///
    /// # Errors
    /// Returns the [`SolutionVerifyError`] corresponding to the first check that failed.
    pub fn verify<PotVerifier>(
        &self,
        slot: SlotNumber,
        params: &SolutionVerifyParams,
    ) -> Result<(), SolutionVerifyError>
    where
        PotVerifier: SolutionPotVerifier,
    {
        let SolutionVerifyParams {
            proof_of_time,
            solution_range,
            piece_check_params,
        } = params;

        // Sector identity is derived from the farmer's public key hash, sector index and the
        // history size the sector was created at
        let sector_id = SectorId::new(&self.public_key_hash, self.sector_index, self.history_size);

        let global_challenge = proof_of_time.derive_global_challenge(slot);
        let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
        let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();

        // Check that proof of space is valid
        if !PotVerifier::is_proof_valid(
            &sector_id.derive_evaluation_seed(self.piece_offset),
            s_bucket_audit_index,
            &self.proof_of_space,
        ) {
            return Err(SolutionVerifyError::InvalidProofOfSpace);
        };

        // Derive the masked chunk by XOR-ing the chunk with the hash of the proof of space
        let masked_chunk =
            (Simd::from(*self.chunk) ^ Simd::from(*self.proof_of_space.hash())).to_array();

        let solution_distance =
            SolutionDistance::calculate(&global_challenge, &masked_chunk, &sector_slot_challenge);

        if !solution_distance.is_within(*solution_range) {
            return Err(SolutionVerifyError::OutsideSolutionRange {
                solution_range: *solution_range,
                solution_distance,
            });
        }

        // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
        //  allows the code to compile. Constant 65536 is hardcoded here and below for compilation
        //  to succeed.
        const _: () = {
            assert!(Record::NUM_S_BUCKETS == 65536);
        };
        // Check that chunk belongs to the record
        if !BalancedMerkleTree::<65536>::verify(
            &self.record_root,
            &self.chunk_proof,
            usize::from(s_bucket_audit_index),
            *self.chunk,
        ) {
            return Err(SolutionVerifyError::InvalidChunkProof);
        }

        // Piece validity checks are optional and skipped when `piece_check_params` is `None`
        if let Some(SolutionVerifyPieceCheckParams {
            max_pieces_in_sector,
            segment_root,
            recent_segments,
            recent_history_fraction,
            min_sector_lifetime,
            current_history_size,
            sector_expiration_check_segment_root,
        }) = piece_check_params
        {
            // A solution cannot reference history that does not exist yet
            if &self.history_size > current_history_size {
                return Err(SolutionVerifyError::FutureHistorySize {
                    current: *current_history_size,
                    solution: self.history_size,
                });
            }

            // Piece offset must fit within the sector
            if u16::from(self.piece_offset) >= *max_pieces_in_sector {
                return Err(SolutionVerifyError::InvalidPieceOffset {
                    piece_offset: u16::from(self.piece_offset),
                    max_pieces_in_sector: *max_pieces_in_sector,
                });
            }

            // Expiration is only checked when the corresponding segment root is provided
            if let Some(sector_expiration_check_segment_root) = sector_expiration_check_segment_root
            {
                let expiration_history_size = match sector_id.derive_expiration_history_size(
                    self.history_size,
                    sector_expiration_check_segment_root,
                    *min_sector_lifetime,
                ) {
                    Some(expiration_history_size) => expiration_history_size,
                    None => {
                        return Err(SolutionVerifyError::InvalidHistorySize);
                    }
                };

                // The sector must not have expired relative to the current history size
                if expiration_history_size <= *current_history_size {
                    return Err(SolutionVerifyError::SectorExpired {
                        expiration_history_size,
                        current_history_size: *current_history_size,
                    });
                }
            }

            let position = sector_id
                .derive_piece_index(
                    self.piece_offset,
                    self.history_size,
                    *max_pieces_in_sector,
                    *recent_segments,
                    *recent_history_fraction,
                )
                .position();

            // Check that piece is part of the blockchain history
            if !self
                .record_root
                .is_valid(segment_root, &self.record_proof, position)
            {
                return Err(SolutionVerifyError::InvalidPiece);
            }
        }

        Ok(())
    }
}