// ab_farmer_components/proving.rs

1//! Utilities for turning solution candidates (from auditing) into solutions (proving)
2//!
3//! Solutions generated by [`auditing`](crate::auditing) need to be converted into actual solutions
4//! before they can be sent to the node and this is exactly what this module is about.
5
6use crate::auditing::ChunkCandidate;
7use crate::reading::{
8    ReadingError, read_record_metadata, read_sector_record_chunks, recover_extended_record_chunks,
9};
10use crate::sector::{
11    RecordMetadata, SectorContentsMap, SectorContentsMapFromBytesError, SectorMetadataChecksummed,
12};
13use crate::shard_commitment::{ShardCommitmentsRootsCache, derive_solution_shard_commitment};
14use crate::{ReadAt, ReadAtSync};
15use ab_core_primitives::hashes::Blake3Hash;
16use ab_core_primitives::pieces::{PieceOffset, Record, RecordChunk, RecordRoot};
17use ab_core_primitives::pos::PosSeed;
18use ab_core_primitives::sectors::{SBucket, SectorId};
19use ab_core_primitives::shard::NumShards;
20use ab_core_primitives::solutions::{
21    ChunkProof, ShardMembershipEntropy, Solution, SolutionDistance,
22};
23use ab_erasure_coding::ErasureCoding;
24use ab_merkle_tree::balanced::BalancedMerkleTree;
25use ab_proof_of_space::PosProofs;
26use futures::FutureExt;
27use std::collections::VecDeque;
28use std::io;
29use thiserror::Error;
30
/// Solutions that can be proven if necessary.
///
/// Solutions are generated on demand during iteration, hence this is an [`ExactSizeIterator`]
/// rather than a pre-computed collection: proving work is only done for items actually consumed.
pub trait ProvableSolutions: ExactSizeIterator {
    /// Best solution distance found, `None` in case there are no solutions
    fn best_solution_distance(&self) -> Option<SolutionDistance>;
}
38
/// Errors that happen during proving
#[derive(Debug, Error)]
pub enum ProvingError {
    /// Failed to create polynomial for record
    #[error("Failed to create polynomial for record at offset {piece_offset}: {error}")]
    FailedToCreatePolynomialForRecord {
        /// Piece offset
        piece_offset: PieceOffset,
        /// Lower-level error
        error: String,
    },
    /// Failed to decode sector contents map
    #[error("Failed to decode sector contents map: {0}")]
    FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
    /// I/O error occurred
    #[error("Proving I/O error: {0}")]
    Io(#[from] io::Error),
    /// Record reading error
    #[error("Record reading error: {0}")]
    RecordReadingError(#[from] ReadingError),
}
60
61impl ProvingError {
62    /// Whether this error is fatal and makes farm unusable
63    pub fn is_fatal(&self) -> bool {
64        match self {
65            ProvingError::FailedToCreatePolynomialForRecord { .. } => false,
66            ProvingError::FailedToDecodeSectorContentsMap(_) => false,
67            ProvingError::Io(_) => true,
68            ProvingError::RecordReadingError(error) => error.is_fatal(),
69        }
70    }
71}
72
/// A chunk candidate resolved to its piece offset, ready to be proven in [`SolutionsIterator`].
#[derive(Debug, Clone)]
struct WinningChunk {
    /// Piece offset in a sector
    piece_offset: PieceOffset,
    /// Solution distance of this chunk
    solution_distance: SolutionDistance,
}
80
/// Container for solution candidates.
///
/// [`SolutionCandidates::into_solutions`] is used to get an iterator over proven solutions that are
/// generated on demand during iteration.
#[derive(Debug)]
pub struct SolutionCandidates<'a, Sector>
where
    Sector: 'a,
{
    // Hash of the public key the solutions are produced for
    public_key_hash: &'a Blake3Hash,
    // Identifier of the sector the candidates came from
    sector_id: SectorId,
    // Cache used to derive the shard commitment included in each solution
    shard_commitments_roots_cache: &'a ShardCommitmentsRootsCache,
    shard_membership_entropy: ShardMembershipEntropy,
    num_shards: NumShards,
    // S-bucket all candidates belong to (auditing works per s-bucket)
    s_bucket: SBucket,
    // Reader for the plotted sector contents
    sector: Sector,
    sector_metadata: &'a SectorMetadataChecksummed,
    // Candidates produced by auditing, to be resolved and proven lazily
    chunk_candidates: VecDeque<ChunkCandidate>,
}
100
101impl<'a, Sector> Clone for SolutionCandidates<'a, Sector>
102where
103    Sector: Clone + 'a,
104{
105    fn clone(&self) -> Self {
106        Self {
107            public_key_hash: self.public_key_hash,
108            sector_id: self.sector_id,
109            shard_commitments_roots_cache: self.shard_commitments_roots_cache,
110            shard_membership_entropy: self.shard_membership_entropy,
111            num_shards: self.num_shards,
112            s_bucket: self.s_bucket,
113            sector: self.sector.clone(),
114            sector_metadata: self.sector_metadata,
115            chunk_candidates: self.chunk_candidates.clone(),
116        }
117    }
118}
119
impl<'a, Sector> SolutionCandidates<'a, Sector>
where
    Sector: ReadAtSync + 'a,
{
    /// Create a new container of solution candidates from auditing results.
    ///
    /// All candidates are expected to belong to the same `s_bucket` of the same sector.
    #[expect(clippy::too_many_arguments, reason = "Private API")]
    pub(crate) fn new(
        public_key_hash: &'a Blake3Hash,
        sector_id: SectorId,
        shard_commitments_roots_cache: &'a ShardCommitmentsRootsCache,
        shard_membership_entropy: ShardMembershipEntropy,
        num_shards: NumShards,
        s_bucket: SBucket,
        sector: Sector,
        sector_metadata: &'a SectorMetadataChecksummed,
        chunk_candidates: VecDeque<ChunkCandidate>,
    ) -> Self {
        Self {
            public_key_hash,
            sector_id,
            shard_commitments_roots_cache,
            shard_membership_entropy,
            num_shards,
            s_bucket,
            sector,
            sector_metadata,
            chunk_candidates,
        }
    }

    /// Total number of candidates
    pub fn len(&self) -> usize {
        self.chunk_candidates.len()
    }

    /// Returns true if no candidates inside
    pub fn is_empty(&self) -> bool {
        self.chunk_candidates.is_empty()
    }

    /// Turn solution candidates into actual solutions
    ///
    /// Returns a lazy iterator: each call to `next()` does the actual proving work (PoSpace
    /// proof generation, record chunk reading/recovery, Merkle proofs) for one candidate.
    /// Errors from constructing the iterator itself (e.g. failing to read the sector contents
    /// map) are returned eagerly here.
    pub fn into_solutions<PosProofGenerator>(
        self,
        erasure_coding: &'a ErasureCoding,
        table_generator: PosProofGenerator,
    ) -> Result<impl ProvableSolutions<Item = MaybeSolution> + 'a, ProvingError>
    where
        PosProofGenerator: (FnMut(&PosSeed) -> Box<PosProofs>) + 'a,
    {
        SolutionsIterator::<'a, _, _>::new(
            self.public_key_hash,
            self.sector_id,
            self.shard_commitments_roots_cache,
            self.shard_membership_entropy,
            self.num_shards,
            self.s_bucket,
            self.sector,
            self.sector_metadata,
            erasure_coding,
            self.chunk_candidates,
            table_generator,
        )
    }
}
183
184type MaybeSolution = Result<Solution, ProvingError>;
185
/// Iterator that lazily proves winning chunks into solutions, one per `next()` call.
struct SolutionsIterator<'a, PosProofGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosProofGenerator: (FnMut(&PosSeed) -> Box<PosProofs>) + 'a,
{
    public_key_hash: &'a Blake3Hash,
    sector_id: SectorId,
    shard_commitments_roots_cache: &'a ShardCommitmentsRootsCache,
    shard_membership_entropy: ShardMembershipEntropy,
    num_shards: NumShards,
    // S-bucket shared by all winning chunks in this iterator
    s_bucket: SBucket,
    sector_metadata: &'a SectorMetadataChecksummed,
    // Per-s-bucket offsets used to locate record chunks within the sector
    s_bucket_offsets: Box<[u32; Record::NUM_S_BUCKETS]>,
    erasure_coding: &'a ErasureCoding,
    // Decoded once in `new()` from the beginning of the sector
    sector_contents_map: SectorContentsMap,
    sector: ReadAt<Sector, !>,
    // Remaining chunks to prove; popped from the front on each `next()` call
    winning_chunks: VecDeque<WinningChunk>,
    // Remaining item count, kept in sync with `winning_chunks` for `size_hint()`
    count: usize,
    best_solution_distance: Option<SolutionDistance>,
    table_generator: PosProofGenerator,
}
207
// `size_hint()` returns exact `(count, Some(count))` bounds, so the default `len()` is correct
impl<'a, PosProofGenerator, Sector> ExactSizeIterator
    for SolutionsIterator<'a, PosProofGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosProofGenerator: (FnMut(&PosSeed) -> Box<PosProofs>) + 'a,
{
}
215
impl<'a, PosProofGenerator, Sector> Iterator for SolutionsIterator<'a, PosProofGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosProofGenerator: (FnMut(&PosSeed) -> Box<PosProofs>) + 'a,
{
    type Item = MaybeSolution;

    // Proves the next winning chunk into a complete `Solution`. All heavy work (PoSpace proof
    // generation, sector reads, erasure recovery, Merkle tree construction) happens here, on
    // demand, rather than up front.
    fn next(&mut self) -> Option<Self::Item> {
        let WinningChunk {
            piece_offset,
            solution_distance: _,
        } = self.winning_chunks.pop_front()?;

        // Decrement before proving so `size_hint()` stays exact even if proving below fails
        self.count -= 1;

        // Derive PoSpace proofs
        let pos_proofs =
            (self.table_generator)(&self.sector_id.derive_evaluation_seed(piece_offset));

        let maybe_solution = try {
            let sector_record_chunks_fut = read_sector_record_chunks(
                piece_offset,
                self.sector_metadata.pieces_in_sector,
                &self.s_bucket_offsets,
                &self.sector_contents_map,
                &pos_proofs,
                &self.sector,
            );
            // The sector reader is synchronous, so the future resolves immediately
            let sector_record_chunks = sector_record_chunks_fut
                .now_or_never()
                .expect("Sync reader; qed")
                .map_err(ProvingError::RecordReadingError)?;

            // The chunk that won the audit, extracted before recovery consumes source chunks
            let chunk = sector_record_chunks
                .get(usize::from(self.s_bucket))
                .expect("Within s-bucket range; qed")
                .expect("Winning chunk was plotted; qed");

            // Recover all extended record chunks (needed to build the record Merkle tree below)
            let chunks = recover_extended_record_chunks(
                &sector_record_chunks,
                piece_offset,
                self.erasure_coding,
            )
            .map_err(ProvingError::RecordReadingError)?;
            drop(sector_record_chunks);

            // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
            //  allows the code to compile. Constant 65536 is hardcoded here and below for
            //  compilation to succeed.
            const {
                assert!(Record::NUM_S_BUCKETS == 65536);
            }
            let record_merkle_tree = BalancedMerkleTree::<65536>::new_boxed(
                RecordChunk::slice_to_repr(chunks.as_slice())
                    .try_into()
                    .expect("Statically guaranteed to have correct length; qed"),
            );

            // NOTE: We do not check plot consistency using checksum because it is more
            // expensive and consensus will verify validity of the proof anyway
            let record_metadata_fut = read_record_metadata(
                piece_offset,
                self.sector_metadata.pieces_in_sector,
                &self.sector,
            );
            let RecordMetadata {
                piece_header,
                piece_checksum: _,
            } = record_metadata_fut
                .now_or_never()
                .expect("Sync reader; qed")
                .map_err(ProvingError::RecordReadingError)?;

            let proof_of_space = pos_proofs.for_s_bucket(self.s_bucket).expect(
                "Proof exists for this s-bucket, otherwise it wouldn't be a winning chunk; qed",
            );

            // Merkle proof for the winning chunk's position within the record
            let chunk_proof = record_merkle_tree
                .all_proofs()
                .nth(usize::from(self.s_bucket))
                .expect("Chunk offset is valid, hence corresponding proof exists; qed");

            let history_size = self.sector_metadata.history_size;
            let shard_commitment = derive_solution_shard_commitment(
                self.public_key_hash,
                &self.shard_commitments_roots_cache.shard_commitments_seed(),
                &self.shard_commitments_roots_cache.get(history_size),
                history_size,
                &self.shard_membership_entropy,
                self.num_shards,
            );

            Solution {
                public_key_hash: *self.public_key_hash,
                shard_commitment,
                piece_local_segment_index: piece_header.local_segment_index.as_inner(),
                piece_super_segment_index: piece_header.super_segment_index.as_inner(),
                segment_root: piece_header.segment_root,
                segment_proof: piece_header.segment_proof,
                record_root: RecordRoot::from(record_merkle_tree.root()),
                record_proof: piece_header.record_proof,
                chunk,
                chunk_proof: ChunkProof::from(chunk_proof),
                proof_of_space,
                history_size,
                sector_index: self.sector_metadata.sector_index,
                piece_offset,
                segment_position: piece_header.segment_position.as_inner(),
                piece_shard_index: piece_header.shard_index.as_inner(),
                padding: [0; _],
            }
        };

        match maybe_solution {
            Ok(solution) => Some(Ok(solution)),
            Err(error) => Some(Err(error)),
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact bounds: required for the `ExactSizeIterator` impl to be correct
        (self.count, Some(self.count))
    }
}
339
impl<'a, PosProofGenerator, Sector> ProvableSolutions
    for SolutionsIterator<'a, PosProofGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosProofGenerator: (FnMut(&PosSeed) -> Box<PosProofs>) + 'a,
{
    // Pre-computed in `new()`, so querying it does not consume or prove any candidates
    fn best_solution_distance(&self) -> Option<SolutionDistance> {
        self.best_solution_distance
    }
}
350
impl<'a, PosProofGenerator, Sector> SolutionsIterator<'a, PosProofGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosProofGenerator: (FnMut(&PosSeed) -> Box<PosProofs>) + 'a,
{
    /// Construct the iterator: decode the sector contents map, resolve each chunk candidate to
    /// its piece offset, and record the best solution distance.
    ///
    /// Only the sector contents map is read here; per-candidate proving work is deferred to
    /// `next()`.
    #[expect(clippy::too_many_arguments)]
    fn new(
        public_key_hash: &'a Blake3Hash,
        sector_id: SectorId,
        shard_commitments_roots_cache: &'a ShardCommitmentsRootsCache,
        shard_membership_entropy: ShardMembershipEntropy,
        num_shards: NumShards,
        s_bucket: SBucket,
        sector: Sector,
        sector_metadata: &'a SectorMetadataChecksummed,
        erasure_coding: &'a ErasureCoding,
        chunk_candidates: VecDeque<ChunkCandidate>,
        table_generator: PosProofGenerator,
    ) -> Result<Self, ProvingError> {
        // The sector contents map is stored at the very beginning of the sector (offset 0)
        let sector_contents_map = {
            let mut sector_contents_map_bytes =
                vec![0; SectorContentsMap::encoded_size(sector_metadata.pieces_in_sector)];

            sector.read_at(&mut sector_contents_map_bytes, 0)?;

            SectorContentsMap::from_bytes(
                &sector_contents_map_bytes,
                sector_metadata.pieces_in_sector,
            )?
        };

        // Candidates reference chunks by offset within the s-bucket; translate each offset into
        // the piece offset it belongs to
        let s_bucket_piece_offsets = sector_contents_map
            .iter_s_bucket_piece_offsets(s_bucket)
            .expect("S-bucket audit index is guaranteed to be in range; qed")
            .collect::<Vec<_>>();
        let winning_chunks = chunk_candidates
            .into_iter()
            .map(move |chunk_candidate| {
                let piece_offset = s_bucket_piece_offsets
                    .get(chunk_candidate.chunk_offset as usize)
                    .expect("Wouldn't be a candidate if wasn't within s-bucket; qed");

                WinningChunk {
                    piece_offset: *piece_offset,
                    solution_distance: chunk_candidate.solution_distance,
                }
            })
            .collect::<VecDeque<_>>();

        // NOTE(review): taking the front assumes candidates arrive sorted by solution distance
        // (best first), as produced by auditing — confirm against `crate::auditing`
        let best_solution_distance = winning_chunks
            .front()
            .map(|winning_chunk| winning_chunk.solution_distance);

        let s_bucket_offsets = sector_metadata.s_bucket_offsets();

        let count = winning_chunks.len();

        Ok(Self {
            public_key_hash,
            sector_id,
            shard_commitments_roots_cache,
            shard_membership_entropy,
            num_shards,
            s_bucket,
            sector_metadata,
            s_bucket_offsets,
            erasure_coding,
            sector_contents_map,
            sector: ReadAt::from_sync(sector),
            winning_chunks,
            count,
            best_solution_distance,
            table_generator,
        })
    }
}
426}