ab_farmer_components/
proving.rs

1//! Utilities for turning solution candidates (from auditing) into solutions (proving)
2//!
3//! Solutions generated by [`auditing`](crate::auditing) need to be converted into actual solutions
4//! before they can be sent to the node and this is exactly what this module is about.
5
6use crate::auditing::ChunkCandidate;
7use crate::reading::{
8    ReadSectorRecordChunksMode, ReadingError, read_record_metadata, read_sector_record_chunks,
9    recover_extended_record_chunks,
10};
11use crate::sector::{
12    SectorContentsMap, SectorContentsMapFromBytesError, SectorMetadataChecksummed,
13};
14use crate::{ReadAt, ReadAtSync};
15use ab_core_primitives::hashes::Blake3Hash;
16use ab_core_primitives::pieces::{PieceOffset, Record, RecordChunk};
17use ab_core_primitives::pos::PosSeed;
18use ab_core_primitives::sectors::{SBucket, SectorId};
19use ab_core_primitives::solutions::{ChunkProof, Solution, SolutionDistance};
20use ab_erasure_coding::ErasureCoding;
21use ab_merkle_tree::balanced::BalancedMerkleTree;
22use ab_proof_of_space::Table;
23use futures::FutureExt;
24use std::collections::VecDeque;
25use std::io;
26use thiserror::Error;
27
/// Solutions that can be proven if necessary.
///
/// Solutions are generated on demand during iteration: each `next()` call performs the
/// (potentially expensive) proving work for a single candidate, so callers can stop early once
/// they have enough solutions.
pub trait ProvableSolutions: ExactSizeIterator {
    /// Best solution distance found, `None` in case there are no solutions
    ///
    /// The implementation in this module computes this at construction time, so it can be
    /// queried without triggering any proving work.
    fn best_solution_distance(&self) -> Option<SolutionDistance>;
}
35
/// Errors that happen during proving
///
/// Use [`ProvingError::is_fatal`] to distinguish errors that make the whole farm unusable from
/// those that only affect a single solution candidate.
#[derive(Debug, Error)]
pub enum ProvingError {
    /// Failed to create polynomial for record
    #[error("Failed to create polynomial for record at offset {piece_offset}: {error}")]
    FailedToCreatePolynomialForRecord {
        /// Piece offset of the record within the sector
        piece_offset: PieceOffset,
        /// Lower-level error message
        error: String,
    },
    /// Failed to decode sector contents map
    #[error("Failed to decode sector contents map: {0}")]
    FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
    /// I/O error occurred (treated as fatal by [`ProvingError::is_fatal`])
    #[error("Proving I/O error: {0}")]
    Io(#[from] io::Error),
    /// Record reading error (fatality depends on the underlying [`ReadingError`])
    #[error("Record reading error: {0}")]
    RecordReadingError(#[from] ReadingError),
}
57
58impl ProvingError {
59    /// Whether this error is fatal and makes farm unusable
60    pub fn is_fatal(&self) -> bool {
61        match self {
62            ProvingError::FailedToCreatePolynomialForRecord { .. } => false,
63            ProvingError::FailedToDecodeSectorContentsMap(_) => false,
64            ProvingError::Io(_) => true,
65            ProvingError::RecordReadingError(error) => error.is_fatal(),
66        }
67    }
68}
69
/// A chunk candidate that was confirmed to correspond to an encoded (plotted) chunk and can
/// therefore be turned into a solution during iteration
#[derive(Debug, Clone)]
struct WinningChunk {
    /// Piece offset in a sector
    piece_offset: PieceOffset,
    /// Solution distance of this chunk
    solution_distance: SolutionDistance,
}
77
/// Container for solution candidates.
///
/// [`SolutionCandidates::into_solutions`] is used to get an iterator over proven solutions that are
/// generated on demand during iteration.
#[derive(Debug)]
pub struct SolutionCandidates<'a, Sector>
where
    Sector: 'a,
{
    /// Hash of the farmer's public key, copied into produced solutions
    public_key_hash: &'a Blake3Hash,
    /// Sector the candidates belong to
    sector_id: SectorId,
    /// Audited s-bucket the candidates came from
    s_bucket: SBucket,
    /// Reader for plotted sector contents
    sector: Sector,
    /// Metadata of the plotted sector
    sector_metadata: &'a SectorMetadataChecksummed,
    /// Chunk candidates produced by auditing
    chunk_candidates: VecDeque<ChunkCandidate>,
}
94
95impl<'a, Sector> Clone for SolutionCandidates<'a, Sector>
96where
97    Sector: Clone + 'a,
98{
99    fn clone(&self) -> Self {
100        Self {
101            public_key_hash: self.public_key_hash,
102            sector_id: self.sector_id,
103            s_bucket: self.s_bucket,
104            sector: self.sector.clone(),
105            sector_metadata: self.sector_metadata,
106            chunk_candidates: self.chunk_candidates.clone(),
107        }
108    }
109}
110
111impl<'a, Sector> SolutionCandidates<'a, Sector>
112where
113    Sector: ReadAtSync + 'a,
114{
115    pub(crate) fn new(
116        public_key_hash: &'a Blake3Hash,
117        sector_id: SectorId,
118        s_bucket: SBucket,
119        sector: Sector,
120        sector_metadata: &'a SectorMetadataChecksummed,
121        chunk_candidates: VecDeque<ChunkCandidate>,
122    ) -> Self {
123        Self {
124            public_key_hash,
125            sector_id,
126            s_bucket,
127            sector,
128            sector_metadata,
129            chunk_candidates,
130        }
131    }
132
133    /// Total number of candidates
134    pub fn len(&self) -> usize {
135        self.chunk_candidates.len()
136    }
137
138    /// Returns true if no candidates inside
139    pub fn is_empty(&self) -> bool {
140        self.chunk_candidates.is_empty()
141    }
142
143    /// Turn solution candidates into actual solutions
144    pub fn into_solutions<PosTable, TableGenerator>(
145        self,
146        erasure_coding: &'a ErasureCoding,
147        mode: ReadSectorRecordChunksMode,
148        table_generator: TableGenerator,
149    ) -> Result<impl ProvableSolutions<Item = MaybeSolution> + 'a, ProvingError>
150    where
151        PosTable: Table,
152        TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
153    {
154        SolutionsIterator::<'a, PosTable, _, _>::new(
155            self.public_key_hash,
156            self.sector_id,
157            self.s_bucket,
158            self.sector,
159            self.sector_metadata,
160            erasure_coding,
161            self.chunk_candidates,
162            mode,
163            table_generator,
164        )
165    }
166}
167
/// Outcome of proving one winning chunk: a complete [`Solution`] or the error that prevented it
type MaybeSolution = Result<Solution, ProvingError>;
169
/// Iterator over proven solutions, created via [`SolutionCandidates::into_solutions`].
///
/// Each `next()` call does the proving work for one winning chunk.
struct SolutionsIterator<'a, PosTable, TableGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosTable: Table,
    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
{
    /// Hash of the farmer's public key, copied into every produced solution
    public_key_hash: &'a Blake3Hash,
    /// Sector being proven, used to derive per-piece evaluation seeds
    sector_id: SectorId,
    /// S-bucket that was audited
    s_bucket: SBucket,
    /// Metadata of the plotted sector (pieces in sector, history size, sector index)
    sector_metadata: &'a SectorMetadataChecksummed,
    /// Per-s-bucket offsets used when reading record chunks
    s_bucket_offsets: Box<[u32; Record::NUM_S_BUCKETS]>,
    /// Erasure coding used to recover extended record chunks
    erasure_coding: &'a ErasureCoding,
    /// Decoded map of which record chunks are encoded where in the sector
    sector_contents_map: SectorContentsMap,
    /// Sector reader (sync reader wrapped in `ReadAt`, the `!` rules out the async variant)
    sector: ReadAt<Sector, !>,
    /// Remaining winning chunks, consumed from the front by `next()`
    winning_chunks: VecDeque<WinningChunk>,
    /// Number of remaining winning chunks, kept in sync for `size_hint()`
    count: usize,
    /// Distance of the first winning chunk, computed once at construction
    best_solution_distance: Option<SolutionDistance>,
    /// How sector record chunks should be read
    mode: ReadSectorRecordChunksMode,
    /// Generator of proof-of-space tables
    table_generator: TableGenerator,
}
190
// Sound because `size_hint()` returns the exact remaining `count`, which `next()` keeps in sync
// by decrementing it for every winning chunk it pops
impl<'a, PosTable, TableGenerator, Sector> ExactSizeIterator
    for SolutionsIterator<'a, PosTable, TableGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosTable: Table,
    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
{
}
199
200impl<'a, PosTable, TableGenerator, Sector> Iterator
201    for SolutionsIterator<'a, PosTable, TableGenerator, Sector>
202where
203    Sector: ReadAtSync + 'a,
204    PosTable: Table,
205    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
206{
207    type Item = MaybeSolution;
208
209    fn next(&mut self) -> Option<Self::Item> {
210        let WinningChunk {
211            piece_offset,
212            solution_distance: _,
213        } = self.winning_chunks.pop_front()?;
214
215        self.count -= 1;
216
217        // Derive PoSpace table
218        let pos_table =
219            (self.table_generator)(&self.sector_id.derive_evaluation_seed(piece_offset));
220
221        let maybe_solution: Result<_, ProvingError> = try {
222            let sector_record_chunks_fut = read_sector_record_chunks(
223                piece_offset,
224                self.sector_metadata.pieces_in_sector,
225                &self.s_bucket_offsets,
226                &self.sector_contents_map,
227                &pos_table,
228                &self.sector,
229                self.mode,
230            );
231            let sector_record_chunks = sector_record_chunks_fut
232                .now_or_never()
233                .expect("Sync reader; qed")?;
234
235            let chunk = sector_record_chunks
236                .get(usize::from(self.s_bucket))
237                .expect("Within s-bucket range; qed")
238                .expect("Winning chunk was plotted; qed");
239
240            let chunks = recover_extended_record_chunks(
241                &sector_record_chunks,
242                piece_offset,
243                self.erasure_coding,
244            )?;
245            drop(sector_record_chunks);
246
247            // TODO: This is a workaround for https://github.com/rust-lang/rust/issues/139866 that
248            //  allows the code to compile. Constant 65536 is hardcoded here and below for
249            //  compilation to succeed.
250            const _: () = {
251                assert!(Record::NUM_S_BUCKETS == 65536);
252            };
253            let record_merkle_tree = BalancedMerkleTree::<65536>::new_boxed(
254                RecordChunk::slice_to_repr(chunks.as_slice())
255                    .try_into()
256                    .expect("Statically guaranteed to have correct length; qed"),
257            );
258
259            // NOTE: We do not check plot consistency using checksum because it is more
260            // expensive and consensus will verify validity of the proof anyway
261            let record_metadata_fut = read_record_metadata(
262                piece_offset,
263                self.sector_metadata.pieces_in_sector,
264                &self.sector,
265            );
266            let record_metadata = record_metadata_fut
267                .now_or_never()
268                .expect("Sync reader; qed")?;
269
270            let proof_of_space = pos_table.find_proof(self.s_bucket.into()).expect(
271                "Quality exists for this s-bucket, otherwise it wouldn't be a winning chunk; qed",
272            );
273
274            let chunk_proof = record_merkle_tree
275                .all_proofs()
276                .nth(usize::from(self.s_bucket))
277                .expect("Chunk offset is valid, hence corresponding proof exists; qed");
278
279            Solution {
280                public_key_hash: *self.public_key_hash,
281                record_root: record_metadata.root,
282                record_proof: record_metadata.proof,
283                chunk,
284                chunk_proof: ChunkProof::from(chunk_proof),
285                proof_of_space,
286                history_size: self.sector_metadata.history_size,
287                sector_index: self.sector_metadata.sector_index,
288                piece_offset,
289                padding: [0; _],
290            }
291        };
292
293        match maybe_solution {
294            Ok(solution) => Some(Ok(solution)),
295            Err(error) => Some(Err(error)),
296        }
297    }
298
299    fn size_hint(&self) -> (usize, Option<usize>) {
300        (self.count, Some(self.count))
301    }
302}
303
impl<'a, PosTable, TableGenerator, Sector> ProvableSolutions
    for SolutionsIterator<'a, PosTable, TableGenerator, Sector>
where
    Sector: ReadAtSync + 'a,
    PosTable: Table,
    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
{
    /// Returns the distance precomputed in [`SolutionsIterator::new`] from the first winning
    /// chunk, so no proving work is triggered by this call
    fn best_solution_distance(&self) -> Option<SolutionDistance> {
        self.best_solution_distance
    }
}
315
316impl<'a, PosTable, TableGenerator, Sector> SolutionsIterator<'a, PosTable, TableGenerator, Sector>
317where
318    Sector: ReadAtSync + 'a,
319    PosTable: Table,
320    TableGenerator: (FnMut(&PosSeed) -> PosTable) + 'a,
321{
322    #[allow(clippy::too_many_arguments)]
323    fn new(
324        public_key_hash: &'a Blake3Hash,
325        sector_id: SectorId,
326        s_bucket: SBucket,
327        sector: Sector,
328        sector_metadata: &'a SectorMetadataChecksummed,
329        erasure_coding: &'a ErasureCoding,
330        chunk_candidates: VecDeque<ChunkCandidate>,
331        mode: ReadSectorRecordChunksMode,
332        table_generator: TableGenerator,
333    ) -> Result<Self, ProvingError> {
334        let sector_contents_map = {
335            let mut sector_contents_map_bytes =
336                vec![0; SectorContentsMap::encoded_size(sector_metadata.pieces_in_sector)];
337
338            sector.read_at(&mut sector_contents_map_bytes, 0)?;
339
340            SectorContentsMap::from_bytes(
341                &sector_contents_map_bytes,
342                sector_metadata.pieces_in_sector,
343            )?
344        };
345
346        let s_bucket_records = sector_contents_map
347            .iter_s_bucket_records(s_bucket)
348            .expect("S-bucket audit index is guaranteed to be in range; qed")
349            .collect::<Vec<_>>();
350        let winning_chunks = chunk_candidates
351            .into_iter()
352            .filter_map(move |chunk_candidate| {
353                let (piece_offset, encoded_chunk_used) = s_bucket_records
354                    .get(chunk_candidate.chunk_offset as usize)
355                    .expect("Wouldn't be a candidate if wasn't within s-bucket; qed");
356
357                encoded_chunk_used.then_some(WinningChunk {
358                    piece_offset: *piece_offset,
359                    solution_distance: chunk_candidate.solution_distance,
360                })
361            })
362            .collect::<VecDeque<_>>();
363
364        let best_solution_distance = winning_chunks
365            .front()
366            .map(|winning_chunk| winning_chunk.solution_distance);
367
368        let s_bucket_offsets = sector_metadata.s_bucket_offsets();
369
370        let count = winning_chunks.len();
371
372        Ok(Self {
373            public_key_hash,
374            sector_id,
375            s_bucket,
376            sector_metadata,
377            s_bucket_offsets,
378            erasure_coding,
379            sector_contents_map,
380            sector: ReadAt::from_sync(sector),
381            winning_chunks,
382            count,
383            best_solution_distance,
384            mode,
385            table_generator,
386        })
387    }
388}