ab_farmer_components/
sector.rs

//! Sector-related data structures
//!
//! Sectors and corresponding metadata created by functions in the [`plotting`](crate::plotting)
//! module have a specific structure, represented by the data structures in this module.
//!
//! It is typically not necessary to construct these data structures explicitly outside of this
//! crate; instead, they are returned as a result of certain operations (like plotting).

use ab_core_primitives::checksum::Blake3Checksummed;
use ab_core_primitives::hashes::Blake3Hash;
use ab_core_primitives::pieces::{PieceOffset, Record, RecordChunksRoot, RecordProof, RecordRoot};
use ab_core_primitives::sectors::{SBucket, SectorIndex};
use ab_core_primitives::segments::{HistorySize, SegmentIndex};
use ab_io_type::trivial_type::TrivialType;
use bitvec::prelude::*;
use parity_scale_codec::{Decode, Encode};
use rayon::prelude::*;
use std::mem::ManuallyDrop;
use std::ops::{Deref, DerefMut};
use std::{mem, slice};
use thiserror::Error;
use tracing::debug;

/// Size of the part of the plot containing record chunks (s-buckets).
///
/// Total size of the plot can be computed with [`sector_size()`].
#[inline]
pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
    pieces_in_sector as usize * Record::SIZE
}

/// Size of the part of the plot containing record metadata.
///
/// Total size of the plot can be computed with [`sector_size()`].
#[inline]
pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
    pieces_in_sector as usize * RecordMetadata::encoded_size()
}

/// Exact sector plot size (sector contents map, record chunks, record metadata and checksum).
///
/// NOTE: Each sector also has corresponding fixed-size metadata, whose size can be obtained with
/// [`SectorMetadataChecksummed::encoded_size()`]; that metadata is not included here. The size of
/// the record chunks (s-buckets) can be obtained with [`sector_record_chunks_size()`] and the size
/// of record roots and proofs with [`sector_record_metadata_size()`]; this function combines them
/// with the sector contents map size and a checksum for convenience.
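///
/// A minimal sanity-check sketch (the `pieces_in_sector` value is illustrative only):
///
/// ```ignore
/// let pieces_in_sector: u16 = 1_000;
/// assert_eq!(
///     sector_size(pieces_in_sector),
///     sector_record_chunks_size(pieces_in_sector)
///         + sector_record_metadata_size(pieces_in_sector)
///         + SectorContentsMap::encoded_size(pieces_in_sector)
///         + Blake3Hash::SIZE,
/// );
/// ```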
#[inline]
pub const fn sector_size(pieces_in_sector: u16) -> usize {
    sector_record_chunks_size(pieces_in_sector)
        + sector_record_metadata_size(pieces_in_sector)
        + SectorContentsMap::encoded_size(pieces_in_sector)
        + Blake3Hash::SIZE
}

/// Metadata of the plotted sector
#[derive(Debug, Encode, Decode, Clone)]
pub struct SectorMetadata {
    /// Sector index
    pub sector_index: SectorIndex,
    /// Number of pieces stored in this sector
    pub pieces_in_sector: u16,
    /// S-bucket sizes in a sector
    pub s_bucket_sizes: Box<[u16; Record::NUM_S_BUCKETS]>,
    /// Size of the blockchain history at time of sector creation
    pub history_size: HistorySize,
}

impl SectorMetadata {
    /// Returns offsets of each s-bucket relative to the beginning of the sector (in chunks)
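    ///
    /// For example (illustrative sizes), s-bucket sizes starting with `[2, 0, 3, 1, ..]` yield
    /// offsets `[0, 2, 2, 5, ..]`, i.e. an exclusive prefix sum of the sizes.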
    pub fn s_bucket_offsets(&self) -> Box<[u32; Record::NUM_S_BUCKETS]> {
        let s_bucket_offsets = self
            .s_bucket_sizes
            .iter()
            .map({
                let mut base_offset = 0;

                move |s_bucket_size| {
                    let offset = base_offset;
                    base_offset += u32::from(*s_bucket_size);
                    offset
                }
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_offsets.len(), Record::NUM_S_BUCKETS);
        let mut s_bucket_offsets = ManuallyDrop::new(s_bucket_offsets);
        // SAFETY: Original memory is not dropped, number of elements checked above
        unsafe { Box::from_raw(s_bucket_offsets.as_mut_ptr() as *mut [u32; Record::NUM_S_BUCKETS]) }
    }
}

/// Same as [`SectorMetadata`], but with checksums verified during SCALE encoding/decoding
#[derive(Debug, Clone, Encode, Decode)]
pub struct SectorMetadataChecksummed(Blake3Checksummed<SectorMetadata>);

impl From<SectorMetadata> for SectorMetadataChecksummed {
    #[inline]
    fn from(value: SectorMetadata) -> Self {
        Self(Blake3Checksummed(value))
    }
}

impl Deref for SectorMetadataChecksummed {
    type Target = SectorMetadata;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.0.0
    }
}

impl DerefMut for SectorMetadataChecksummed {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.0
    }
}

impl SectorMetadataChecksummed {
    /// Size of encoded checksummed sector metadata.
    ///
    /// For sector plot size use [`sector_size()`].
    #[inline]
    pub fn encoded_size() -> usize {
        let default = SectorMetadataChecksummed::from(SectorMetadata {
            sector_index: SectorIndex::ZERO,
            pieces_in_sector: 0,
            // TODO: Should have been just `::new()`, but https://github.com/rust-lang/rust/issues/53827
            // SAFETY: Data structure filled with zeroes is a valid invariant
            s_bucket_sizes: unsafe { Box::new_zeroed().assume_init() },
            history_size: HistorySize::from(SegmentIndex::ZERO),
        });

        default.encoded_size()
    }
}

/// Root and proof corresponding to the same record
#[derive(Debug, Default, Clone, Encode, Decode)]
pub(crate) struct RecordMetadata {
    /// Record root
    pub(crate) root: RecordRoot,
    /// Parity chunks root
    pub(crate) parity_chunks_root: RecordChunksRoot,
    /// Record proof
    pub(crate) proof: RecordProof,
    /// Checksum (hash) of the whole piece
    pub(crate) piece_checksum: Blake3Hash,
}

impl RecordMetadata {
    pub(crate) const fn encoded_size() -> usize {
        RecordProof::SIZE + RecordRoot::SIZE + RecordChunksRoot::SIZE + Blake3Hash::SIZE
    }
}

/// Raw sector before it is transformed and written to the plot, used during plotting
#[derive(Debug, Clone)]
pub(crate) struct RawSector {
    /// List of records, likely downloaded from the network
    pub(crate) records: Vec<Record>,
    /// Metadata (root and proof) corresponding to each record
    pub(crate) metadata: Vec<RecordMetadata>,
}

impl RawSector {
    /// Create a new raw sector with internal vectors allocated and filled with default values
    pub(crate) fn new(pieces_in_sector: u16) -> Self {
        Self {
            records: Record::new_zero_vec(usize::from(pieces_in_sector)),
            metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
        }
    }
}

// Bit array containing space for bits equal to the number of s-buckets in a record
type SingleRecordBitArray = BitArray<[u8; Record::NUM_S_BUCKETS / u8::BITS as usize]>;

const SINGLE_RECORD_BIT_ARRAY_SIZE: usize = mem::size_of::<SingleRecordBitArray>();

// TODO: I really tried to avoid `count_ones()`, but wasn't able to with safe Rust due to lifetimes
/// Wrapper data structure that allows iterating mutably over encoded chunk bitfields, while
/// maintaining an up-to-date number of encoded chunks
///
/// ## Panics
/// Panics on drop if too many chunks are encoded
#[derive(Debug)]
pub struct EncodedChunksUsed<'a> {
    encoded_record_chunks_used: &'a mut SingleRecordBitArray,
    num_encoded_record_chunks: &'a mut SBucket,
    potentially_updated: bool,
}

impl Drop for EncodedChunksUsed<'_> {
    fn drop(&mut self) {
        if self.potentially_updated {
            let num_encoded_record_chunks = self.encoded_record_chunks_used.count_ones();
            assert!(num_encoded_record_chunks <= SBucket::MAX.into());
            *self.num_encoded_record_chunks = SBucket::try_from(num_encoded_record_chunks)
                .expect("Checked with explicit assert above; qed");
        }
    }
}

impl EncodedChunksUsed<'_> {
    /// Produces an iterator over encoded chunks bitfields.
    pub fn iter(&self) -> impl ExactSizeIterator<Item = impl Deref<Target = bool> + '_> + '_ {
        self.encoded_record_chunks_used.iter()
    }

    /// Produces a mutable iterator over encoded chunks bitfields.
    pub fn iter_mut(
        &mut self,
    ) -> impl ExactSizeIterator<Item = impl DerefMut<Target = bool> + '_> + '_ {
        self.potentially_updated = true;
        self.encoded_record_chunks_used.iter_mut()
    }
}

/// Error happening when trying to create [`SectorContentsMap`] from bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapFromBytesError {
    /// Invalid bytes length
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected length
        expected: usize,
        /// Actual length
        actual: usize,
    },
    /// Invalid number of encoded record chunks
    #[error("Invalid number of encoded record chunks {actual}, max {max}")]
    InvalidEncodedRecordChunks {
        /// Actual number of encoded record chunks
        actual: usize,
        /// Max supported
        max: usize,
    },
    /// Checksum mismatch
    #[error("Checksum mismatch")]
    ChecksumMismatch,
}

/// Error happening when trying to encode [`SectorContentsMap`] into bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapEncodeIntoError {
    /// Invalid bytes length
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected length
        expected: usize,
        /// Actual length
        actual: usize,
    },
}

/// Error happening during iteration over [`SectorContentsMap`]
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapIterationError {
    /// S-bucket provided is out of range
    #[error("S-bucket provided {provided} is out of range, max {max}")]
    SBucketOutOfRange {
        /// Provided s-bucket
        provided: usize,
        /// Max s-bucket
        max: usize,
    },
}

/// Map of sector contents.
///
/// An abstraction on top of bitfields that makes sense of sector contents containing both encoded
/// chunks (erasure coded and encoded with an existing PoSpace quality) and unencoded chunks (just
/// erasure coded), used at the same time both in the record format (before writing to the plot)
/// and in the s-bucket format (as written into the plot).
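///
/// A rough lifecycle sketch (`pieces_in_sector` is illustrative; during plotting, bits are set via
/// [`Self::iter_record_bitfields_mut()`]):
///
/// ```ignore
/// let pieces_in_sector: u16 = 1_000;
/// let mut sector_contents_map = SectorContentsMap::new(pieces_in_sector);
/// for mut encoded_chunks_used in sector_contents_map.iter_record_bitfields_mut() {
///     // Set bits for the chunks of this record that ended up encoded
/// }
/// // Derived views become available once bits are in place
/// let s_bucket_sizes = sector_contents_map.s_bucket_sizes();
/// ```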
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SectorContentsMap {
    /// Number of encoded chunks used in each record.
    ///
    /// This is technically redundant, but allows drastically decreasing the amount of work in
    /// [`Self::iter_s_bucket_records()`] and other places, which become unusably slow otherwise.
    num_encoded_record_chunks: Vec<SBucket>,
    /// Bitfields for each record; each bit is `true` if the encoded chunk at the corresponding
    /// position was used
    encoded_record_chunks_used: Vec<SingleRecordBitArray>,
}

impl SectorContentsMap {
    /// Create a new sector contents map initialized with zeroes to store data for
    /// `pieces_in_sector` records
    pub fn new(pieces_in_sector: u16) -> Self {
        Self {
            num_encoded_record_chunks: vec![SBucket::default(); usize::from(pieces_in_sector)],
            encoded_record_chunks_used: vec![
                SingleRecordBitArray::default();
                usize::from(pieces_in_sector)
            ],
        }
    }

    /// Reconstruct the sector contents map from bytes.
    ///
    /// Returns an error if the length of the input doesn't match [`Self::encoded_size()`] for
    /// `pieces_in_sector` or if the checksum doesn't match.
    pub fn from_bytes(
        bytes: &[u8],
        pieces_in_sector: u16,
    ) -> Result<Self, SectorContentsMapFromBytesError> {
        if bytes.len() != Self::encoded_size(pieces_in_sector) {
            return Err(SectorContentsMapFromBytesError::InvalidBytesLength {
                expected: Self::encoded_size(pieces_in_sector),
                actual: bytes.len(),
            });
        }

        let (single_records_bit_arrays, expected_checksum) =
            bytes.split_at(bytes.len() - Blake3Hash::SIZE);
        // SAFETY: All bit patterns are valid
        let expected_checksum = unsafe {
            Blake3Hash::from_bytes(expected_checksum).expect("No alignment requirements; qed")
        };
        // Verify checksum
        let actual_checksum = Blake3Hash::from(blake3::hash(single_records_bit_arrays));
        if &actual_checksum != expected_checksum {
            debug!(
                %actual_checksum,
                %expected_checksum,
                "Hash doesn't match, corrupted bytes"
            );

            return Err(SectorContentsMapFromBytesError::ChecksumMismatch);
        }

        let mut encoded_record_chunks_used =
            vec![SingleRecordBitArray::default(); pieces_in_sector.into()];

        let num_encoded_record_chunks = encoded_record_chunks_used
            .iter_mut()
            .zip(single_records_bit_arrays.array_chunks::<{ SINGLE_RECORD_BIT_ARRAY_SIZE }>())
            .map(|(encoded_record_chunks_used, bytes)| {
                encoded_record_chunks_used
                    .as_raw_mut_slice()
                    .copy_from_slice(bytes);
                let num_encoded_record_chunks = encoded_record_chunks_used.count_ones();
                if num_encoded_record_chunks > Record::NUM_CHUNKS {
                    return Err(
                        SectorContentsMapFromBytesError::InvalidEncodedRecordChunks {
                            actual: num_encoded_record_chunks,
                            max: Record::NUM_CHUNKS,
                        },
                    );
                }
                Ok(SBucket::try_from(num_encoded_record_chunks).expect("Verified above; qed"))
            })
            .collect::<Result<Vec<_>, _>>()?;

        Ok(Self {
            num_encoded_record_chunks,
            encoded_record_chunks_used,
        })
    }

    /// Size of the sector contents map when encoded and stored in the plot, for the specified
    /// number of pieces in a sector
    pub const fn encoded_size(pieces_in_sector: u16) -> usize {
        SINGLE_RECORD_BIT_ARRAY_SIZE * pieces_in_sector as usize + Blake3Hash::SIZE
    }

    /// Encode internal contents into `output`
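    ///
    /// A minimal roundtrip sketch (assumes the map was created for `pieces_in_sector` pieces and
    /// the buffer is sized with [`Self::encoded_size()`]):
    ///
    /// ```ignore
    /// let mut buffer = vec![0u8; SectorContentsMap::encoded_size(pieces_in_sector)];
    /// sector_contents_map.encode_into(&mut buffer)?;
    /// let restored = SectorContentsMap::from_bytes(&buffer, pieces_in_sector)?;
    /// assert_eq!(sector_contents_map, restored);
    /// ```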
    pub fn encode_into(&self, output: &mut [u8]) -> Result<(), SectorContentsMapEncodeIntoError> {
        if output.len() != Self::encoded_size(self.encoded_record_chunks_used.len() as u16) {
            return Err(SectorContentsMapEncodeIntoError::InvalidBytesLength {
                expected: Self::encoded_size(self.encoded_record_chunks_used.len() as u16),
                actual: output.len(),
            });
        }

        let slice = self.encoded_record_chunks_used.as_slice();
        // SAFETY: `BitArray` is a transparent data structure containing array of bytes
        let slice = unsafe {
            slice::from_raw_parts(
                slice.as_ptr() as *const u8,
                slice.len() * SINGLE_RECORD_BIT_ARRAY_SIZE,
            )
        };

        // Write data and checksum
        output[..slice.len()].copy_from_slice(slice);
        output[slice.len()..].copy_from_slice(blake3::hash(slice).as_bytes());

        Ok(())
    }

    /// Number of encoded chunks in each record
    pub fn num_encoded_record_chunks(&self) -> &[SBucket] {
        &self.num_encoded_record_chunks
    }

    /// Iterate over individual record bitfields
    pub fn iter_record_bitfields(&self) -> &[SingleRecordBitArray] {
        &self.encoded_record_chunks_used
    }

    /// Iterate mutably over individual record bitfields
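    ///
    /// A hedged usage sketch (which bits to set is decided by real plotting; here the first chunk
    /// of every record is marked as encoded):
    ///
    /// ```ignore
    /// for mut encoded_chunks_used in sector_contents_map.iter_record_bitfields_mut() {
    ///     if let Some(mut bit) = encoded_chunks_used.iter_mut().next() {
    ///         *bit = true;
    ///     }
    ///     // On drop, `EncodedChunksUsed` recounts and stores the number of encoded chunks
    /// }
    /// ```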
    pub fn iter_record_bitfields_mut(
        &mut self,
    ) -> impl ExactSizeIterator<Item = EncodedChunksUsed<'_>> + '_ {
        self.encoded_record_chunks_used
            .iter_mut()
            .zip(&mut self.num_encoded_record_chunks)
            .map(
                |(encoded_record_chunks_used, num_encoded_record_chunks)| EncodedChunksUsed {
                    encoded_record_chunks_used,
                    num_encoded_record_chunks,
                    potentially_updated: false,
                },
            )
    }

    /// Returns sizes of each s-bucket
    pub fn s_bucket_sizes(&self) -> Box<[u16; Record::NUM_S_BUCKETS]> {
        // Rayon doesn't support iteration over custom types yet
        let s_bucket_sizes = (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(|s_bucket| {
                self.iter_s_bucket_records(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .count() as u16
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_sizes.len(), Record::NUM_S_BUCKETS);
        let mut s_bucket_sizes = ManuallyDrop::new(s_bucket_sizes);
        // SAFETY: Original memory is not dropped, number of elements checked above
        unsafe { Box::from_raw(s_bucket_sizes.as_mut_ptr() as *mut [u16; Record::NUM_S_BUCKETS]) }
    }

    /// Creates an iterator of `(s_bucket, encoded_chunk_used, chunk_location)`, where `s_bucket`
    /// is the position of the chunk in the erasure coded record, `encoded_chunk_used` indicates
    /// whether it was encoded and `chunk_location` is the offset of the chunk in the plot (across
    /// all s-buckets).
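    ///
    /// A usage sketch (`piece_offset` and the surrounding setup are assumed):
    ///
    /// ```ignore
    /// for (s_bucket, encoded_chunk_used, chunk_location) in
    ///     sector_contents_map.iter_record_chunk_to_plot(piece_offset)
    /// {
    ///     // `chunk_location` is the offset of this chunk among all s-bucket chunks
    ///     // in the plot, so it identifies where the chunk physically lives
    /// }
    /// ```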
    pub fn iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl Iterator<Item = (SBucket, bool, usize)> + '_ {
        // Iterate over all s-buckets
        (SBucket::ZERO..=SBucket::MAX)
            // In each s-bucket map all records used
            .flat_map(|s_bucket| {
                self.iter_s_bucket_records(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .map(move |(current_piece_offset, encoded_chunk_used)| {
                        (s_bucket, current_piece_offset, encoded_chunk_used)
                    })
            })
            // We've got contents of all s-buckets in a flat iterator, enumerating them so it is
            // possible to find the chunk in the plot later if desired
            .enumerate()
            // Keep only entries for the piece offset we care about
            .filter_map(
                move |(chunk_location, (s_bucket, current_piece_offset, encoded_chunk_used))| {
                    // In case a record for `piece_offset` is found, return necessary information
                    (current_piece_offset == piece_offset).then_some((
                        s_bucket,
                        encoded_chunk_used,
                        chunk_location,
                    ))
                },
            )
            // Tiny optimization in case we have already found all chunks for this record
            .take(Record::NUM_CHUNKS)
    }

    /// Creates an iterator of `Option<(chunk_offset, encoded_chunk_used)>`, where each entry
    /// corresponds to the s-bucket/position of the chunk in the erasure coded record,
    /// `encoded_chunk_used` indicates whether it was encoded and `chunk_offset` is the offset of
    /// the chunk within the corresponding s-bucket.
    ///
    /// Similar to [`Self::iter_record_chunk_to_plot()`], but runs in parallel, returns entries for
    /// all s-buckets, and offsets are within corresponding s-buckets rather than the whole plot.
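    ///
    /// A collection sketch (the result has one entry per s-bucket, [`Record::NUM_S_BUCKETS`] in
    /// total):
    ///
    /// ```ignore
    /// let chunk_locations: Vec<Option<(usize, bool)>> = sector_contents_map
    ///     .par_iter_record_chunk_to_plot(piece_offset)
    ///     .collect();
    /// assert_eq!(chunk_locations.len(), Record::NUM_S_BUCKETS);
    /// ```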
    pub fn par_iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl IndexedParallelIterator<Item = Option<(usize, bool)>> + '_ {
        let piece_offset = usize::from(piece_offset);
        (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            // In each s-bucket map all records used
            .map(move |s_bucket| {
                let encoded_chunk_used = record_has_s_bucket_chunk(
                    s_bucket.into(),
                    &self.encoded_record_chunks_used[piece_offset],
                    usize::from(self.num_encoded_record_chunks[piece_offset]),
                )?;

                // How many other record chunks we have in s-bucket before piece offset we care
                // about
                let chunk_offset = self
                    .encoded_record_chunks_used
                    .iter()
                    .zip(&self.num_encoded_record_chunks)
                    .take(piece_offset)
                    .filter(move |(record_bitfields, num_encoded_record_chunks)| {
                        record_has_s_bucket_chunk(
                            s_bucket.into(),
                            record_bitfields,
                            usize::from(**num_encoded_record_chunks),
                        )
                        .is_some()
                    })
                    .count();

                Some((chunk_offset, encoded_chunk_used))
            })
    }

    /// Creates an iterator of `(piece_offset, encoded_chunk_used)`, where `piece_offset`
    /// corresponds to the record to which the chunk belongs and `encoded_chunk_used` indicates
    /// whether it was encoded.
    ///
    /// Returns an error if `s_bucket` is outside of the [`Record::NUM_S_BUCKETS`] range.
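    ///
    /// A usage sketch (at most one entry per record; records without a chunk in this s-bucket are
    /// skipped):
    ///
    /// ```ignore
    /// for (piece_offset, encoded_chunk_used) in
    ///     sector_contents_map.iter_s_bucket_records(s_bucket)?
    /// {
    ///     // `piece_offset` identifies the record owning this chunk
    /// }
    /// ```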
    pub fn iter_s_bucket_records(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = (PieceOffset, bool)> + '_, SectorContentsMapIterationError>
    {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok((PieceOffset::ZERO..)
            .zip(
                self.encoded_record_chunks_used
                    .iter()
                    .zip(&self.num_encoded_record_chunks),
            )
            .filter_map(
                move |(piece_offset, (record_bitfields, num_encoded_record_chunks))| {
                    let encoded_chunk_used = record_has_s_bucket_chunk(
                        s_bucket,
                        record_bitfields,
                        usize::from(*num_encoded_record_chunks),
                    )?;

                    Some((piece_offset, encoded_chunk_used))
                },
            ))
    }

    /// Iterate over chunks of the s-bucket, indicating whether the encoded chunk is used at the
    /// corresponding position
    ///
    /// Returns an error if `s_bucket` is outside of the [`Record::NUM_S_BUCKETS`] range.
    pub fn iter_s_bucket_encoded_record_chunks_used(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = bool> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok(self
            .encoded_record_chunks_used
            .iter()
            .map(move |record_bitfields| record_bitfields[s_bucket]))
    }
}

/// Checks if the record has a chunk at the given s-bucket: returns `Some(true)` if it does and the
/// chunk is encoded, `Some(false)` if it does and the chunk is not encoded, and `None` if the
/// record has no chunk at this s-bucket.
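///
/// Worked example (illustrative small numbers; real records have [`Record::NUM_CHUNKS`] chunks):
/// with 4 chunks total, 3 of them encoded at s-buckets `{0, 2, 5}`, there is exactly one unencoded
/// chunk; it occupies the first s-bucket whose bit is unset (s-bucket 1), while any later unset
/// s-bucket holds no chunk for this record.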
fn record_has_s_bucket_chunk(
    s_bucket: usize,
    record_bitfields: &SingleRecordBitArray,
    num_encoded_record_chunks: usize,
) -> Option<bool> {
    if record_bitfields[s_bucket] {
        // Bit is explicitly set to `true`, easy case
        Some(true)
    } else if num_encoded_record_chunks == Record::NUM_CHUNKS {
        // All chunks are encoded, so an unset bit means there is no chunk at this s-bucket
        None
    } else {
        // Count how many encoded chunks we have before the current offset
        let encoded_before = record_bitfields[..s_bucket].count_ones();
        let unencoded_before = s_bucket - encoded_before;
        // And how many unencoded chunks we have in total (we know that the total number of used
        // chunks is always `Record::NUM_CHUNKS`)
        let unencoded_total = Record::NUM_CHUNKS.saturating_sub(num_encoded_record_chunks);

        if unencoded_before < unencoded_total {
            // Have not seen all unencoded chunks before the current offset yet, hence the current
            // offset qualifies
            Some(false)
        } else {
            None
        }
    }
}