use ab_core_primitives::checksum::Blake3Checksummed;
use ab_core_primitives::hashes::Blake3Hash;
use ab_core_primitives::pieces::{PieceOffset, Record, RecordChunksRoot, RecordProof, RecordRoot};
use ab_core_primitives::sectors::{SBucket, SectorIndex};
use ab_core_primitives::segments::{HistorySize, SegmentIndex};
use ab_io_type::trivial_type::TrivialType;
use parity_scale_codec::{Decode, Encode};
use rayon::prelude::*;
use std::ops::{Deref, DerefMut};
use std::sync::OnceLock;
use thiserror::Error;
use tracing::debug;
20
21#[inline]
25pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
26 pieces_in_sector as usize * Record::SIZE
27}
28
29#[inline]
33pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
34 pieces_in_sector as usize * RecordMetadata::encoded_size()
35}
36
37#[inline]
45pub const fn sector_size(pieces_in_sector: u16) -> usize {
46 sector_record_chunks_size(pieces_in_sector)
47 + sector_record_metadata_size(pieces_in_sector)
48 + SectorContentsMap::encoded_size(pieces_in_sector)
49 + Blake3Hash::SIZE
50}
51
/// Metadata of a plotted sector
#[derive(Debug, Encode, Decode, Clone)]
pub struct SectorMetadata {
    /// Index of this sector
    pub sector_index: SectorIndex,
    /// Number of pieces stored in this sector
    pub pieces_in_sector: u16,
    /// Number of used chunks in each of the sector's s-buckets (see
    /// [`SectorContentsMap::s_bucket_sizes`], which produces values of this shape)
    pub s_bucket_sizes: Box<[u16; Record::NUM_S_BUCKETS]>,
    /// History size — presumably the size of the blockchain history at plotting time; confirm
    /// against the code that creates sectors
    pub history_size: HistorySize,
}
64
65impl SectorMetadata {
66 pub fn s_bucket_offsets(&self) -> Box<[u32; Record::NUM_S_BUCKETS]> {
68 let s_bucket_offsets = self
69 .s_bucket_sizes
70 .iter()
71 .map({
72 let mut base_offset = 0;
73
74 move |s_bucket_size| {
75 let offset = base_offset;
76 base_offset += u32::from(*s_bucket_size);
77 offset
78 }
79 })
80 .collect::<Box<_>>();
81
82 assert_eq!(s_bucket_offsets.len(), Record::NUM_S_BUCKETS);
83 unsafe {
85 Box::from_raw(Box::into_raw(s_bucket_offsets).cast::<[u32; Record::NUM_S_BUCKETS]>())
86 }
87 }
88}
89
/// [`SectorMetadata`] wrapped in [`Blake3Checksummed`], so its SCALE encoding carries a checksum
#[derive(Debug, Clone, Encode, Decode)]
pub struct SectorMetadataChecksummed(Blake3Checksummed<SectorMetadata>);
93
94impl From<SectorMetadata> for SectorMetadataChecksummed {
95 #[inline]
96 fn from(value: SectorMetadata) -> Self {
97 Self(Blake3Checksummed(value))
98 }
99}
100
101impl Deref for SectorMetadataChecksummed {
102 type Target = SectorMetadata;
103
104 #[inline]
105 fn deref(&self) -> &Self::Target {
106 &self.0.0
107 }
108}
109
110impl DerefMut for SectorMetadataChecksummed {
111 #[inline]
112 fn deref_mut(&mut self) -> &mut Self::Target {
113 &mut self.0.0
114 }
115}
116
117impl SectorMetadataChecksummed {
118 #[inline]
122 pub fn encoded_size() -> usize {
123 let default = SectorMetadataChecksummed::from(SectorMetadata {
124 sector_index: SectorIndex::ZERO,
125 pieces_in_sector: 0,
126 s_bucket_sizes: unsafe { Box::new_zeroed().assume_init() },
129 history_size: HistorySize::from(SegmentIndex::ZERO),
130 });
131
132 default.encoded_size()
133 }
134}
135
/// Metadata of a single record stored in a sector
#[derive(Debug, Default, Clone, Encode, Decode)]
pub(crate) struct RecordMetadata {
    /// Record root
    pub(crate) root: RecordRoot,
    /// Root of the record's parity chunks — inferred from field/type names, confirm with the
    /// erasure-coding code
    pub(crate) parity_chunks_root: RecordChunksRoot,
    /// Record proof
    pub(crate) proof: RecordProof,
    /// Blake3 hash of the piece this record came from — inferred from the field name
    pub(crate) piece_checksum: Blake3Hash,
}
148
149impl RecordMetadata {
150 pub(crate) const fn encoded_size() -> usize {
151 RecordProof::SIZE + RecordRoot::SIZE + RecordChunksRoot::SIZE + Blake3Hash::SIZE
152 }
153}
154
/// A sector in raw (not yet encoded/plotted) form: plain records alongside their metadata
#[derive(Debug, Clone)]
pub(crate) struct RawSector {
    /// Records to be stored in the sector, one per piece
    pub(crate) records: Vec<Record>,
    /// Metadata of each record, parallel to `records`
    pub(crate) metadata: Vec<RecordMetadata>,
}
163
164impl RawSector {
165 pub(crate) fn new(pieces_in_sector: u16) -> Self {
167 Self {
168 records: Record::new_zero_vec(usize::from(pieces_in_sector)),
169 metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
170 }
171 }
172}
173
/// Bit array with one bit per s-bucket of a record, where a set bit means a proof/chunk was found
/// for that s-bucket (see how `byte_offset`/`bit_mask` are derived in [`SectorContentsMap`])
pub type FoundProofs = [u8; Record::NUM_S_BUCKETS / u8::BITS as usize];
182
/// Error happening when creating a [`SectorContentsMap`] from bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapFromBytesError {
    /// Invalid input length
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected number of bytes
        expected: usize,
        /// Actual number of bytes provided
        actual: usize,
    },
    /// Checksum of the bit arrays didn't match the trailing checksum
    #[error("Checksum mismatch")]
    ChecksumMismatch,
}
198
/// Error happening when encoding a [`SectorContentsMap`] into an output buffer
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapEncodeIntoError {
    /// Invalid output buffer length
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected number of bytes
        expected: usize,
        /// Actual number of bytes provided
        actual: usize,
    },
}
211
/// Error happening during iteration over a [`SectorContentsMap`]
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapIterationError {
    /// Provided s-bucket index is out of range
    #[error("S-bucket provided {provided} is out of range, max {max}")]
    SBucketOutOfRange {
        /// S-bucket index that was provided
        provided: usize,
        /// Exclusive upper bound on valid s-bucket indices
        // NOTE(review): callers set this to `Record::NUM_S_BUCKETS`, i.e. one past the largest
        // valid index, while the message calls it "max" — consider clarifying
        max: usize,
    },
}
224
/// Map of record chunks present in each record of a sector.
///
/// Wraps one [`FoundProofs`] bit array per record (indexed by piece offset) and provides queries
/// over which s-buckets of each record have a used chunk.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SectorContentsMap {
    // One bit array per record in the sector; bit `N` is set when a chunk was found for
    // s-bucket `N` of that record
    record_chunks_used: Vec<FoundProofs>,
}
236
impl SectorContentsMap {
    /// Create a new sector contents map for `pieces_in_sector` pieces, with no record chunks
    /// marked as used yet
    pub fn new(pieces_in_sector: u16) -> Self {
        Self {
            // `[0; _]` infers the `FoundProofs` length; all-zero bits mean "no chunks used"
            record_chunks_used: vec![[0; _]; usize::from(pieces_in_sector)],
        }
    }

    /// Reconstruct the sector contents map from its serialized form.
    ///
    /// Expects exactly [`Self::encoded_size()`] bytes: the concatenated per-record bit arrays
    /// followed by a Blake3 checksum of those bit arrays. Fails on length or checksum mismatch.
    pub fn from_bytes(
        bytes: &[u8],
        pieces_in_sector: u16,
    ) -> Result<Self, SectorContentsMapFromBytesError> {
        if bytes.len() != Self::encoded_size(pieces_in_sector) {
            return Err(SectorContentsMapFromBytesError::InvalidBytesLength {
                expected: Self::encoded_size(pieces_in_sector),
                actual: bytes.len(),
            });
        }

        // Layout: all bit arrays first, checksum in the trailing `Blake3Hash::SIZE` bytes
        let (single_records_bit_arrays, expected_checksum) =
            bytes.split_at(bytes.len() - Blake3Hash::SIZE);
        // SAFETY: the slice is exactly `Blake3Hash::SIZE` bytes due to `split_at()` above and a
        // hash has no alignment requirements (see `expect` message)
        let expected_checksum = unsafe {
            Blake3Hash::from_bytes(expected_checksum).expect("No alignment requirements; qed")
        };
        let actual_checksum = Blake3Hash::from(blake3::hash(single_records_bit_arrays));
        if &actual_checksum != expected_checksum {
            debug!(
                %actual_checksum,
                %expected_checksum,
                "Hash doesn't match, corrupted bytes"
            );

            return Err(SectorContentsMapFromBytesError::ChecksumMismatch);
        }

        let mut record_chunks_used = vec![[0; _]; pieces_in_sector.into()];

        // Lengths match by construction: both sides are
        // `size_of::<FoundProofs>() * pieces_in_sector` bytes
        record_chunks_used
            .as_flattened_mut()
            .copy_from_slice(single_records_bit_arrays);

        Ok(Self { record_chunks_used })
    }

    /// Size of the serialized sector contents map in bytes (bit arrays + trailing checksum)
    pub const fn encoded_size(pieces_in_sector: u16) -> usize {
        size_of::<FoundProofs>() * pieces_in_sector as usize + Blake3Hash::SIZE
    }

    /// Serialize the sector contents map into `output`.
    ///
    /// `output` must be exactly [`Self::encoded_size()`] bytes long; the bit arrays are written
    /// first, followed by their Blake3 checksum (matching the layout [`Self::from_bytes()`]
    /// expects).
    pub fn encode_into(&self, output: &mut [u8]) -> Result<(), SectorContentsMapEncodeIntoError> {
        if output.len() != Self::encoded_size(self.record_chunks_used.len() as u16) {
            return Err(SectorContentsMapEncodeIntoError::InvalidBytesLength {
                expected: Self::encoded_size(self.record_chunks_used.len() as u16),
                actual: output.len(),
            });
        }

        let slice = self.record_chunks_used.as_flattened();
        output[..slice.len()].copy_from_slice(slice);
        // The remaining bytes are exactly `Blake3Hash::SIZE` due to the length check above
        output[slice.len()..].copy_from_slice(blake3::hash(slice).as_bytes());

        Ok(())
    }

    /// Bit arrays of used record chunks, one entry per record (indexed by piece offset)
    pub fn iter_record_chunks_used(&self) -> &[FoundProofs] {
        &self.record_chunks_used
    }

    /// Mutable bit arrays of used record chunks, one entry per record (indexed by piece offset)
    pub fn iter_record_chunks_used_mut(&mut self) -> &mut [FoundProofs] {
        &mut self.record_chunks_used
    }

    /// Number of used chunks in each s-bucket, computed by counting set bits across all records
    /// (parallelized over s-buckets)
    pub fn s_bucket_sizes(&self) -> Box<[u16; Record::NUM_S_BUCKETS]> {
        let s_bucket_sizes = (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .count() as u16
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_sizes.len(), Record::NUM_S_BUCKETS);

        // SAFETY: length asserted above to be exactly `Record::NUM_S_BUCKETS`; a boxed slice and
        // a boxed array of the same length have identical layout
        unsafe {
            Box::from_raw(Box::into_raw(s_bucket_sizes).cast::<[u16; Record::NUM_S_BUCKETS]>())
        }
    }

    /// Iterate over `(s_bucket, chunk_location)` pairs for every used chunk of the record at
    /// `piece_offset`, where `chunk_location` is the chunk's position in the global sequence of
    /// all used chunks of the sector (enumerated s-bucket by s-bucket).
    ///
    /// Yields at most [`Record::NUM_CHUNKS`] entries.
    pub fn iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl Iterator<Item = (SBucket, usize)> + '_ {
        (SBucket::ZERO..=SBucket::MAX)
            // Flatten all s-buckets into one global sequence of `(s_bucket, piece_offset)` pairs
            // of used chunks
            .flat_map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .map(move |current_piece_offset| (s_bucket, current_piece_offset))
            })
            // The enumeration index is the chunk's location within the plotted sector
            .enumerate()
            // Keep only chunks belonging to the requested record
            .filter_map(move |(chunk_location, (s_bucket, current_piece_offset))| {
                (current_piece_offset == piece_offset).then_some((s_bucket, chunk_location))
            })
            // A record cannot have more chunks than this
            .take(Record::NUM_CHUNKS)
    }

    /// For every s-bucket (in parallel, in order), yield `Some(chunk_offset)` if the record at
    /// `piece_offset` has a used chunk in that s-bucket — where `chunk_offset` is the chunk's
    /// position *within* that s-bucket — or `None` otherwise.
    pub fn par_iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl IndexedParallelIterator<Item = Option<usize>> + '_ {
        let piece_offset = usize::from(piece_offset);
        (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(move |s_bucket| {
                // Locate the bit for this s-bucket within the record's bit array
                let byte_offset = usize::from(s_bucket) / u8::BITS as usize;
                let bit_mask = 1 << (usize::from(s_bucket) % u8::BITS as usize);

                if self.record_chunks_used[piece_offset][byte_offset] & bit_mask == 0 {
                    return None;
                }

                // Offset within the s-bucket is the number of preceding records that also have a
                // used chunk in this s-bucket
                let chunk_offset = self
                    .record_chunks_used
                    .iter()
                    .take(piece_offset)
                    .filter(move |record_chunks_used| {
                        record_chunks_used[byte_offset] & bit_mask != 0
                    })
                    .count();

                Some(chunk_offset)
            })
    }

    /// Iterate over piece offsets of records that have a used chunk in the provided `s_bucket`.
    ///
    /// Returns an error if `s_bucket` is out of range (`>= Record::NUM_S_BUCKETS`).
    pub fn iter_s_bucket_piece_offsets(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = PieceOffset> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok((PieceOffset::ZERO..)
            .zip(&self.record_chunks_used)
            .filter_map(move |(piece_offset, record_chunks_used)| {
                let byte_offset = s_bucket / u8::BITS as usize;
                let bit_mask = 1 << (s_bucket % u8::BITS as usize);

                // Keep the piece offset only if this record's bit for `s_bucket` is set
                (record_chunks_used[byte_offset] & bit_mask != 0).then_some(piece_offset)
            }))
    }

    /// Iterate over all records (in piece offset order), yielding whether each one has a used
    /// chunk in the provided `s_bucket`.
    ///
    /// Returns an error if `s_bucket` is out of range (`>= Record::NUM_S_BUCKETS`).
    pub fn iter_s_bucket_used_record_chunks_used(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = bool> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok(self
            .record_chunks_used
            .iter()
            .map(move |record_chunks_used| {
                let byte_offset = s_bucket / u8::BITS as usize;
                let bit_mask = 1 << (s_bucket % u8::BITS as usize);

                record_chunks_used[byte_offset] & bit_mask != 0
            }))
    }
}