1use ab_core_primitives::checksum::Blake3Checksummed;
10use ab_core_primitives::hashes::Blake3Hash;
11use ab_core_primitives::pieces::{PieceHeader, PieceOffset, Record};
12use ab_core_primitives::sectors::{SBucket, SectorIndex};
13use ab_core_primitives::segments::{HistorySize, SegmentIndex};
14use ab_io_type::trivial_type::TrivialType;
15use parity_scale_codec::{Decode, Encode};
16use rayon::prelude::*;
17use std::ops::{Deref, DerefMut};
18use thiserror::Error;
19use tracing::debug;
20
21#[inline]
25pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
26 pieces_in_sector as usize * Record::SIZE
27}
28
29#[inline]
33pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
34 pieces_in_sector as usize * RecordMetadata::encoded_size()
35}
36
37#[inline]
45pub const fn sector_size(pieces_in_sector: u16) -> usize {
46 sector_record_chunks_size(pieces_in_sector)
47 + sector_record_metadata_size(pieces_in_sector)
48 + SectorContentsMap::encoded_size(pieces_in_sector)
49 + Blake3Hash::SIZE
50}
51
/// Metadata of a plotted sector
#[derive(Debug, Encode, Decode, Clone)]
pub struct SectorMetadata {
    /// Index of this sector
    pub sector_index: SectorIndex,
    /// Number of pieces stored in this sector
    pub pieces_in_sector: u16,
    /// Number of chunks stored in each s-bucket of this sector
    pub s_bucket_sizes: Box<[u16; Record::NUM_S_BUCKETS]>,
    // NOTE(review): presumably the history size at the time the sector was plotted —
    // confirm against the plotting code
    /// History size associated with this sector
    pub history_size: HistorySize,
}
64
65impl SectorMetadata {
66 pub fn s_bucket_offsets(&self) -> Box<[u32; Record::NUM_S_BUCKETS]> {
68 let s_bucket_offsets = self
69 .s_bucket_sizes
70 .iter()
71 .map({
72 let mut base_offset = 0;
73
74 move |s_bucket_size| {
75 let offset = base_offset;
76 base_offset += u32::from(*s_bucket_size);
77 offset
78 }
79 })
80 .collect::<Box<_>>();
81
82 assert_eq!(s_bucket_offsets.len(), Record::NUM_S_BUCKETS);
83 unsafe {
85 Box::from_raw(Box::into_raw(s_bucket_offsets).cast::<[u32; Record::NUM_S_BUCKETS]>())
86 }
87 }
88}
89
/// Same as [`SectorMetadata`], but carrying a BLAKE3 checksum that is verified during
/// encoding/decoding (via [`Blake3Checksummed`])
#[derive(Debug, Clone, Encode, Decode)]
pub struct SectorMetadataChecksummed(Blake3Checksummed<SectorMetadata>);
93
94impl From<SectorMetadata> for SectorMetadataChecksummed {
95 #[inline]
96 fn from(value: SectorMetadata) -> Self {
97 Self(Blake3Checksummed(value))
98 }
99}
100
101impl Deref for SectorMetadataChecksummed {
102 type Target = SectorMetadata;
103
104 #[inline]
105 fn deref(&self) -> &Self::Target {
106 &self.0.0
107 }
108}
109
110impl DerefMut for SectorMetadataChecksummed {
111 #[inline]
112 fn deref_mut(&mut self) -> &mut Self::Target {
113 &mut self.0.0
114 }
115}
116
impl SectorMetadataChecksummed {
    /// Size of the encoded checksummed sector metadata in bytes.
    ///
    /// Computed by encoding a placeholder value.
    // NOTE(review): assumes all fields encode to a size independent of their value —
    // confirm for `SectorIndex` and `HistorySize`
    #[inline]
    pub fn encoded_size() -> usize {
        let default = SectorMetadataChecksummed::from(SectorMetadata {
            sector_index: SectorIndex::ZERO,
            pieces_in_sector: 0,
            // SAFETY: all-zero bytes are a valid bit pattern for `[u16; N]`;
            // `Box::new_zeroed` is used so the large array is allocated directly on the
            // heap instead of being constructed on the stack first
            s_bucket_sizes: unsafe { Box::new_zeroed().assume_init() },
            history_size: HistorySize::from(SegmentIndex::ZERO),
        });

        default.encoded_size()
    }
}
135
/// Metadata stored alongside each record in a sector
#[derive(Debug, Default, Copy, Clone, Encode, Decode)]
#[repr(C)]
pub(crate) struct RecordMetadata {
    /// Header of the piece this record belongs to
    pub(crate) piece_header: PieceHeader,
    // NOTE(review): presumably the checksum of the source piece — confirm against the
    // code that fills this in
    /// BLAKE3 checksum associated with the piece
    pub(crate) piece_checksum: Blake3Hash,
}
145
// `RecordMetadata::encoded_size()` uses `size_of::<Self>()`, which only matches the
// serialized size if the struct has no padding; alignment 1 guarantees that
const {
    assert!(align_of::<RecordMetadata>() == 1);
}
149
150impl RecordMetadata {
151 pub(crate) const fn encoded_size() -> usize {
152 size_of::<Self>()
153 }
154}
155
/// Raw sector contents prior to encoding
#[derive(Debug, Clone)]
pub(crate) struct RawSector {
    /// Records to be stored in the sector, one per piece
    pub(crate) records: Vec<Record>,
    /// Metadata for each record, same length and order as `records`
    pub(crate) metadata: Vec<RecordMetadata>,
}
164
165impl RawSector {
166 pub(crate) fn new(pieces_in_sector: u16) -> Self {
168 Self {
169 records: Record::new_zero_vec(usize::from(pieces_in_sector)),
170 metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
171 }
172 }
173}
174
/// Bitfield with one bit per s-bucket, indicating for a single record which s-buckets
/// have a chunk present (bit index `b` lives at byte `b / 8`, mask `1 << (b % 8)`).
// NOTE(review): relies on `Record::NUM_S_BUCKETS` being a multiple of `u8::BITS`,
// otherwise trailing buckets would not fit — confirm
pub type FoundProofs = [u8; Record::NUM_S_BUCKETS / u8::BITS as usize];
183
/// Error returned when decoding a [`SectorContentsMap`] from bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapFromBytesError {
    /// Input slice length doesn't match the encoded size for the sector
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected number of bytes
        expected: usize,
        /// Actual number of bytes provided
        actual: usize,
    },
    /// Trailing BLAKE3 checksum doesn't match the data
    #[error("Checksum mismatch")]
    ChecksumMismatch,
}
199
/// Error returned when encoding a [`SectorContentsMap`] into a byte buffer
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapEncodeIntoError {
    /// Output buffer length doesn't match the encoded size for the sector
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected number of bytes
        expected: usize,
        /// Actual number of bytes provided
        actual: usize,
    },
}
212
/// Error returned by [`SectorContentsMap`] iteration methods
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapIterationError {
    /// Requested s-bucket is not within `Record::NUM_S_BUCKETS`
    #[error("S-bucket provided {provided} is out of range, max {max}")]
    SBucketOutOfRange {
        /// S-bucket value that was provided
        provided: usize,
        // NOTE(review): filled with the bucket count, not the largest valid index — see
        // the construction sites
        /// Upper bound on s-bucket values
        max: usize,
    },
}
225
/// Map of what is stored where in a sector: for every record in the sector, a bitfield of
/// which s-buckets contain an encoded chunk of that record
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SectorContentsMap {
    /// One bitfield per piece in the sector, indexed by piece offset
    record_chunks_used: Vec<FoundProofs>,
}
237
impl SectorContentsMap {
    /// Creates an empty map (no chunks marked as used) for a sector with
    /// `pieces_in_sector` records
    pub fn new(pieces_in_sector: u16) -> Self {
        Self {
            record_chunks_used: vec![[0; _]; usize::from(pieces_in_sector)],
        }
    }

    /// Reconstructs the map from bytes produced by [`Self::encode_into`].
    ///
    /// `bytes` must be exactly [`Self::encoded_size`] long: the flattened per-record
    /// bitfields followed by a BLAKE3 checksum of those bitfields.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapFromBytesError::InvalidBytesLength`] on a length
    /// mismatch and [`SectorContentsMapFromBytesError::ChecksumMismatch`] if the trailing
    /// checksum doesn't match the data.
    pub fn from_bytes(
        bytes: &[u8],
        pieces_in_sector: u16,
    ) -> Result<Self, SectorContentsMapFromBytesError> {
        if bytes.len() != Self::encoded_size(pieces_in_sector) {
            return Err(SectorContentsMapFromBytesError::InvalidBytesLength {
                expected: Self::encoded_size(pieces_in_sector),
                actual: bytes.len(),
            });
        }

        // Checksum occupies the last `Blake3Hash::SIZE` bytes
        let (single_records_bit_arrays, expected_checksum) =
            bytes.split_at(bytes.len() - Blake3Hash::SIZE);
        // SAFETY: slice length is exact after `split_at` above and `Blake3Hash` has no
        // alignment requirements
        let expected_checksum = unsafe {
            Blake3Hash::from_bytes(expected_checksum).expect("No alignment requirements; qed")
        };
        let actual_checksum = Blake3Hash::from(blake3::hash(single_records_bit_arrays));
        if &actual_checksum != expected_checksum {
            debug!(
                %actual_checksum,
                %expected_checksum,
                "Hash doesn't match, corrupted bytes"
            );

            return Err(SectorContentsMapFromBytesError::ChecksumMismatch);
        }

        let mut record_chunks_used = vec![[0; _]; pieces_in_sector.into()];

        // Length was verified above, so the flattened bitfields fill the buffer exactly
        record_chunks_used
            .as_flattened_mut()
            .copy_from_slice(single_records_bit_arrays);

        Ok(Self { record_chunks_used })
    }

    /// Size of the serialized map in bytes for a sector with `pieces_in_sector` records:
    /// one `FoundProofs` bitfield per record plus a trailing BLAKE3 checksum
    pub const fn encoded_size(pieces_in_sector: u16) -> usize {
        size_of::<FoundProofs>() * pieces_in_sector as usize + Blake3Hash::SIZE
    }

    /// Serializes the map into `output`: flattened bitfields followed by their BLAKE3
    /// checksum.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapEncodeIntoError::InvalidBytesLength`] if `output` is not
    /// exactly [`Self::encoded_size`] bytes long.
    pub fn encode_into(&self, output: &mut [u8]) -> Result<(), SectorContentsMapEncodeIntoError> {
        // `len() as u16` cannot truncate: the map is always constructed from a `u16` count
        if output.len() != Self::encoded_size(self.record_chunks_used.len() as u16) {
            return Err(SectorContentsMapEncodeIntoError::InvalidBytesLength {
                expected: Self::encoded_size(self.record_chunks_used.len() as u16),
                actual: output.len(),
            });
        }

        // Data first, then the checksum over that data in the remaining bytes
        let slice = self.record_chunks_used.as_flattened();
        output[..slice.len()].copy_from_slice(slice);
        output[slice.len()..].copy_from_slice(blake3::hash(slice).as_bytes());

        Ok(())
    }

    /// Returns the per-record bitfields of used chunks, one entry per piece in the sector
    /// (indexed by piece offset)
    pub fn iter_record_chunks_used(&self) -> &[FoundProofs] {
        &self.record_chunks_used
    }

    /// Mutable access to the per-record bitfields of used chunks
    pub fn iter_record_chunks_used_mut(&mut self) -> &mut [FoundProofs] {
        &mut self.record_chunks_used
    }

    /// Number of chunks stored in each s-bucket, computed by counting set bits across all
    /// records for every bucket (in parallel)
    pub fn s_bucket_sizes(&self) -> Box<[u16; Record::NUM_S_BUCKETS]> {
        // `count() as u16` cannot truncate: at most one chunk per record and the record
        // count originates from a `u16`
        let s_bucket_sizes = (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .count() as u16
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_sizes.len(), Record::NUM_S_BUCKETS);

        // SAFETY: the length is asserted just above; the heap data of a `Box<[u16]>` of
        // length N has the same layout as `[u16; N]`
        unsafe {
            Box::from_raw(Box::into_raw(s_bucket_sizes).cast::<[u16; Record::NUM_S_BUCKETS]>())
        }
    }

    /// For the record at `piece_offset`, yields `(s_bucket, chunk_location)` pairs, where
    /// `chunk_location` is the global position of the chunk within the sector when chunks
    /// are laid out s-bucket by s-bucket
    pub fn iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl Iterator<Item = (SBucket, usize)> + '_ {
        (SBucket::ZERO..=SBucket::MAX)
            // Walk all used chunks in sector order: bucket by bucket, and within each
            // bucket in piece-offset order
            .flat_map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .map(move |current_piece_offset| (s_bucket, current_piece_offset))
            })
            // Global chunk position within the sector
            .enumerate()
            // Keep only chunks that belong to the requested record
            .filter_map(move |(chunk_location, (s_bucket, current_piece_offset))| {
                (current_piece_offset == piece_offset).then_some((s_bucket, chunk_location))
            })
            // A record has at most `Record::NUM_CHUNKS` chunks, stop early once found
            .take(Record::NUM_CHUNKS)
    }

    /// Parallel per-bucket view for the record at `piece_offset`: for each s-bucket yields
    /// `Some(chunk_offset)` with the position of this record's chunk within that bucket,
    /// or `None` if the record has no chunk in the bucket
    pub fn par_iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl IndexedParallelIterator<Item = Option<usize>> + '_ {
        let piece_offset = usize::from(piece_offset);
        (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(move |s_bucket| {
                // Locate this s-bucket's bit within a record's bitfield
                let byte_offset = usize::from(s_bucket) / u8::BITS as usize;
                let bit_mask = 1 << (usize::from(s_bucket) % u8::BITS as usize);

                // This record has no chunk in this s-bucket
                if self.record_chunks_used[piece_offset][byte_offset] & bit_mask == 0 {
                    return None;
                }

                // Within a bucket chunks are ordered by piece offset, so this chunk's
                // offset is the number of earlier records that also have a chunk here
                let chunk_offset = self
                    .record_chunks_used
                    .iter()
                    .take(piece_offset)
                    .filter(move |record_chunks_used| {
                        record_chunks_used[byte_offset] & bit_mask != 0
                    })
                    .count();

                Some(chunk_offset)
            })
    }

    /// Yields, in piece-offset order, the offsets of records that have a chunk stored in
    /// `s_bucket`.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapIterationError::SBucketOutOfRange`] when `s_bucket` is
    /// not below `Record::NUM_S_BUCKETS`.
    pub fn iter_s_bucket_piece_offsets(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = PieceOffset> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            // NOTE(review): `max` carries the bucket count, while the largest valid value
            // is `Record::NUM_S_BUCKETS - 1` — confirm the intended meaning of the field
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok((PieceOffset::ZERO..)
            .zip(&self.record_chunks_used)
            .filter_map(move |(piece_offset, record_chunks_used)| {
                // Test this bucket's bit in each record's bitfield
                let byte_offset = s_bucket / u8::BITS as usize;
                let bit_mask = 1 << (s_bucket % u8::BITS as usize);

                (record_chunks_used[byte_offset] & bit_mask != 0).then_some(piece_offset)
            }))
    }

    /// For `s_bucket`, yields one `bool` per record in the sector (in piece-offset order)
    /// indicating whether that record has a chunk stored in the bucket.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapIterationError::SBucketOutOfRange`] when `s_bucket` is
    /// not below `Record::NUM_S_BUCKETS`.
    pub fn iter_s_bucket_used_record_chunks_used(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = bool> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            // NOTE(review): same `max` vs largest-valid-index discrepancy as in
            // `iter_s_bucket_piece_offsets`
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok(self
            .record_chunks_used
            .iter()
            .map(move |record_chunks_used| {
                // Test this bucket's bit in each record's bitfield
                let byte_offset = s_bucket / u8::BITS as usize;
                let bit_mask = 1 << (s_bucket % u8::BITS as usize);

                record_chunks_used[byte_offset] & bit_mask != 0
            }))
    }
}