1use ab_core_primitives::checksum::Blake3Checksummed;
10use ab_core_primitives::hashes::Blake3Hash;
11use ab_core_primitives::pieces::{PieceHeader, PieceOffset, Record};
12use ab_core_primitives::sectors::{SBucket, SectorIndex};
13use ab_core_primitives::segments::{HistorySize, SegmentIndex};
14use ab_io_type::trivial_type::TrivialType;
15use parity_scale_codec::{Decode, Encode};
16use rayon::prelude::*;
17use std::ops::{Deref, DerefMut};
18use thiserror::Error;
19use tracing::debug;
20
21#[inline]
25pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
26 pieces_in_sector as usize * Record::SIZE
27}
28
29#[inline]
33pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
34 pieces_in_sector as usize * RecordMetadata::encoded_size()
35}
36
37#[inline]
45pub const fn sector_size(pieces_in_sector: u16) -> usize {
46 sector_record_chunks_size(pieces_in_sector)
47 + sector_record_metadata_size(pieces_in_sector)
48 + SectorContentsMap::encoded_size(pieces_in_sector)
49 + Blake3Hash::SIZE
50}
51
/// Metadata describing a plotted sector
#[derive(Debug, Encode, Decode, Clone)]
pub struct SectorMetadata {
    /// Index of the sector
    pub sector_index: SectorIndex,
    /// Number of pieces stored in this sector
    pub pieces_in_sector: u16,
    /// Number of used record chunks in each s-bucket (see [`Self::s_bucket_offsets()`])
    pub s_bucket_sizes: Box<[u16; Record::NUM_S_BUCKETS]>,
    /// Size of the blockchain history at the time this sector was created
    pub history_size: HistorySize,
}
65
impl SectorMetadata {
    /// Offset of each s-bucket from the start of the sector, derived as the prefix sum of
    /// `s_bucket_sizes`
    pub fn s_bucket_offsets(&self) -> Box<[u32; Record::NUM_S_BUCKETS]> {
        // Running prefix sum: each s-bucket starts where the previous one ended
        let s_bucket_offsets = self
            .s_bucket_sizes
            .iter()
            .map({
                let mut base_offset = 0;

                move |s_bucket_size| {
                    let offset = base_offset;
                    base_offset += u32::from(*s_bucket_size);
                    offset
                }
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_offsets.len(), Record::NUM_S_BUCKETS);
        // SAFETY: The boxed slice's length is asserted above to be exactly
        // `Record::NUM_S_BUCKETS`, so it can be reinterpreted as a boxed array of the same
        // element type and length (collecting into `Box<[u32]>` first avoids materializing the
        // large array on the stack)
        unsafe {
            Box::from_raw(Box::into_raw(s_bucket_offsets).cast::<[u32; Record::NUM_S_BUCKETS]>())
        }
    }
}
90
/// [`SectorMetadata`] wrapped in [`Blake3Checksummed`], so a checksum accompanies the metadata in
/// its encoded form
#[derive(Debug, Clone, Encode, Decode)]
pub struct SectorMetadataChecksummed(Blake3Checksummed<SectorMetadata>);
94
95impl From<SectorMetadata> for SectorMetadataChecksummed {
96 #[inline]
97 fn from(value: SectorMetadata) -> Self {
98 Self(Blake3Checksummed(value))
99 }
100}
101
102impl Deref for SectorMetadataChecksummed {
103 type Target = SectorMetadata;
104
105 #[inline]
106 fn deref(&self) -> &Self::Target {
107 &self.0.0
108 }
109}
110
111impl DerefMut for SectorMetadataChecksummed {
112 #[inline]
113 fn deref_mut(&mut self) -> &mut Self::Target {
114 &mut self.0.0
115 }
116}
117
impl SectorMetadataChecksummed {
    /// Size of the encoded checksummed sector metadata, in bytes
    #[inline]
    pub fn encoded_size() -> usize {
        // The encoding is fixed-size regardless of field values, so measuring a zeroed
        // placeholder instance yields the size of any instance
        let default = SectorMetadataChecksummed::from(SectorMetadata {
            sector_index: SectorIndex::ZERO,
            pieces_in_sector: 0,
            // SAFETY: All-zero bytes are a valid value for an array of `u16`s
            s_bucket_sizes: unsafe { Box::new_zeroed().assume_init() },
            history_size: HistorySize::from(SegmentIndex::ZERO),
        });

        default.encoded_size()
    }
}
136
/// Metadata stored alongside each record in a sector
#[derive(Debug, Default, Copy, Clone, Encode, Decode)]
#[repr(C)]
pub(crate) struct RecordMetadata {
    /// Header of the piece this record corresponds to
    pub(crate) piece_header: PieceHeader,
    /// Blake3 checksum of the piece
    pub(crate) piece_checksum: Blake3Hash,
}
146
// Compile-time check: alignment of 1 means the `#[repr(C)]` struct has no padding, so
// `size_of::<RecordMetadata>()` can be used as its encoded size (see `encoded_size()`)
const {
    assert!(align_of::<RecordMetadata>() == 1);
}
150
151impl RecordMetadata {
152 pub(crate) const fn encoded_size() -> usize {
153 size_of::<Self>()
154 }
155}
156
/// Raw sector contents: one record plus its metadata per piece
#[derive(Debug, Clone)]
pub(crate) struct RawSector {
    /// One record for each piece in the sector
    pub(crate) records: Vec<Record>,
    /// Metadata corresponding to each record (same length as `records`)
    pub(crate) metadata: Vec<RecordMetadata>,
}
165
166impl RawSector {
167 pub(crate) fn new(pieces_in_sector: u16) -> Self {
169 Self {
170 records: Record::new_zero_vec(usize::from(pieces_in_sector)),
171 metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
172 }
173 }
174}
175
/// Bitfield with one bit per s-bucket (`Record::NUM_S_BUCKETS` bits packed into bytes), where a
/// set bit indicates the record chunk for that s-bucket is used
pub type FoundProofs = [u8; Record::NUM_S_BUCKETS / u8::BITS as usize];
184
/// Error happening when creating [`SectorContentsMap`] from bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapFromBytesError {
    /// Input length doesn't match the expected encoded size
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected length
        expected: usize,
        /// Actual length
        actual: usize,
    },
    /// Trailing checksum doesn't match the hash of the contents
    #[error("Checksum mismatch")]
    ChecksumMismatch,
}
200
/// Error happening when encoding [`SectorContentsMap`] into an output buffer
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapEncodeIntoError {
    /// Output buffer length doesn't match the expected encoded size
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected length
        expected: usize,
        /// Actual length
        actual: usize,
    },
}
213
/// Error happening during [`SectorContentsMap`] iteration
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapIterationError {
    /// Provided s-bucket is outside of the valid range
    #[error("S-bucket provided {provided} is out of range, max {max}")]
    SBucketOutOfRange {
        /// Provided s-bucket value
        provided: usize,
        /// Upper bound reported in the error (the number of s-buckets)
        max: usize,
    },
}
226
/// Map of which record chunks are used for each piece in a sector
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SectorContentsMap {
    /// One bitfield per piece; bit N is set when the chunk for s-bucket N is used
    record_chunks_used: Vec<FoundProofs>,
}
238
impl SectorContentsMap {
    /// Creates an empty map (no record chunks used) for `pieces_in_sector` pieces
    pub fn new(pieces_in_sector: u16) -> Self {
        Self {
            record_chunks_used: vec![[0; _]; usize::from(pieces_in_sector)],
        }
    }

    /// Reconstructs the map from bytes produced by [`Self::encode_into()`].
    ///
    /// Returns an error if `bytes` length doesn't match [`Self::encoded_size()`] for
    /// `pieces_in_sector` or if the trailing checksum doesn't match the contents.
    pub fn from_bytes(
        bytes: &[u8],
        pieces_in_sector: u16,
    ) -> Result<Self, SectorContentsMapFromBytesError> {
        if bytes.len() != Self::encoded_size(pieces_in_sector) {
            return Err(SectorContentsMapFromBytesError::InvalidBytesLength {
                expected: Self::encoded_size(pieces_in_sector),
                actual: bytes.len(),
            });
        }

        // Encoded layout: bitfields for all pieces followed by a Blake3 hash of those bitfields
        let (single_records_bit_arrays, expected_checksum) =
            bytes.split_at(bytes.len() - Blake3Hash::SIZE);
        // SAFETY: The checksum slice is exactly `Blake3Hash::SIZE` bytes due to `split_at()`
        // above, and (per the `expect` message) there are no alignment requirements
        let expected_checksum = unsafe {
            Blake3Hash::from_bytes(expected_checksum).expect("No alignment requirements; qed")
        };
        let actual_checksum = Blake3Hash::from(blake3::hash(single_records_bit_arrays));
        if &actual_checksum != expected_checksum {
            debug!(
                %actual_checksum,
                %expected_checksum,
                "Hash doesn't match, corrupted bytes"
            );

            return Err(SectorContentsMapFromBytesError::ChecksumMismatch);
        }

        let mut record_chunks_used = vec![[0; _]; pieces_in_sector.into()];

        // Lengths match because the input length was validated above
        record_chunks_used
            .as_flattened_mut()
            .copy_from_slice(single_records_bit_arrays);

        Ok(Self { record_chunks_used })
    }

    /// Size of the encoded map for `pieces_in_sector` pieces: one [`FoundProofs`] bitfield per
    /// piece plus a trailing Blake3 checksum
    pub const fn encoded_size(pieces_in_sector: u16) -> usize {
        size_of::<FoundProofs>() * pieces_in_sector as usize + Blake3Hash::SIZE
    }

    /// Encodes the map (bitfields followed by a Blake3 checksum of them) into `output`.
    ///
    /// Returns an error if `output` length doesn't match [`Self::encoded_size()`].
    pub fn encode_into(&self, output: &mut [u8]) -> Result<(), SectorContentsMapEncodeIntoError> {
        // NOTE(review): `len() as u16` would truncate above `u16::MAX` pieces; instances are only
        // constructed from a `u16` count, so this appears unreachable — confirm
        if output.len() != Self::encoded_size(self.record_chunks_used.len() as u16) {
            return Err(SectorContentsMapEncodeIntoError::InvalidBytesLength {
                expected: Self::encoded_size(self.record_chunks_used.len() as u16),
                actual: output.len(),
            });
        }

        // Write all bitfields, then the checksum of those bytes at the end
        let slice = self.record_chunks_used.as_flattened();
        output[..slice.len()].copy_from_slice(slice);
        output[slice.len()..].copy_from_slice(blake3::hash(slice).as_bytes());

        Ok(())
    }

    /// Returns the per-piece record chunk usage bitfields
    pub fn iter_record_chunks_used(&self) -> &[FoundProofs] {
        &self.record_chunks_used
    }

    /// Returns the per-piece record chunk usage bitfields mutably
    pub fn iter_record_chunks_used_mut(&mut self) -> &mut [FoundProofs] {
        &mut self.record_chunks_used
    }

    /// Number of used record chunks in each s-bucket (computed in parallel)
    pub fn s_bucket_sizes(&self) -> Box<[u16; Record::NUM_S_BUCKETS]> {
        let s_bucket_sizes = (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(|s_bucket| {
                // Size of an s-bucket is the number of pieces with a used chunk in it
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .count() as u16
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_sizes.len(), Record::NUM_S_BUCKETS);

        // SAFETY: The boxed slice's length is asserted above to be exactly
        // `Record::NUM_S_BUCKETS`, so it can be reinterpreted as a boxed array of the same
        // element type and length
        unsafe {
            Box::from_raw(Box::into_raw(s_bucket_sizes).cast::<[u16; Record::NUM_S_BUCKETS]>())
        }
    }

    /// For the given piece, yields `(s_bucket, chunk_location)` for every used record chunk,
    /// where `chunk_location` is the chunk's global position within the sector
    pub fn iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl Iterator<Item = (SBucket, usize)> + '_ {
        (SBucket::ZERO..=SBucket::MAX)
            // All used chunks in s-bucket order, each tagged with the piece it belongs to
            .flat_map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .map(move |current_piece_offset| (s_bucket, current_piece_offset))
            })
            // Global chunk position within the sector
            .enumerate()
            // Keep only chunks belonging to the requested piece
            .filter_map(move |(chunk_location, (s_bucket, current_piece_offset))| {
                (current_piece_offset == piece_offset).then_some((s_bucket, chunk_location))
            })
            // A record contributes at most `Record::NUM_CHUNKS` chunks
            .take(Record::NUM_CHUNKS)
    }

    /// Parallel iterator over all s-buckets for the given piece, yielding `None` when the piece
    /// has no used chunk in that s-bucket, or `Some(chunk_offset)` — the chunk's position within
    /// that s-bucket
    pub fn par_iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl IndexedParallelIterator<Item = Option<usize>> + '_ {
        let piece_offset = usize::from(piece_offset);
        (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(move |s_bucket| {
                // Position of this s-bucket's bit within the piece's bitfield
                let byte_offset = usize::from(s_bucket) / u8::BITS as usize;
                let bit_mask = 1 << (usize::from(s_bucket) % u8::BITS as usize);

                if self.record_chunks_used[piece_offset][byte_offset] & bit_mask == 0 {
                    return None;
                }

                // Offset within the s-bucket equals the number of earlier pieces that also have
                // their chunk used in the same s-bucket
                let chunk_offset = self
                    .record_chunks_used
                    .iter()
                    .take(piece_offset)
                    .filter(move |record_chunks_used| {
                        record_chunks_used[byte_offset] & bit_mask != 0
                    })
                    .count();

                Some(chunk_offset)
            })
    }

    /// Iterates piece offsets whose record chunk is used in the given s-bucket.
    ///
    /// Returns an error if `s_bucket` is not below `Record::NUM_S_BUCKETS`.
    pub fn iter_s_bucket_piece_offsets(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = PieceOffset> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            // NOTE(review): `max` carries the exclusive bound `NUM_S_BUCKETS` rather than the
            // largest valid value — confirm this is the intended meaning of "max" in the message
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok((PieceOffset::ZERO..)
            .zip(&self.record_chunks_used)
            .filter_map(move |(piece_offset, record_chunks_used)| {
                // Test this s-bucket's bit in each piece's bitfield
                let byte_offset = s_bucket / u8::BITS as usize;
                let bit_mask = 1 << (s_bucket % u8::BITS as usize);

                (record_chunks_used[byte_offset] & bit_mask != 0).then_some(piece_offset)
            }))
    }

    /// For the given s-bucket, iterates over all pieces in order, yielding `true` where the
    /// piece's record chunk is used in that s-bucket.
    ///
    /// Returns an error if `s_bucket` is not below `Record::NUM_S_BUCKETS`.
    pub fn iter_s_bucket_used_record_chunks_used(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = bool> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok(self
            .record_chunks_used
            .iter()
            .map(move |record_chunks_used| {
                // Test this s-bucket's bit in each piece's bitfield
                let byte_offset = s_bucket / u8::BITS as usize;
                let bit_mask = 1 << (s_bucket % u8::BITS as usize);

                record_chunks_used[byte_offset] & bit_mask != 0
            }))
    }
}
463}