1use ab_core_primitives::checksum::Blake3Checksummed;
10use ab_core_primitives::hashes::Blake3Hash;
11use ab_core_primitives::pieces::{PieceOffset, Record, RecordChunksRoot, RecordProof, RecordRoot};
12use ab_core_primitives::sectors::{SBucket, SectorIndex};
13use ab_core_primitives::segments::{HistorySize, SegmentIndex};
14use ab_io_type::trivial_type::TrivialType;
15use bitvec::prelude::*;
16use parity_scale_codec::{Decode, Encode};
17use rayon::prelude::*;
18use std::ops::{Deref, DerefMut};
19use std::slice;
20use thiserror::Error;
21use tracing::debug;
22
23#[inline]
27pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
28 pieces_in_sector as usize * Record::SIZE
29}
30
31#[inline]
35pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
36 pieces_in_sector as usize * RecordMetadata::encoded_size()
37}
38
39#[inline]
47pub const fn sector_size(pieces_in_sector: u16) -> usize {
48 sector_record_chunks_size(pieces_in_sector)
49 + sector_record_metadata_size(pieces_in_sector)
50 + SectorContentsMap::encoded_size(pieces_in_sector)
51 + Blake3Hash::SIZE
52}
53
/// Metadata describing a single plotted sector
#[derive(Debug, Encode, Decode, Clone)]
pub struct SectorMetadata {
    /// Index of this sector
    pub sector_index: SectorIndex,
    /// Number of pieces stored in this sector
    pub pieces_in_sector: u16,
    /// Number of used chunks in each of the sector's s-buckets
    pub s_bucket_sizes: Box<[u16; Record::NUM_S_BUCKETS]>,
    // NOTE(review): presumably the size of the blockchain history the sector was
    // plotted against — confirm against the plotting code
    pub history_size: HistorySize,
}
66
67impl SectorMetadata {
68 pub fn s_bucket_offsets(&self) -> Box<[u32; Record::NUM_S_BUCKETS]> {
70 let s_bucket_offsets = self
71 .s_bucket_sizes
72 .iter()
73 .map({
74 let mut base_offset = 0;
75
76 move |s_bucket_size| {
77 let offset = base_offset;
78 base_offset += u32::from(*s_bucket_size);
79 offset
80 }
81 })
82 .collect::<Box<_>>();
83
84 assert_eq!(s_bucket_offsets.len(), Record::NUM_S_BUCKETS);
85 unsafe {
87 Box::from_raw(Box::into_raw(s_bucket_offsets).cast::<[u32; Record::NUM_S_BUCKETS]>())
88 }
89 }
90}
91
/// Same as [`SectorMetadata`], but wrapped in [`Blake3Checksummed`] so the SCALE
/// encoding carries a checksum alongside the metadata
#[derive(Debug, Clone, Encode, Decode)]
pub struct SectorMetadataChecksummed(Blake3Checksummed<SectorMetadata>);
95
impl From<SectorMetadata> for SectorMetadataChecksummed {
    // Wraps plain metadata in the checksummed newtype; the checksum itself is
    // presumably produced/verified by `Blake3Checksummed` during encode/decode —
    // confirm against its implementation
    #[inline]
    fn from(value: SectorMetadata) -> Self {
        Self(Blake3Checksummed(value))
    }
}
102
// Transparent read access to the wrapped `SectorMetadata`
impl Deref for SectorMetadataChecksummed {
    type Target = SectorMetadata;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.0.0
    }
}
111
// Transparent mutable access to the wrapped `SectorMetadata`
impl DerefMut for SectorMetadataChecksummed {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0.0
    }
}
118
119impl SectorMetadataChecksummed {
120 #[inline]
124 pub fn encoded_size() -> usize {
125 let default = SectorMetadataChecksummed::from(SectorMetadata {
126 sector_index: SectorIndex::ZERO,
127 pieces_in_sector: 0,
128 s_bucket_sizes: unsafe { Box::new_zeroed().assume_init() },
131 history_size: HistorySize::from(SegmentIndex::ZERO),
132 });
133
134 default.encoded_size()
135 }
136}
137
/// Metadata stored alongside each record in a plotted sector
#[derive(Debug, Default, Clone, Encode, Decode)]
pub(crate) struct RecordMetadata {
    /// Record root
    pub(crate) root: RecordRoot,
    /// Root of the parity chunks of the record
    pub(crate) parity_chunks_root: RecordChunksRoot,
    /// Record proof
    pub(crate) proof: RecordProof,
    /// Checksum (BLAKE3 hash) of the whole piece this record belongs to
    pub(crate) piece_checksum: Blake3Hash,
}
150
151impl RecordMetadata {
152 pub(crate) const fn encoded_size() -> usize {
153 RecordProof::SIZE + RecordRoot::SIZE + RecordChunksRoot::SIZE + Blake3Hash::SIZE
154 }
155}
156
/// A sector before encoding: raw records together with their metadata
#[derive(Debug, Clone)]
pub(crate) struct RawSector {
    /// Records to be plotted, one per piece in the sector
    pub(crate) records: Vec<Record>,
    /// Metadata for each record, parallel to `records`
    pub(crate) metadata: Vec<RecordMetadata>,
}
165
166impl RawSector {
167 pub(crate) fn new(pieces_in_sector: u16) -> Self {
169 Self {
170 records: Record::new_zero_vec(usize::from(pieces_in_sector)),
171 metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
172 }
173 }
174}
175
/// Bit array with one bit per s-bucket of a single record
/// (`Record::NUM_S_BUCKETS` bits packed into a byte array)
pub type SingleRecordBitArray = BitArray<[u8; Record::NUM_S_BUCKETS / u8::BITS as usize]>;
178
/// Error happening when creating a [`SectorContentsMap`] from bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapFromBytesError {
    /// Input doesn't match the expected encoded size
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected number of bytes
        expected: usize,
        /// Actual number of bytes provided
        actual: usize,
    },
    /// Trailing checksum doesn't match the contents (corrupted bytes)
    #[error("Checksum mismatch")]
    ChecksumMismatch,
}
194
/// Error happening when encoding a [`SectorContentsMap`] into an output buffer
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapEncodeIntoError {
    /// Output buffer doesn't match the expected encoded size
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected number of bytes
        expected: usize,
        /// Actual number of bytes provided
        actual: usize,
    },
}
207
/// Error happening when iterating a [`SectorContentsMap`] by s-bucket
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapIterationError {
    /// Requested s-bucket index is outside the valid range
    #[error("S-bucket provided {provided} is out of range, max {max}")]
    SBucketOutOfRange {
        /// S-bucket index that was provided
        provided: usize,
        /// Exclusive upper bound on valid s-bucket indices (`Record::NUM_S_BUCKETS`)
        max: usize,
    },
}
220
/// Map of sector contents: for every record in the sector, which of its s-bucket
/// chunks are used
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SectorContentsMap {
    // One bit array per piece in the sector; bit `i` of a record's array tells
    // whether the record's chunk at s-bucket `i` is used
    record_chunks_used: Vec<SingleRecordBitArray>,
}
232
impl SectorContentsMap {
    /// Creates a map for `pieces_in_sector` records with all chunk bits unset
    pub fn new(pieces_in_sector: u16) -> Self {
        Self {
            record_chunks_used: vec![
                SingleRecordBitArray::default();
                usize::from(pieces_in_sector)
            ],
        }
    }

    /// Reconstructs the map from bytes previously produced by [`Self::encode_into()`].
    ///
    /// Expects exactly [`Self::encoded_size()`] bytes: the per-record bit arrays
    /// followed by a BLAKE3 checksum of those bytes.
    ///
    /// # Errors
    /// * [`SectorContentsMapFromBytesError::InvalidBytesLength`] if `bytes` has the
    ///   wrong length
    /// * [`SectorContentsMapFromBytesError::ChecksumMismatch`] if the trailing
    ///   checksum doesn't match the contents
    pub fn from_bytes(
        bytes: &[u8],
        pieces_in_sector: u16,
    ) -> Result<Self, SectorContentsMapFromBytesError> {
        if bytes.len() != Self::encoded_size(pieces_in_sector) {
            return Err(SectorContentsMapFromBytesError::InvalidBytesLength {
                expected: Self::encoded_size(pieces_in_sector),
                actual: bytes.len(),
            });
        }

        // The checksum occupies the last `Blake3Hash::SIZE` bytes
        let (single_records_bit_arrays, expected_checksum) =
            bytes.split_at(bytes.len() - Blake3Hash::SIZE);
        // SAFETY: the slice is exactly `Blake3Hash::SIZE` bytes long (split above,
        // total length verified earlier) and a hash is a plain byte array with no
        // alignment requirements
        let expected_checksum = unsafe {
            Blake3Hash::from_bytes(expected_checksum).expect("No alignment requirements; qed")
        };
        let actual_checksum = Blake3Hash::from(blake3::hash(single_records_bit_arrays));
        if &actual_checksum != expected_checksum {
            debug!(
                %actual_checksum,
                %expected_checksum,
                "Hash doesn't match, corrupted bytes"
            );

            return Err(SectorContentsMapFromBytesError::ChecksumMismatch);
        }

        let mut record_chunks_used = vec![SingleRecordBitArray::default(); pieces_in_sector.into()];

        // Copy each fixed-size chunk of input bytes into the backing byte storage of
        // the corresponding record's bit array
        for (record_chunks_used, bytes) in record_chunks_used.iter_mut().zip(
            single_records_bit_arrays
                .as_chunks::<{ size_of::<SingleRecordBitArray>() }>()
                .0,
        ) {
            record_chunks_used.as_raw_mut_slice().copy_from_slice(bytes);
        }

        Ok(Self { record_chunks_used })
    }

    /// Size of the encoded map in bytes for a sector with `pieces_in_sector` pieces:
    /// one bit array per piece plus a trailing checksum hash
    pub const fn encoded_size(pieces_in_sector: u16) -> usize {
        size_of::<SingleRecordBitArray>() * pieces_in_sector as usize + Blake3Hash::SIZE
    }

    /// Encodes the map into `output` and appends a BLAKE3 checksum of the contents.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapEncodeIntoError::InvalidBytesLength`] if `output`
    /// is not exactly [`Self::encoded_size()`] bytes long.
    pub fn encode_into(&self, output: &mut [u8]) -> Result<(), SectorContentsMapEncodeIntoError> {
        if output.len() != Self::encoded_size(self.record_chunks_used.len() as u16) {
            return Err(SectorContentsMapEncodeIntoError::InvalidBytesLength {
                expected: Self::encoded_size(self.record_chunks_used.len() as u16),
                actual: output.len(),
            });
        }

        let slice = self.record_chunks_used.as_slice();
        // SAFETY: `SingleRecordBitArray` is backed by a plain `[u8; _]` (see the type
        // alias), so a slice of bit arrays can be reinterpreted as a byte slice of the
        // same total size
        let slice =
            unsafe { slice::from_raw_parts(slice.as_ptr().cast::<u8>(), size_of_val(slice)) };

        output[..slice.len()].copy_from_slice(slice);
        // Remaining bytes hold the checksum of the contents (lengths verified above)
        output[slice.len()..].copy_from_slice(blake3::hash(slice).as_bytes());

        Ok(())
    }

    /// Per-record bit arrays indicating which chunks of each record are used
    pub fn iter_record_chunks_used(&self) -> &[SingleRecordBitArray] {
        &self.record_chunks_used
    }

    /// Mutable version of [`Self::iter_record_chunks_used()`]
    pub fn iter_record_chunks_used_mut(&mut self) -> &mut [SingleRecordBitArray] {
        &mut self.record_chunks_used
    }

    /// Computes the number of used chunks in each s-bucket (in parallel over buckets)
    pub fn s_bucket_sizes(&self) -> Box<[u16; Record::NUM_S_BUCKETS]> {
        let s_bucket_sizes = (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .count() as u16
            })
            .collect::<Box<_>>();

        assert_eq!(s_bucket_sizes.len(), Record::NUM_S_BUCKETS);

        // SAFETY: length was asserted above, so the boxed slice can be reinterpreted
        // as a boxed array of exactly `NUM_S_BUCKETS` elements (same layout)
        unsafe {
            Box::from_raw(Box::into_raw(s_bucket_sizes).cast::<[u16; Record::NUM_S_BUCKETS]>())
        }
    }

    /// Iterates over `(s_bucket, chunk_location)` pairs for every used chunk of the
    /// record at `piece_offset`, where `chunk_location` is the chunk's global index
    /// across all s-buckets in the order chunks appear in the plotted sector.
    /// Yields at most `Record::NUM_CHUNKS` entries.
    pub fn iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl Iterator<Item = (SBucket, usize)> + '_ {
        (SBucket::ZERO..=SBucket::MAX)
            // Flatten every s-bucket into its sequence of piece offsets that use it
            .flat_map(|s_bucket| {
                self.iter_s_bucket_piece_offsets(s_bucket)
                    .expect("S-bucket guaranteed to be in range; qed")
                    .map(move |current_piece_offset| (s_bucket, current_piece_offset))
            })
            // Global enumeration gives each used chunk its location in the sector
            .enumerate()
            // Keep only chunks that belong to the requested piece
            .filter_map(move |(chunk_location, (s_bucket, current_piece_offset))| {
                (current_piece_offset == piece_offset).then_some((s_bucket, chunk_location))
            })
            .take(Record::NUM_CHUNKS)
    }

    /// Parallel iterator over all s-buckets yielding, for the record at
    /// `piece_offset`, `Some(chunk_offset)` if the record has a used chunk in that
    /// s-bucket (where `chunk_offset` is the chunk's position within the s-bucket)
    /// and `None` otherwise.
    pub fn par_iter_record_chunk_to_plot(
        &self,
        piece_offset: PieceOffset,
    ) -> impl IndexedParallelIterator<Item = Option<usize>> + '_ {
        let piece_offset = usize::from(piece_offset);
        (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
            .into_par_iter()
            .map(SBucket::from)
            .map(move |s_bucket| {
                // This record has no used chunk in this s-bucket
                if !self.record_chunks_used[piece_offset][usize::from(s_bucket)] {
                    return None;
                }

                // Offset within the s-bucket = number of earlier records that also
                // have a used chunk in this s-bucket
                let chunk_offset = self
                    .record_chunks_used
                    .iter()
                    .take(piece_offset)
                    .filter(move |record_chunks_used| record_chunks_used[usize::from(s_bucket)])
                    .count();

                Some(chunk_offset)
            })
    }

    /// Iterates over the piece offsets of all records that have a used chunk in the
    /// given s-bucket, in ascending piece-offset order.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapIterationError::SBucketOutOfRange`] if `s_bucket`
    /// is not below `Record::NUM_S_BUCKETS`.
    pub fn iter_s_bucket_piece_offsets(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = PieceOffset> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok((PieceOffset::ZERO..)
            .zip(&self.record_chunks_used)
            .filter_map(move |(piece_offset, record_chunks_used)| {
                record_chunks_used[s_bucket].then_some(piece_offset)
            }))
    }

    /// Iterates over all records (in piece-offset order) yielding whether each record
    /// has a used chunk in the given s-bucket.
    ///
    /// # Errors
    /// Returns [`SectorContentsMapIterationError::SBucketOutOfRange`] if `s_bucket`
    /// is not below `Record::NUM_S_BUCKETS`.
    pub fn iter_s_bucket_used_record_chunks_used(
        &self,
        s_bucket: SBucket,
    ) -> Result<impl Iterator<Item = bool> + '_, SectorContentsMapIterationError> {
        let s_bucket = usize::from(s_bucket);

        if s_bucket >= Record::NUM_S_BUCKETS {
            return Err(SectorContentsMapIterationError::SBucketOutOfRange {
                provided: s_bucket,
                max: Record::NUM_S_BUCKETS,
            });
        }

        Ok(self
            .record_chunks_used
            .iter()
            .map(move |record_chunks_used| record_chunks_used[s_bucket]))
    }
}