1#[cfg(test)]
4mod tests;
5
6use crate::hashes::Blake3Hash;
7use crate::nano_u256::NanoU256;
8use crate::pieces::{PieceIndex, PieceOffset, Record};
9use crate::pos::PosSeed;
10use crate::segments::{HistorySize, SuperSegmentRoot};
11use crate::solutions::ShardCommitmentHash;
12use ab_blake3::{single_block_hash, single_block_keyed_hash};
13use ab_io_type::trivial_type::TrivialType;
14use core::hash::Hash;
15use core::iter::Step;
16use core::num::{NonZeroU64, TryFromIntError};
17use core::simd::Simd;
18use derive_more::{Add, AddAssign, Deref, Display, Div, DivAssign, Mul, MulAssign, Sub, SubAssign};
19#[cfg(feature = "scale-codec")]
20use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
21#[cfg(feature = "serde")]
22use serde::{Deserialize, Serialize};
23
/// Sector index — a `u16` newtype identifying a single sector within a plot.
///
/// Used, together with other inputs, to derive a [`SectorId`]. Supports arithmetic via
/// `derive_more` and is `#[repr(C)]` so its layout matches the inner `u16`.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SectorIndex(u16);
50
51impl Step for SectorIndex {
52 #[inline(always)]
53 fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
54 u16::steps_between(&start.0, &end.0)
55 }
56
57 #[inline(always)]
58 fn forward_checked(start: Self, count: usize) -> Option<Self> {
59 u16::forward_checked(start.0, count).map(Self)
60 }
61
62 #[inline(always)]
63 fn backward_checked(start: Self, count: usize) -> Option<Self> {
64 u16::backward_checked(start.0, count).map(Self)
65 }
66}
67
impl const From<u16> for SectorIndex {
    // Infallible wrapping of the raw index; `const` so the conversion is usable in const
    // contexts
    #[inline(always)]
    fn from(value: u16) -> Self {
        Self(value)
    }
}
74
impl const From<SectorIndex> for u16 {
    // Infallible unwrapping back to the raw index
    #[inline(always)]
    fn from(value: SectorIndex) -> Self {
        value.0
    }
}
81
impl const From<SectorIndex> for u32 {
    // Lossless widening of the inner `u16`
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u32::from(original.0)
    }
}
88
impl const From<SectorIndex> for u64 {
    // Lossless widening of the inner `u16`
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u64::from(original.0)
    }
}
95
impl const From<SectorIndex> for usize {
    // Lossless widening of the inner `u16` (`usize` is at least 16 bits on all supported
    // targets)
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        usize::from(original.0)
    }
}
102
impl SectorIndex {
    /// Size of the encoded sector index in bytes (same as `u16`)
    pub const SIZE: usize = size_of::<u16>();
    /// Sector index 0
    pub const ZERO: Self = Self(0);
    /// Largest representable sector index (`u16::MAX`)
    pub const MAX: Self = Self(u16::MAX);

    /// Create a sector index from its little-endian byte representation
    #[inline(always)]
    pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
        Self(u16::from_le_bytes(bytes))
    }

    /// Convert the sector index into its little-endian byte representation
    #[inline(always)]
    pub const fn to_bytes(self) -> [u8; Self::SIZE] {
        self.0.to_le_bytes()
    }
}
123
/// Per-sector challenge for a slot, derived by XOR-ing the sector id with the global
/// challenge (see `SectorId::derive_sector_slot_challenge`)
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
pub struct SectorSlotChallenge(Blake3Hash);
127
impl SectorSlotChallenge {
    /// Index of the s-bucket to audit for this challenge: the first two bytes of the
    /// challenge hash interpreted as a little-endian `u16`
    #[inline]
    pub fn s_bucket_audit_index(&self) -> SBucket {
        // Compile-time sanity check: two raw bytes cover the `u16` range exactly, so this
        // only maps onto valid s-buckets if there are exactly `2^16` of them
        const {
            assert!(Record::NUM_S_BUCKETS == 1 << u16::BITS as usize);
        }
        SBucket::from(u16::from_le_bytes([self.0[0], self.0[1]]))
    }
}
140
/// Unique identifier of a sector: a keyed BLAKE3 hash over the sector's defining inputs
/// (see [`SectorId::new`])
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SectorId(Blake3Hash);
146
147impl AsRef<[u8]> for SectorId {
148 #[inline]
149 fn as_ref(&self) -> &[u8] {
150 self.0.as_ref()
151 }
152}
153
154impl SectorId {
155 const SIZE: usize = Blake3Hash::SIZE;
157
158 #[inline]
160 pub fn new(
161 public_key_hash: &Blake3Hash,
162 shard_commitments_root: &ShardCommitmentHash,
163 sector_index: SectorIndex,
164 history_size: HistorySize,
165 ) -> Self {
166 let mut bytes_to_hash =
167 [0; SectorIndex::SIZE + HistorySize::SIZE as usize + ShardCommitmentHash::SIZE];
168 bytes_to_hash[..SectorIndex::SIZE].copy_from_slice(§or_index.to_bytes());
169 bytes_to_hash[SectorIndex::SIZE..][..HistorySize::SIZE as usize]
170 .copy_from_slice(&history_size.as_non_zero_u64().get().to_le_bytes());
171 bytes_to_hash[SectorIndex::SIZE + HistorySize::SIZE as usize..]
172 .copy_from_slice(shard_commitments_root.as_bytes());
173 Self(Blake3Hash::new(
175 single_block_keyed_hash(public_key_hash, &bytes_to_hash)
176 .expect("Less than a single block worth of bytes; qed"),
177 ))
178 }
179
180 pub fn derive_piece_index(
183 &self,
184 piece_offset: PieceOffset,
185 history_size: HistorySize,
186 max_pieces_in_sector: u16,
187 recent_segments: HistorySize,
188 recent_history_fraction: (HistorySize, HistorySize),
189 ) -> PieceIndex {
190 let recent_segments_in_pieces = recent_segments.in_pieces().get();
191 let min_history_size_in_pieces = recent_segments_in_pieces
194 * recent_history_fraction.1.in_pieces().get()
195 / recent_history_fraction.0.in_pieces().get();
196 let input_hash = {
197 let piece_offset_bytes = piece_offset.to_bytes();
198 let mut key = [0; 32];
199 key[..piece_offset_bytes.len()].copy_from_slice(&piece_offset_bytes);
200 NanoU256::from_le_bytes(
202 single_block_keyed_hash(&key, self.as_ref())
203 .expect("Less than a single block worth of bytes; qed"),
204 )
205 };
206 let history_size_in_pieces = history_size.in_pieces().get();
207 let num_interleaved_pieces = 1.max(
208 u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
209 / recent_history_fraction.1.in_pieces().get()
210 * 2,
211 );
212
213 let piece_index = if history_size_in_pieces > min_history_size_in_pieces
214 && u64::from(piece_offset) < num_interleaved_pieces
215 && u16::from(piece_offset) % 2 == 1
216 {
217 (input_hash % recent_segments_in_pieces)
220 + (history_size_in_pieces - recent_segments_in_pieces)
221 } else {
222 input_hash % history_size_in_pieces
223 };
224
225 PieceIndex::from(piece_index)
226 }
227
228 pub fn derive_sector_slot_challenge(
230 &self,
231 global_challenge: &Blake3Hash,
232 ) -> SectorSlotChallenge {
233 let sector_slot_challenge = Simd::from(*self.0) ^ Simd::from(**global_challenge);
234 SectorSlotChallenge(sector_slot_challenge.to_array().into())
235 }
236
237 pub fn derive_evaluation_seed(&self, piece_offset: PieceOffset) -> PosSeed {
239 let mut bytes_to_hash = [0; Self::SIZE + PieceOffset::SIZE];
240 bytes_to_hash[..Self::SIZE].copy_from_slice(self.as_ref());
241 bytes_to_hash[Self::SIZE..].copy_from_slice(&piece_offset.to_bytes());
242 let evaluation_seed = single_block_hash(&bytes_to_hash)
243 .expect("Less than a single block worth of bytes; qed");
244
245 PosSeed::from(evaluation_seed)
246 }
247
248 pub fn derive_expiration_history_size(
252 &self,
253 history_size: HistorySize,
254 sector_expiration_check_super_segment_root: &SuperSegmentRoot,
255 min_sector_lifetime: HistorySize,
256 ) -> Option<HistorySize> {
257 let sector_expiration_check_history_size = history_size
258 .sector_expiration_check(min_sector_lifetime)?
259 .as_non_zero_u64();
260
261 let input_hash = NanoU256::from_le_bytes(
262 single_block_hash(
263 [*self.0, **sector_expiration_check_super_segment_root].as_flattened(),
264 )
265 .expect("Less than a single block worth of bytes; qed"),
266 );
267
268 let last_possible_expiration = min_sector_lifetime
269 .as_non_zero_u64()
270 .checked_add(history_size.as_non_zero_u64().get().checked_mul(4u64)?)?;
271 let expires_in = input_hash
272 % last_possible_expiration
273 .get()
274 .checked_sub(sector_expiration_check_history_size.get())?;
275
276 let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
277 let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
278 "History size is not zero, so result is not zero even if expires immediately; qed",
279 );
280 Some(HistorySize::new(expiration_history_size))
281 }
282}
283
/// S-bucket index — a `u16` newtype; valid values run from [`SBucket::ZERO`] up to
/// [`SBucket::MAX`] (`Record::NUM_S_BUCKETS - 1`)
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SBucket(u16);
309
310impl Step for SBucket {
311 #[inline]
312 fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
313 u16::steps_between(&start.0, &end.0)
314 }
315
316 #[inline]
317 fn forward_checked(start: Self, count: usize) -> Option<Self> {
318 u16::forward_checked(start.0, count).map(Self)
319 }
320
321 #[inline]
322 fn backward_checked(start: Self, count: usize) -> Option<Self> {
323 u16::backward_checked(start.0, count).map(Self)
324 }
325}
326
impl const From<u16> for SBucket {
    // Infallible wrapping of the raw index; `const` so the conversion is usable in const
    // contexts. NOTE(review): values above `SBucket::MAX` are not rejected here — callers
    // are presumably expected to stay within `Record::NUM_S_BUCKETS`; verify at call sites.
    #[inline(always)]
    fn from(value: u16) -> Self {
        Self(value)
    }
}
333
impl const From<SBucket> for u16 {
    // Infallible unwrapping back to the raw index
    #[inline(always)]
    fn from(value: SBucket) -> Self {
        value.0
    }
}
340
341impl TryFrom<usize> for SBucket {
342 type Error = TryFromIntError;
343
344 #[inline]
345 fn try_from(value: usize) -> Result<Self, Self::Error> {
346 Ok(Self(u16::try_from(value)?))
347 }
348}
349
350impl From<SBucket> for u32 {
351 #[inline]
352 fn from(original: SBucket) -> Self {
353 u32::from(original.0)
354 }
355}
356
357impl From<SBucket> for usize {
358 #[inline]
359 fn from(original: SBucket) -> Self {
360 usize::from(original.0)
361 }
362}
363
impl SBucket {
    /// S-bucket 0
    pub const ZERO: SBucket = SBucket(0);
    /// Largest valid s-bucket index (`Record::NUM_S_BUCKETS - 1`)
    pub const MAX: SBucket = SBucket((Record::NUM_S_BUCKETS - 1) as u16);
}