mod nano_u256;
#[cfg(test)]
mod tests;

use crate::hashes::Blake3Hash;
use crate::pieces::{PieceIndex, PieceOffset, Record};
use crate::pos::PosSeed;
use crate::sectors::nano_u256::NanoU256;
use crate::segments::{HistorySize, SegmentRoot};
use ab_blake3::{single_block_hash, single_block_keyed_hash};
use ab_io_type::trivial_type::TrivialType;
use core::hash::Hash;
use core::iter::Step;
use core::num::{NonZeroU64, TryFromIntError};
use core::simd::Simd;
use derive_more::{
    Add, AddAssign, Deref, Display, Div, DivAssign, From, Into, Mul, MulAssign, Sub, SubAssign,
};
#[cfg(feature = "scale-codec")]
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "scale-codec")]
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

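/// Sector index.
///
/// An illustrative round trip through the little-endian byte representation:
///
/// ```ignore
/// let sector_index = SectorIndex::new(42);
/// assert_eq!(SectorIndex::from_bytes(sector_index.to_bytes()), sector_index);
/// ```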
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SectorIndex(u16);

impl Step for SectorIndex {
    #[inline(always)]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline(always)]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline(always)]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl From<SectorIndex> for u32 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u32::from(original.0)
    }
}

impl From<SectorIndex> for u64 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u64::from(original.0)
    }
}

impl From<SectorIndex> for usize {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        usize::from(original.0)
    }
}

impl SectorIndex {
    /// Size of a sector index in bytes.
    pub const SIZE: usize = size_of::<u16>();
    /// Sector index 0.
    pub const ZERO: Self = Self(0);
    /// Max sector index.
    pub const MAX: Self = Self(u16::MAX);

    /// Create a sector index from a `u16`.
    #[inline(always)]
    pub const fn new(n: u16) -> Self {
        Self(n)
    }

    /// Create a sector index from its little-endian byte representation.
    #[inline(always)]
    pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
        Self(u16::from_le_bytes(bytes))
    }

    /// Convert the sector index to its little-endian byte representation.
    #[inline(always)]
    pub const fn to_bytes(self) -> [u8; Self::SIZE] {
        self.0.to_le_bytes()
    }
}

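/// Challenge for a particular sector, derived by XORing the sector ID with the global challenge
/// (see [`SectorId::derive_sector_slot_challenge`]).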
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
pub struct SectorSlotChallenge(Blake3Hash);

impl SectorSlotChallenge {
    /// Index of the s-bucket to audit for this sector slot challenge.
    #[inline]
    pub fn s_bucket_audit_index(&self) -> SBucket {
        // Statically ensure that any `u16` built from the first two challenge bytes is a valid
        // s-bucket index
        const _: () = const {
            assert!(Record::NUM_S_BUCKETS == 1 << u16::BITS as usize);
        };
        SBucket::from(u16::from_le_bytes([self.0[0], self.0[1]]))
    }
}

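/// Unique identifier of a sector, derived from a public key hash, sector index and history size
/// (see [`SectorId::new`]).
///
/// An illustrative audit-challenge flow, assuming `public_key_hash`, `sector_index`,
/// `history_size` and `global_challenge` are already available:
///
/// ```ignore
/// let sector_id = SectorId::new(&public_key_hash, sector_index, history_size);
/// let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
/// let s_bucket = sector_slot_challenge.s_bucket_audit_index();
/// ```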
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, TypeInfo))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SectorId(Blake3Hash);

impl AsRef<[u8]> for SectorId {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}

impl SectorId {
    /// Size of a sector ID in bytes.
    const SIZE: usize = Blake3Hash::SIZE;

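    /// Derive a new sector ID from a public key hash, sector index and history size.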
    #[inline]
    pub fn new(
        public_key_hash: &Blake3Hash,
        sector_index: SectorIndex,
        history_size: HistorySize,
    ) -> Self {
        let mut bytes_to_hash = [0; SectorIndex::SIZE + HistorySize::SIZE as usize];
        bytes_to_hash[..SectorIndex::SIZE].copy_from_slice(&sector_index.to_bytes());
        bytes_to_hash[SectorIndex::SIZE..]
            .copy_from_slice(&history_size.as_non_zero_u64().get().to_le_bytes());
        Self(Blake3Hash::new(
            single_block_keyed_hash(public_key_hash, &bytes_to_hash)
                .expect("Less than a single block worth of bytes; qed"),
        ))
    }

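    /// Derive the index of the piece of history stored at `piece_offset` of this sector for the
    /// given history size; recent history is oversampled according to `recent_segments` and
    /// `recent_history_fraction`.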
    pub fn derive_piece_index(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
        max_pieces_in_sector: u16,
        recent_segments: HistorySize,
        recent_history_fraction: (HistorySize, HistorySize),
    ) -> PieceIndex {
        let recent_segments_in_pieces = recent_segments.in_pieces().get();
        // History size at which `recent_segments` corresponds to exactly
        // `recent_history_fraction` of the whole history; interleaving below only applies past
        // this point
        let min_history_size_in_pieces = recent_segments_in_pieces
            * recent_history_fraction.1.in_pieces().get()
            / recent_history_fraction.0.in_pieces().get();
        let input_hash = {
            let piece_offset_bytes = piece_offset.to_bytes();
            let mut key = [0; 32];
            key[..piece_offset_bytes.len()].copy_from_slice(&piece_offset_bytes);
            // Hash the sector ID keyed by the piece offset to produce deterministic
            // pseudorandomness
            NanoU256::from_le_bytes(
                single_block_keyed_hash(&key, self.as_ref())
                    .expect("Less than a single block worth of bytes; qed"),
            )
        };
        let history_size_in_pieces = history_size.in_pieces().get();
        let num_interleaved_pieces = 1.max(
            u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
                / recent_history_fraction.1.in_pieces().get()
                * 2,
        );

        let piece_index = if history_size_in_pieces > min_history_size_in_pieces
            && u64::from(piece_offset) < num_interleaved_pieces
            && u16::from(piece_offset) % 2 == 1
        {
            // Odd piece offsets within the interleaved range are sampled from the most recent
            // segments of history
            (input_hash % recent_segments_in_pieces)
                + (history_size_in_pieces - recent_segments_in_pieces)
        } else {
            input_hash % history_size_in_pieces
        };

        PieceIndex::from(piece_index)
    }

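    /// Derive the sector-specific slot challenge by XORing the sector ID with the global
    /// challenge.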
    pub fn derive_sector_slot_challenge(
        &self,
        global_challenge: &Blake3Hash,
    ) -> SectorSlotChallenge {
        let sector_slot_challenge = Simd::from(*self.0) ^ Simd::from(**global_challenge);
        SectorSlotChallenge(sector_slot_challenge.to_array().into())
    }

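    /// Derive the evaluation seed for the piece at `piece_offset` of this sector.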
    pub fn derive_evaluation_seed(&self, piece_offset: PieceOffset) -> PosSeed {
        let mut bytes_to_hash = [0; Self::SIZE + PieceOffset::SIZE];
        bytes_to_hash[..Self::SIZE].copy_from_slice(self.as_ref());
        bytes_to_hash[Self::SIZE..].copy_from_slice(&piece_offset.to_bytes());
        let evaluation_seed = single_block_hash(&bytes_to_hash)
            .expect("Less than a single block worth of bytes; qed");

        PosSeed::from(evaluation_seed)
    }

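    /// Derive the history size at which the sector with this ID expires.
    ///
    /// Returns `None` when the expiration check cannot be performed yet for the provided inputs.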
    pub fn derive_expiration_history_size(
        &self,
        history_size: HistorySize,
        sector_expiration_check_segment_root: &SegmentRoot,
        min_sector_lifetime: HistorySize,
    ) -> Option<HistorySize> {
        let sector_expiration_check_history_size = history_size
            .sector_expiration_check(min_sector_lifetime)?
            .as_non_zero_u64();

        let input_hash = NanoU256::from_le_bytes(
            single_block_hash([*self.0, **sector_expiration_check_segment_root].as_flattened())
                .expect("Less than a single block worth of bytes; qed"),
        );

        // The latest possible expiration is `min_sector_lifetime` plus four times `history_size`
        let last_possible_expiration = min_sector_lifetime
            .as_non_zero_u64()
            .checked_add(history_size.as_non_zero_u64().get().checked_mul(4u64)?)?;
        // Pick a pseudorandom offset within the allowed expiration window
        let expires_in = input_hash
            % last_possible_expiration
                .get()
                .checked_sub(sector_expiration_check_history_size.get())?;

        let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
        let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
            "History size is not zero, so result is not zero even if expires immediately; qed",
        );
        Some(HistorySize::new(expiration_history_size))
    }
}

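/// S-bucket index; valid values span [`SBucket::ZERO`]..=[`SBucket::MAX`], i.e.
/// [`Record::NUM_S_BUCKETS`] buckets in total.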
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SBucket(u16);

impl Step for SBucket {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl TryFrom<usize> for SBucket {
    type Error = TryFromIntError;

    #[inline]
    fn try_from(value: usize) -> Result<Self, Self::Error> {
        Ok(Self(u16::try_from(value)?))
    }
}

impl From<SBucket> for u32 {
    #[inline]
    fn from(original: SBucket) -> Self {
        u32::from(original.0)
    }
}

impl From<SBucket> for usize {
    #[inline]
    fn from(original: SBucket) -> Self {
        usize::from(original.0)
    }
}

impl SBucket {
    /// S-bucket 0.
    pub const ZERO: SBucket = SBucket(0);
    /// Max s-bucket index.
    pub const MAX: SBucket = SBucket((Record::NUM_S_BUCKETS - 1) as u16);
}