ab_core_primitives/sectors.rs

//! Sectors-related data structures.

mod nano_u256;
#[cfg(test)]
mod tests;

use crate::hashes::{
    Blake3Hash, blake3_hash_list, blake3_hash_list_with_key, blake3_hash_with_key,
};
use crate::pieces::{PieceIndex, PieceOffset, Record};
use crate::pos::PosSeed;
use crate::sectors::nano_u256::NanoU256;
use crate::segments::{HistorySize, SegmentRoot};
use core::hash::Hash;
use core::iter::Step;
use core::num::{NonZeroU64, TryFromIntError};
use core::simd::Simd;
use derive_more::{
    Add, AddAssign, Deref, Display, Div, DivAssign, From, Into, Mul, MulAssign, Sub, SubAssign,
};
#[cfg(feature = "scale-codec")]
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "scale-codec")]
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

/// Sector index in consensus
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct SectorIndex(u16);

impl Step for SectorIndex {
    #[inline(always)]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline(always)]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline(always)]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl From<SectorIndex> for u32 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u32::from(original.0)
    }
}

impl From<SectorIndex> for u64 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u64::from(original.0)
    }
}

impl From<SectorIndex> for usize {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        usize::from(original.0)
    }
}

impl SectorIndex {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u16>();
    /// Sector index 0
    pub const ZERO: Self = Self(0);
    /// Max sector index
    pub const MAX: Self = Self(u16::MAX);

    /// Create new instance
    #[inline(always)]
    pub const fn new(n: u16) -> Self {
        Self(n)
    }

    /// Create sector index from bytes.
    #[inline(always)]
    pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
        Self(u16::from_le_bytes(bytes))
    }

    /// Convert sector index to bytes.
    #[inline(always)]
    pub const fn to_bytes(self) -> [u8; Self::SIZE] {
        self.0.to_le_bytes()
    }
}
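
// A minimal usage sketch (illustrative only, not part of the API surface): `SectorIndex`
// round-trips through its little-endian byte encoding and widens losslessly into larger integers.
//
//     let sector_index = SectorIndex::new(42);
//     assert_eq!(sector_index.to_bytes(), [42, 0]);
//     assert_eq!(SectorIndex::from_bytes([42, 0]), sector_index);
//     assert_eq!(u32::from(sector_index), 42);
//     assert_eq!(usize::from(sector_index), 42);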

/// Challenge used for a particular sector for a particular slot
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
pub struct SectorSlotChallenge(Blake3Hash);

impl SectorSlotChallenge {
    /// Index of s-bucket within sector to be audited
    #[inline]
    pub fn s_bucket_audit_index(&self) -> SBucket {
        // As long as the number of s-buckets is 2^16, we can pick the first two bytes instead of
        // actually calculating `U256::from_le_bytes(self.0) % Record::NUM_S_BUCKETS`
        const _: () = const {
            assert!(Record::NUM_S_BUCKETS == 1 << u16::BITS as usize);
        };
        SBucket::from(u16::from_le_bytes([self.0[0], self.0[1]]))
    }
}
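
// Worked example for the shortcut above (illustrative): taking the challenge modulo `2^16` keeps
// only the low 16 bits, which in the little-endian representation are exactly the first two
// bytes. A challenge whose bytes start with `[0x34, 0x12, ..]` therefore audits s-bucket
// `0x1234` (4660), regardless of the remaining 30 bytes.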

/// Data structure representing sector ID in farmer's plot
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, TypeInfo))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SectorId(Blake3Hash);

impl AsRef<[u8]> for SectorId {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}

impl SectorId {
    /// Create a new sector ID by deriving it from the public key hash, sector index and history
    /// size
    #[inline]
    pub fn new(
        public_key_hash: &Blake3Hash,
        sector_index: SectorIndex,
        history_size: HistorySize,
    ) -> Self {
        Self(blake3_hash_list_with_key(
            public_key_hash,
            &[&sector_index.to_bytes(), &history_size.get().to_le_bytes()],
        ))
    }
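
    // A minimal derivation sketch (illustrative; assumes a `Blake3Hash` of the farmer's public
    // key and a `HistorySize` are already at hand):
    //
    //     let sector_id = SectorId::new(&public_key_hash, SectorIndex::ZERO, history_size);
    //
    // The same `(public_key_hash, sector_index, history_size)` triple always yields the same
    // sector ID: it is a keyed BLAKE3 hash of the sector index and history size, with the public
    // key hash as the keying input.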

    /// Derive the index of the piece that should be stored in the sector at `piece_offset` for
    /// the specified size of blockchain history
    pub fn derive_piece_index(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
        max_pieces_in_sector: u16,
        recent_segments: HistorySize,
        recent_history_fraction: (HistorySize, HistorySize),
    ) -> PieceIndex {
        let recent_segments_in_pieces = recent_segments.in_pieces().get();
        // Recent history must be at most `recent_history_fraction` of all history to use separate
        // policy for recent pieces
        let min_history_size_in_pieces = recent_segments_in_pieces
            * recent_history_fraction.1.in_pieces().get()
            / recent_history_fraction.0.in_pieces().get();
        let input_hash = {
            let piece_offset_bytes = piece_offset.to_bytes();
            let mut key = [0; 32];
            key[..piece_offset_bytes.len()].copy_from_slice(&piece_offset_bytes);
            NanoU256::from_le_bytes(*blake3_hash_with_key(&key, self.as_ref()))
        };
        let history_size_in_pieces = history_size.in_pieces().get();
        let num_interleaved_pieces = 1.max(
            u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
                / recent_history_fraction.1.in_pieces().get()
                * 2,
        );

        let piece_index = if history_size_in_pieces > min_history_size_in_pieces
            && u64::from(piece_offset) < num_interleaved_pieces
            && u16::from(piece_offset) % 2 == 1
        {
            // For odd piece offsets at the beginning of the sector pick pieces at random from
            // recent history only
            (input_hash % recent_segments_in_pieces)
                + (history_size_in_pieces - recent_segments_in_pieces)
        } else {
            input_hash % history_size_in_pieces
        };

        PieceIndex::from(piece_index)
    }
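
    // Illustrative reading of the policy above (the numbers are made up): with
    // `recent_history_fraction = (1, 10)` and `max_pieces_in_sector = 1000`,
    // `num_interleaved_pieces == 1000 * 1 / 10 * 2 == 200`. Once total history exceeds ten times
    // the recent window, odd piece offsets `1, 3, ..., 199` are sampled uniformly from the most
    // recent `recent_segments` worth of pieces, while every other offset is sampled uniformly
    // from the whole history.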

    /// Derive sector slot challenge for this sector from provided global challenge
    pub fn derive_sector_slot_challenge(
        &self,
        global_challenge: &Blake3Hash,
    ) -> SectorSlotChallenge {
        let sector_slot_challenge = Simd::from(*self.0) ^ Simd::from(**global_challenge);
        SectorSlotChallenge(sector_slot_challenge.to_array().into())
    }
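
    // Audit flow sketch (illustrative; `global_challenge` is assumed to come from consensus): the
    // sector slot challenge is simply the bytewise XOR of the sector ID with the global
    // challenge, and the s-bucket to audit falls out of its first two bytes.
    //
    //     let sector_slot_challenge = sector_id.derive_sector_slot_challenge(&global_challenge);
    //     let s_bucket = sector_slot_challenge.s_bucket_audit_index();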

    /// Derive evaluation seed
    pub fn derive_evaluation_seed(&self, piece_offset: PieceOffset) -> PosSeed {
        let evaluation_seed = blake3_hash_list(&[self.as_ref(), &piece_offset.to_bytes()]);

        PosSeed::from(*evaluation_seed)
    }

    /// Derive the history size at which a sector created at `history_size` expires.
    ///
    /// Returns `None` on overflow.
    pub fn derive_expiration_history_size(
        &self,
        history_size: HistorySize,
        sector_expiration_check_segment_root: &SegmentRoot,
        min_sector_lifetime: HistorySize,
    ) -> Option<HistorySize> {
        let sector_expiration_check_history_size =
            history_size.sector_expiration_check(min_sector_lifetime)?;

        let input_hash = NanoU256::from_le_bytes(*blake3_hash_list(&[
            self.as_ref(),
            sector_expiration_check_segment_root.as_ref(),
        ]));

        let last_possible_expiration =
            min_sector_lifetime.checked_add(history_size.get().checked_mul(4u64)?)?;
        let expires_in = input_hash
            % last_possible_expiration
                .get()
                .checked_sub(sector_expiration_check_history_size.get())?;

        let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
        let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
            "History size is not zero, so result is not zero even if expires immediately; qed",
        );
        Some(HistorySize::from(expiration_history_size))
    }
}
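
// Illustrative summary of `derive_expiration_history_size`: the expiration point is drawn
// pseudorandomly, keyed by the sector ID and the segment root used for the expiration check, from
// the half-open range
//
//     [sector_expiration_check_history_size, min_sector_lifetime + history_size * 4)
//
// so a sector created at `history_size` expires no later than the minimum sector lifetime plus
// roughly four times the history size at creation.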

/// S-bucket used in consensus
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct SBucket(u16);

impl Step for SBucket {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl TryFrom<usize> for SBucket {
    type Error = TryFromIntError;

    #[inline]
    fn try_from(value: usize) -> Result<Self, Self::Error> {
        Ok(Self(u16::try_from(value)?))
    }
}

impl From<SBucket> for u32 {
    #[inline]
    fn from(original: SBucket) -> Self {
        u32::from(original.0)
    }
}

impl From<SBucket> for usize {
    #[inline]
    fn from(original: SBucket) -> Self {
        usize::from(original.0)
    }
}

impl SBucket {
    /// S-bucket 0.
    pub const ZERO: SBucket = SBucket(0);
    /// Max s-bucket index
    pub const MAX: SBucket = SBucket((Record::NUM_S_BUCKETS - 1) as u16);
}
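
// Usage sketch (illustrative): the `Step` implementation exists so that s-buckets can be iterated
// with plain range syntax, for example when walking every s-bucket of a record:
//
//     for s_bucket in SBucket::ZERO..=SBucket::MAX {
//         // audit or encode this s-bucket
//     }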