ab_core_primitives/sectors.rs

//! Sectors-related data structures.

mod nano_u256;
#[cfg(test)]
mod tests;

use crate::hashes::Blake3Hash;
use crate::pieces::{PieceIndex, PieceOffset, Record};
use crate::pos::PosSeed;
use crate::sectors::nano_u256::NanoU256;
use crate::segments::{HistorySize, SegmentRoot};
use ab_io_type::trivial_type::TrivialType;
use core::hash::Hash;
use core::iter::Step;
use core::num::{NonZeroU64, TryFromIntError};
use core::simd::Simd;
use derive_more::{
    Add, AddAssign, Deref, Display, Div, DivAssign, From, Into, Mul, MulAssign, Sub, SubAssign,
};
#[cfg(feature = "scale-codec")]
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "scale-codec")]
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

/// Sector index in consensus
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TrivialType,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SectorIndex(u16);

impl Step for SectorIndex {
    #[inline(always)]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline(always)]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline(always)]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}
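
// Sketch (illustrative, relies only on items defined in this file): the `Step` impl above
// makes `SectorIndex` usable directly in ranges.
#[cfg(test)]
#[test]
fn sector_index_range_sketch() {
    let mut iter = SectorIndex::ZERO..SectorIndex::new(2);
    assert_eq!(iter.next(), Some(SectorIndex::new(0)));
    assert_eq!(iter.next(), Some(SectorIndex::new(1)));
    assert_eq!(iter.next(), None);
}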

impl From<SectorIndex> for u32 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u32::from(original.0)
    }
}

impl From<SectorIndex> for u64 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u64::from(original.0)
    }
}

impl From<SectorIndex> for usize {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        usize::from(original.0)
    }
}

impl SectorIndex {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u16>();
    /// Sector index 0
    pub const ZERO: Self = Self(0);
    /// Max sector index
    pub const MAX: Self = Self(u16::MAX);

    /// Create new instance
    #[inline(always)]
    pub const fn new(n: u16) -> Self {
        Self(n)
    }

    /// Create sector index from bytes
    #[inline(always)]
    pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
        Self(u16::from_le_bytes(bytes))
    }

    /// Convert sector index to bytes
    #[inline(always)]
    pub const fn to_bytes(self) -> [u8; Self::SIZE] {
        self.0.to_le_bytes()
    }
}
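
// Sketch (illustrative, relies only on items defined in this file): sector indices
// round-trip through their little-endian byte representation.
#[cfg(test)]
#[test]
fn sector_index_bytes_round_trip_sketch() {
    let sector_index = SectorIndex::new(0x1234);
    // Little-endian encoding: the least significant byte comes first
    assert_eq!(sector_index.to_bytes(), [0x34, 0x12]);
    assert_eq!(SectorIndex::from_bytes(sector_index.to_bytes()), sector_index);
}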
123
124/// Challenge used for a particular sector for particular slot
125#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
126pub struct SectorSlotChallenge(Blake3Hash);
127
128impl SectorSlotChallenge {
129    /// Index of s-bucket within sector to be audited
130    #[inline]
131    pub fn s_bucket_audit_index(&self) -> SBucket {
132        // As long as number of s-buckets is 2^16, we can pick first two bytes instead of actually
133        // calculating `U256::from_le_bytes(self.0) % Record::NUM_S_BUCKETS)`
134        const _: () = const {
135            assert!(Record::NUM_S_BUCKETS == 1 << u16::BITS as usize);
136        };
137        SBucket::from(u16::from_le_bytes([self.0[0], self.0[1]]))
138    }
139}
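
// Sketch (illustrative): because `Record::NUM_S_BUCKETS == 2^16`, reducing the 256-bit
// challenge modulo the number of s-buckets is equivalent to reading its first two
// little-endian bytes, which is exactly the shortcut `s_bucket_audit_index` takes.
#[cfg(test)]
#[test]
fn s_bucket_audit_index_sketch() {
    let mut challenge_bytes = [0u8; 32];
    challenge_bytes[0] = 0xcd;
    challenge_bytes[1] = 0xab;
    // Direct construction is possible here because this is the defining module
    let challenge = SectorSlotChallenge(Blake3Hash::from(challenge_bytes));
    assert_eq!(u32::from(challenge.s_bucket_audit_index()), 0xabcd);
}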
140
141/// Data structure representing sector ID in farmer's plot
142#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
143#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, TypeInfo))]
144#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
145pub struct SectorId(Blake3Hash);
146
147impl AsRef<[u8]> for SectorId {
148    #[inline]
149    fn as_ref(&self) -> &[u8] {
150        self.0.as_ref()
151    }
152}
153
154impl SectorId {
155    /// Size in bytes
156    const SIZE: usize = Blake3Hash::SIZE;
157
158    /// Create a new sector ID by deriving it from public key and sector index
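    ///
    /// # Example
    ///
    /// A minimal usage sketch; the concrete values here are illustrative assumptions, not
    /// requirements of the API:
    ///
    /// ```
    /// use ab_core_primitives::hashes::Blake3Hash;
    /// use ab_core_primitives::sectors::{SectorId, SectorIndex};
    /// use ab_core_primitives::segments::HistorySize;
    /// use core::num::NonZeroU64;
    ///
    /// let public_key_hash = Blake3Hash::from([1u8; 32]);
    /// let history_size = HistorySize::new(NonZeroU64::new(1).expect("Not zero; qed"));
    /// // Derivation is deterministic: the same inputs always produce the same sector ID
    /// assert_eq!(
    ///     SectorId::new(&public_key_hash, SectorIndex::ZERO, history_size),
    ///     SectorId::new(&public_key_hash, SectorIndex::ZERO, history_size),
    /// );
    /// ```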
    #[inline]
    pub fn new(
        public_key_hash: &Blake3Hash,
        sector_index: SectorIndex,
        history_size: HistorySize,
    ) -> Self {
        let mut bytes_to_hash = [0; SectorIndex::SIZE + HistorySize::SIZE as usize];
        bytes_to_hash[..SectorIndex::SIZE].copy_from_slice(&sector_index.to_bytes());
        bytes_to_hash[SectorIndex::SIZE..]
            .copy_from_slice(&history_size.as_non_zero_u64().get().to_le_bytes());
        // TODO: Is keyed hash really needed here?
        Self(blake3::keyed_hash(public_key_hash, &bytes_to_hash).into())
    }

    /// Derive the piece index that should be stored in the sector at `piece_offset` for the
    /// specified size of blockchain history
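    ///
    /// # Example
    ///
    /// A minimal sketch; the parameter values (`max_pieces_in_sector`, recent segments, and the
    /// recent history fraction) are illustrative assumptions only:
    ///
    /// ```
    /// use ab_core_primitives::hashes::Blake3Hash;
    /// use ab_core_primitives::pieces::PieceOffset;
    /// use ab_core_primitives::sectors::{SectorId, SectorIndex};
    /// use ab_core_primitives::segments::HistorySize;
    /// use core::num::NonZeroU64;
    ///
    /// let history_size = HistorySize::new(NonZeroU64::new(4).expect("Not zero; qed"));
    /// let sector_id =
    ///     SectorId::new(&Blake3Hash::from([2u8; 32]), SectorIndex::ZERO, history_size);
    /// let one = HistorySize::new(NonZeroU64::new(1).expect("Not zero; qed"));
    /// let ten = HistorySize::new(NonZeroU64::new(10).expect("Not zero; qed"));
    /// let piece_index =
    ///     sector_id.derive_piece_index(PieceOffset::ZERO, history_size, 1000, one, (one, ten));
    /// // Whichever policy branch is taken, the result indexes into existing history
    /// assert!(u64::from(piece_index) < history_size.in_pieces().get());
    /// ```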
    pub fn derive_piece_index(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
        max_pieces_in_sector: u16,
        recent_segments: HistorySize,
        recent_history_fraction: (HistorySize, HistorySize),
    ) -> PieceIndex {
        let recent_segments_in_pieces = recent_segments.in_pieces().get();
        // Recent history must be at most `recent_history_fraction` of all history to use a
        // separate policy for recent pieces
        let min_history_size_in_pieces = recent_segments_in_pieces
            * recent_history_fraction.1.in_pieces().get()
            / recent_history_fraction.0.in_pieces().get();
        let input_hash = {
            let piece_offset_bytes = piece_offset.to_bytes();
            let mut key = [0; 32];
            key[..piece_offset_bytes.len()].copy_from_slice(&piece_offset_bytes);
            // TODO: Is keyed hash really needed here?
            NanoU256::from_le_bytes(*blake3::keyed_hash(&key, self.as_ref()).as_bytes())
        };
        let history_size_in_pieces = history_size.in_pieces().get();
        let num_interleaved_pieces = 1.max(
            u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
                / recent_history_fraction.1.in_pieces().get()
                * 2,
        );

        let piece_index = if history_size_in_pieces > min_history_size_in_pieces
            && u64::from(piece_offset) < num_interleaved_pieces
            && u16::from(piece_offset) % 2 == 1
        {
            // For odd piece offsets at the beginning of the sector, pick pieces at random from
            // recent history only
            (input_hash % recent_segments_in_pieces)
                + (history_size_in_pieces - recent_segments_in_pieces)
        } else {
            input_hash % history_size_in_pieces
        };

        PieceIndex::from(piece_index)
    }

    /// Derive the sector slot challenge for this sector from the provided global challenge
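    ///
    /// # Example
    ///
    /// A minimal sketch: the derivation is a byte-wise XOR, so it is deterministic and
    /// distinct global challenges always yield distinct sector slot challenges:
    ///
    /// ```
    /// use ab_core_primitives::hashes::Blake3Hash;
    /// use ab_core_primitives::sectors::{SectorId, SectorIndex};
    /// use ab_core_primitives::segments::HistorySize;
    /// use core::num::NonZeroU64;
    ///
    /// let history_size = HistorySize::new(NonZeroU64::new(1).expect("Not zero; qed"));
    /// let sector_id =
    ///     SectorId::new(&Blake3Hash::from([3u8; 32]), SectorIndex::ZERO, history_size);
    /// let global_challenge = Blake3Hash::from([9u8; 32]);
    /// assert_eq!(
    ///     sector_id.derive_sector_slot_challenge(&global_challenge),
    ///     sector_id.derive_sector_slot_challenge(&global_challenge),
    /// );
    /// assert_ne!(
    ///     sector_id.derive_sector_slot_challenge(&global_challenge),
    ///     sector_id.derive_sector_slot_challenge(&Blake3Hash::from([0u8; 32])),
    /// );
    /// ```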
    pub fn derive_sector_slot_challenge(
        &self,
        global_challenge: &Blake3Hash,
    ) -> SectorSlotChallenge {
        let sector_slot_challenge = Simd::from(*self.0) ^ Simd::from(**global_challenge);
        SectorSlotChallenge(sector_slot_challenge.to_array().into())
    }

    /// Derive the evaluation seed for the piece at `piece_offset`
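    ///
    /// # Example
    ///
    /// A minimal sketch showing that the seed commits to the piece offset:
    ///
    /// ```
    /// use ab_core_primitives::hashes::Blake3Hash;
    /// use ab_core_primitives::pieces::PieceOffset;
    /// use ab_core_primitives::sectors::{SectorId, SectorIndex};
    /// use ab_core_primitives::segments::HistorySize;
    /// use core::num::NonZeroU64;
    ///
    /// let history_size = HistorySize::new(NonZeroU64::new(1).expect("Not zero; qed"));
    /// let sector_id =
    ///     SectorId::new(&Blake3Hash::from([4u8; 32]), SectorIndex::ZERO, history_size);
    /// assert_ne!(
    ///     sector_id.derive_evaluation_seed(PieceOffset::ZERO),
    ///     sector_id.derive_evaluation_seed(PieceOffset::ONE),
    /// );
    /// ```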
    pub fn derive_evaluation_seed(&self, piece_offset: PieceOffset) -> PosSeed {
        let mut bytes_to_hash = [0; Self::SIZE + PieceOffset::SIZE];
        bytes_to_hash[..Self::SIZE].copy_from_slice(self.as_ref());
        bytes_to_hash[Self::SIZE..].copy_from_slice(&piece_offset.to_bytes());
        let evaluation_seed = blake3::hash(&bytes_to_hash);

        PosSeed::from(*evaluation_seed.as_bytes())
    }

    /// Derive the history size at which a sector created at `history_size` expires.
    ///
    /// Returns `None` on overflow.
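    ///
    /// # Example
    ///
    /// A minimal sketch; that `SegmentRoot` implements `Default` is an assumption of this
    /// example, used only to get a placeholder value:
    ///
    /// ```
    /// use ab_core_primitives::hashes::Blake3Hash;
    /// use ab_core_primitives::sectors::{SectorId, SectorIndex};
    /// use ab_core_primitives::segments::{HistorySize, SegmentRoot};
    /// use core::num::NonZeroU64;
    ///
    /// let history_size = HistorySize::new(NonZeroU64::new(10).expect("Not zero; qed"));
    /// let min_sector_lifetime = HistorySize::new(NonZeroU64::new(4).expect("Not zero; qed"));
    /// let sector_id =
    ///     SectorId::new(&Blake3Hash::from([5u8; 32]), SectorIndex::ZERO, history_size);
    /// let segment_root = SegmentRoot::default();
    /// // Derivation is pure: the same inputs always produce the same expiration point
    /// assert_eq!(
    ///     sector_id.derive_expiration_history_size(history_size, &segment_root, min_sector_lifetime),
    ///     sector_id.derive_expiration_history_size(history_size, &segment_root, min_sector_lifetime),
    /// );
    /// ```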
    pub fn derive_expiration_history_size(
        &self,
        history_size: HistorySize,
        sector_expiration_check_segment_root: &SegmentRoot,
        min_sector_lifetime: HistorySize,
    ) -> Option<HistorySize> {
        let sector_expiration_check_history_size = history_size
            .sector_expiration_check(min_sector_lifetime)?
            .as_non_zero_u64();

        let input_hash = NanoU256::from_le_bytes(
            *blake3::hash([*self.0, **sector_expiration_check_segment_root].as_flattened())
                .as_bytes(),
        );

        let last_possible_expiration = min_sector_lifetime
            .as_non_zero_u64()
            .checked_add(history_size.as_non_zero_u64().get().checked_mul(4u64)?)?;
        let expires_in = input_hash
            % last_possible_expiration
                .get()
                .checked_sub(sector_expiration_check_history_size.get())?;

        let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
        let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
            "History size is not zero, so result is not zero even if expires immediately; qed",
        );
        Some(HistorySize::new(expiration_history_size))
    }
}

/// S-bucket used in consensus
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
)]
#[cfg_attr(
    feature = "scale-codec",
    derive(Encode, Decode, TypeInfo, MaxEncodedLen)
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SBucket(u16);

impl Step for SBucket {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl TryFrom<usize> for SBucket {
    type Error = TryFromIntError;

    #[inline]
    fn try_from(value: usize) -> Result<Self, Self::Error> {
        Ok(Self(u16::try_from(value)?))
    }
}

impl From<SBucket> for u32 {
    #[inline]
    fn from(original: SBucket) -> Self {
        u32::from(original.0)
    }
}

impl From<SBucket> for usize {
    #[inline]
    fn from(original: SBucket) -> Self {
        usize::from(original.0)
    }
}

impl SBucket {
    /// S-bucket 0
    pub const ZERO: SBucket = SBucket(0);
    /// Max s-bucket index
    pub const MAX: SBucket = SBucket((Record::NUM_S_BUCKETS - 1) as u16);
}
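
// Sketch (illustrative, relies only on items defined in this file): `SBucket` covers exactly
// `Record::NUM_S_BUCKETS` values, and the `Step` impl above makes s-buckets iterable as a
// range.
#[cfg(test)]
#[test]
fn s_bucket_bounds_sketch() {
    assert_eq!(usize::from(SBucket::MAX) + 1, Record::NUM_S_BUCKETS);
    let mut iter = SBucket::ZERO..=SBucket::MAX;
    assert_eq!(iter.next(), Some(SBucket::ZERO));
}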