
ab_core_primitives/sectors.rs

//! Sectors-related data structures.

#[cfg(test)]
mod tests;

use crate::hashes::Blake3Hash;
use crate::nano_u256::NanoU256;
use crate::pieces::{PieceIndex, PieceOffset, Record};
use crate::pos::PosSeed;
use crate::segments::{HistorySize, SegmentRoot};
use crate::solutions::ShardCommitmentHash;
use ab_blake3::{single_block_hash, single_block_keyed_hash};
use ab_io_type::trivial_type::TrivialType;
use core::hash::Hash;
use core::iter::Step;
use core::num::{NonZeroU64, TryFromIntError};
use core::simd::Simd;
use derive_more::{Add, AddAssign, Deref, Display, Div, DivAssign, Mul, MulAssign, Sub, SubAssign};
#[cfg(feature = "scale-codec")]
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

/// Sector index in consensus
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SectorIndex(u16);

impl Step for SectorIndex {
    #[inline(always)]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline(always)]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline(always)]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl const From<u16> for SectorIndex {
    #[inline(always)]
    fn from(value: u16) -> Self {
        Self(value)
    }
}

impl const From<SectorIndex> for u16 {
    #[inline(always)]
    fn from(value: SectorIndex) -> Self {
        value.0
    }
}

impl const From<SectorIndex> for u32 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u32::from(original.0)
    }
}

impl const From<SectorIndex> for u64 {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        u64::from(original.0)
    }
}

impl const From<SectorIndex> for usize {
    #[inline(always)]
    fn from(original: SectorIndex) -> Self {
        usize::from(original.0)
    }
}

impl SectorIndex {
    /// Size in bytes
    pub const SIZE: usize = size_of::<u16>();
    /// Sector index 0
    pub const ZERO: Self = Self(0);
    /// Max sector index
    pub const MAX: Self = Self(u16::MAX);

    /// Create sector index from bytes.
    #[inline(always)]
    pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
        Self(u16::from_le_bytes(bytes))
    }

    /// Convert sector index to bytes.
    #[inline(always)]
    pub const fn to_bytes(self) -> [u8; Self::SIZE] {
        self.0.to_le_bytes()
    }
}
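
// A minimal, hedged usage sketch (added for illustration, not part of the original source):
// round-trips a `SectorIndex` through its little-endian byte representation using only the APIs
// defined above. Module and function names are hypothetical.
#[cfg(test)]
mod sector_index_usage_sketch {
    use super::SectorIndex;

    #[test]
    fn byte_round_trip() {
        let sector_index = SectorIndex::from(42_u16);
        // `to_bytes()` produces the 2-byte little-endian encoding
        let bytes = sector_index.to_bytes();
        assert_eq!(bytes, 42_u16.to_le_bytes());
        // `from_bytes()` reverses it exactly
        assert_eq!(SectorIndex::from_bytes(bytes), sector_index);
    }
}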

/// Challenge used for a particular sector for a particular slot
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
pub struct SectorSlotChallenge(Blake3Hash);

impl SectorSlotChallenge {
    /// Index of the s-bucket within the sector to be audited
    #[inline]
    pub fn s_bucket_audit_index(&self) -> SBucket {
        // As long as the number of s-buckets is 2^16, we can take the first two bytes instead of
        // actually calculating `U256::from_le_bytes(self.0) % Record::NUM_S_BUCKETS`
        const {
            assert!(Record::NUM_S_BUCKETS == 1 << u16::BITS as usize);
        }
        SBucket::from(u16::from_le_bytes([self.0[0], self.0[1]]))
    }
}
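
// Worked example for `s_bucket_audit_index` (hypothetical challenge bytes, added for
// illustration): if the challenge starts with bytes `[0x34, 0x12, ..]`, the audited s-bucket is
// `u16::from_le_bytes([0x34, 0x12]) == 0x1234 == 4660`. Because `Record::NUM_S_BUCKETS == 2^16`
// (asserted above), this equals the full `U256::from_le_bytes(challenge) % Record::NUM_S_BUCKETS`.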

/// Data structure representing a sector ID in the farmer's plot
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SectorId(Blake3Hash);

impl AsRef<[u8]> for SectorId {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}

impl SectorId {
    /// Size in bytes
    const SIZE: usize = Blake3Hash::SIZE;

    /// Create a new sector ID by deriving it from the public key hash, shard commitments root,
    /// sector index and history size
    #[inline]
    pub fn new(
        public_key_hash: &Blake3Hash,
        shard_commitments_root: &ShardCommitmentHash,
        sector_index: SectorIndex,
        history_size: HistorySize,
    ) -> Self {
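        // Layout of the keyed-hash input assembled below (expressed with the size constants the
        // code itself uses):
        //
        //     sector_index (SectorIndex::SIZE bytes, little-endian)
        //     || history_size (HistorySize::SIZE bytes, little-endian)
        //     || shard_commitments_root (ShardCommitmentHash::SIZE bytes)
        //
        // keyed by `public_key_hash`; the total fits in a single BLAKE3 block, which is why
        // `single_block_keyed_hash` cannot fail (see the `expect` below).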
        let mut bytes_to_hash =
            [0; SectorIndex::SIZE + HistorySize::SIZE as usize + ShardCommitmentHash::SIZE];
        bytes_to_hash[..SectorIndex::SIZE].copy_from_slice(&sector_index.to_bytes());
        bytes_to_hash[SectorIndex::SIZE..][..HistorySize::SIZE as usize]
            .copy_from_slice(&history_size.as_non_zero_u64().get().to_le_bytes());
        bytes_to_hash[SectorIndex::SIZE + HistorySize::SIZE as usize..]
            .copy_from_slice(shard_commitments_root.as_bytes());
        // TODO: Is keyed hash really needed here?
        Self(Blake3Hash::new(
            single_block_keyed_hash(public_key_hash, &bytes_to_hash)
                .expect("Less than a single block worth of bytes; qed"),
        ))
    }

    /// Derive the piece index that should be stored in the sector at `piece_offset` for the
    /// specified size of blockchain history
    pub fn derive_piece_index(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
        max_pieces_in_sector: u16,
        recent_segments: HistorySize,
        recent_history_fraction: (HistorySize, HistorySize),
    ) -> PieceIndex {
        let recent_segments_in_pieces = recent_segments.in_pieces().get();
        // Recent history must be at most `recent_history_fraction` of all history to use the
        // separate policy for recent pieces
        let min_history_size_in_pieces = recent_segments_in_pieces
            * recent_history_fraction.1.in_pieces().get()
            / recent_history_fraction.0.in_pieces().get();
        let input_hash = {
            let piece_offset_bytes = piece_offset.to_bytes();
            let mut key = [0; 32];
            key[..piece_offset_bytes.len()].copy_from_slice(&piece_offset_bytes);
            // TODO: Is keyed hash really needed here?
            NanoU256::from_le_bytes(
                single_block_keyed_hash(&key, self.as_ref())
                    .expect("Less than a single block worth of bytes; qed"),
            )
        };
        let history_size_in_pieces = history_size.in_pieces().get();
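        // Number of piece offsets at the start of the sector that alternate between the "recent
        // history" policy (odd offsets) and the uniform policy (even offsets). It corresponds to
        // `recent_history_fraction` of the sector, doubled because only every other offset in the
        // range uses the recent-history policy, and is at least 1. For illustration (hypothetical
        // values): with `max_pieces_in_sector == 1000` and a fraction of 1/10 this is
        // `max(1, 1000 / 10 * 2) == 200`, so odd offsets below 200 are drawn from recent history
        // once total history exceeds the minimum computed above.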
        let num_interleaved_pieces = 1.max(
            u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
                / recent_history_fraction.1.in_pieces().get()
                * 2,
        );

        let piece_index = if history_size_in_pieces > min_history_size_in_pieces
            && u64::from(piece_offset) < num_interleaved_pieces
            && u16::from(piece_offset) % 2 == 1
        {
            // For odd piece offsets at the beginning of the sector pick pieces at random from
            // recent history only
            (input_hash % recent_segments_in_pieces)
                + (history_size_in_pieces - recent_segments_in_pieces)
        } else {
            input_hash % history_size_in_pieces
        };

        PieceIndex::from(piece_index)
    }

    /// Derive sector slot challenge for this sector from provided global challenge
    pub fn derive_sector_slot_challenge(
        &self,
        global_challenge: &Blake3Hash,
    ) -> SectorSlotChallenge {
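        // Byte-wise XOR of the 32-byte sector ID with the global challenge; `Simd` is used here
        // simply as a convenient way to XOR the two arrays in one operation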
        let sector_slot_challenge = Simd::from(*self.0) ^ Simd::from(**global_challenge);
        SectorSlotChallenge(sector_slot_challenge.to_array().into())
    }

    /// Derive evaluation seed
    pub fn derive_evaluation_seed(&self, piece_offset: PieceOffset) -> PosSeed {
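        // The seed is the unkeyed single-block BLAKE3 hash of `sector_id || piece_offset` bytes;
        // the input is well within a single block, hence the `expect` below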
        let mut bytes_to_hash = [0; Self::SIZE + PieceOffset::SIZE];
        bytes_to_hash[..Self::SIZE].copy_from_slice(self.as_ref());
        bytes_to_hash[Self::SIZE..].copy_from_slice(&piece_offset.to_bytes());
        let evaluation_seed = single_block_hash(&bytes_to_hash)
            .expect("Less than a single block worth of bytes; qed");

        PosSeed::from(evaluation_seed)
    }

    /// Derive the history size at which the sector created at `history_size` expires.
    ///
    /// Returns `None` on overflow.
    pub fn derive_expiration_history_size(
        &self,
        history_size: HistorySize,
        sector_expiration_check_segment_root: &SegmentRoot,
        min_sector_lifetime: HistorySize,
    ) -> Option<HistorySize> {
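        // Summary of the derivation below: the expiration point is chosen pseudorandomly, from a
        // hash of this sector ID and the expiration-check segment root, within the half-open
        // window `[check, min_sector_lifetime + 4 * history_size)` (in `HistorySize` units),
        // where `check = history_size.sector_expiration_check(min_sector_lifetime)`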
        let sector_expiration_check_history_size = history_size
            .sector_expiration_check(min_sector_lifetime)?
            .as_non_zero_u64();

        let input_hash = NanoU256::from_le_bytes(
            single_block_hash([*self.0, **sector_expiration_check_segment_root].as_flattened())
                .expect("Less than a single block worth of bytes; qed"),
        );

        let last_possible_expiration = min_sector_lifetime
            .as_non_zero_u64()
            .checked_add(history_size.as_non_zero_u64().get().checked_mul(4u64)?)?;
        let expires_in = input_hash
            % last_possible_expiration
                .get()
                .checked_sub(sector_expiration_check_history_size.get())?;

        let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
        let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
            "History size is not zero, so result is not zero even if expires immediately; qed",
        );
        Some(HistorySize::new(expiration_history_size))
    }
}

/// S-bucket used in consensus
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(C)]
pub struct SBucket(u16);

impl Step for SBucket {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}

impl const From<u16> for SBucket {
    #[inline(always)]
    fn from(value: u16) -> Self {
        Self(value)
    }
}

impl const From<SBucket> for u16 {
    #[inline(always)]
    fn from(value: SBucket) -> Self {
        value.0
    }
}

impl TryFrom<usize> for SBucket {
    type Error = TryFromIntError;

    #[inline]
    fn try_from(value: usize) -> Result<Self, Self::Error> {
        Ok(Self(u16::try_from(value)?))
    }
}

impl From<SBucket> for u32 {
    #[inline]
    fn from(original: SBucket) -> Self {
        u32::from(original.0)
    }
}

impl From<SBucket> for usize {
    #[inline]
    fn from(original: SBucket) -> Self {
        usize::from(original.0)
    }
}

impl SBucket {
    /// S-bucket 0.
    pub const ZERO: SBucket = SBucket(0);
    /// Max s-bucket index
    pub const MAX: SBucket = SBucket((Record::NUM_S_BUCKETS - 1) as u16);
}
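
// A minimal, hedged usage sketch (added for illustration, not part of the original source): the
// `Step` implementation above is what allows iterating over s-buckets with ordinary ranges.
// Module and function names are hypothetical.
#[cfg(test)]
mod s_bucket_usage_sketch {
    use super::SBucket;

    #[test]
    fn range_iteration() {
        // Iterate over the first four s-buckets and check they come out in order: 0, 1, 2, 3
        let mut expected = 0_u16;
        for s_bucket in SBucket::from(0_u16)..SBucket::from(4_u16) {
            assert_eq!(u16::from(s_bucket), expected);
            expected += 1;
        }
        assert_eq!(expected, 4);
    }
}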