// Crate path: ab_core_primitives/sectors.rs

1//! Sectors-related data structures.
2
3#[cfg(test)]
4mod tests;
5
6use crate::hashes::Blake3Hash;
7use crate::nano_u256::NanoU256;
8use crate::pieces::{PieceIndex, PieceOffset, Record};
9use crate::pos::PosSeed;
10use crate::segments::{HistorySize, SegmentRoot};
11use crate::solutions::ShardCommitmentHash;
12use ab_blake3::{single_block_hash, single_block_keyed_hash};
13use ab_io_type::trivial_type::TrivialType;
14use core::hash::Hash;
15use core::iter::Step;
16use core::num::{NonZeroU64, TryFromIntError};
17use core::simd::Simd;
18use derive_more::{
19    Add, AddAssign, Deref, Display, Div, DivAssign, From, Into, Mul, MulAssign, Sub, SubAssign,
20};
21#[cfg(feature = "scale-codec")]
22use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
23#[cfg(feature = "serde")]
24use serde::{Deserialize, Serialize};
25
/// Sector index in consensus
///
/// Newtype wrapper around a `u16`, so at most `u16::MAX + 1` sectors can be addressed. Arithmetic
/// and conversion traits are derived to keep call sites lightweight.
#[derive(
    Debug,
    Display,
    Default,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    From,
    Into,
    Add,
    AddAssign,
    Sub,
    SubAssign,
    Mul,
    MulAssign,
    Div,
    DivAssign,
    TrivialType,
)]
#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
// `repr(C)` gives the stable layout required by the `TrivialType` derive
#[repr(C)]
pub struct SectorIndex(u16);
54
// Enables `SectorIndex` to be used in ranges (`start..end`); every method delegates to the
// underlying `u16` implementation, so iteration order and overflow behavior match `u16` exactly.
impl Step for SectorIndex {
    #[inline(always)]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline(always)]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline(always)]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}
71
72impl From<SectorIndex> for u32 {
73    #[inline(always)]
74    fn from(original: SectorIndex) -> Self {
75        u32::from(original.0)
76    }
77}
78
79impl From<SectorIndex> for u64 {
80    #[inline(always)]
81    fn from(original: SectorIndex) -> Self {
82        u64::from(original.0)
83    }
84}
85
86impl From<SectorIndex> for usize {
87    #[inline(always)]
88    fn from(original: SectorIndex) -> Self {
89        usize::from(original.0)
90    }
91}
92
impl SectorIndex {
    /// Size in bytes of the serialized (little-endian) representation
    pub const SIZE: usize = size_of::<u16>();
    /// Sector index 0
    pub const ZERO: Self = Self(0);
    /// Max sector index
    pub const MAX: Self = Self(u16::MAX);

    /// Create a new instance from a raw `u16`
    #[inline(always)]
    pub const fn new(n: u16) -> Self {
        Self(n)
    }

    /// Create sector index from bytes.
    ///
    /// Inverse of [`Self::to_bytes`]; interprets `bytes` as little-endian.
    #[inline(always)]
    pub const fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
        Self(u16::from_le_bytes(bytes))
    }

    /// Convert sector index to bytes.
    ///
    /// Little-endian encoding; round-trips through [`Self::from_bytes`].
    #[inline(always)]
    pub const fn to_bytes(self) -> [u8; Self::SIZE] {
        self.0.to_le_bytes()
    }
}
119
/// Challenge used for a particular sector for particular slot
///
/// Newtype around a [`Blake3Hash`]; `Deref` exposes the inner hash bytes directly.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
pub struct SectorSlotChallenge(Blake3Hash);
123
124impl SectorSlotChallenge {
125    /// Index of s-bucket within sector to be audited
126    #[inline]
127    pub fn s_bucket_audit_index(&self) -> SBucket {
128        // As long as number of s-buckets is 2^16, we can pick first two bytes instead of actually
129        // calculating `U256::from_le_bytes(self.0) % Record::NUM_S_BUCKETS)`
130        const _: () = const {
131            assert!(Record::NUM_S_BUCKETS == 1 << u16::BITS as usize);
132        };
133        SBucket::from(u16::from_le_bytes([self.0[0], self.0[1]]))
134    }
135}
136
137/// Data structure representing sector ID in farmer's plot
138#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
139#[cfg_attr(feature = "scale-codec", derive(Encode, Decode))]
140#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
141pub struct SectorId(Blake3Hash);
142
143impl AsRef<[u8]> for SectorId {
144    #[inline]
145    fn as_ref(&self) -> &[u8] {
146        self.0.as_ref()
147    }
148}
149
impl SectorId {
    /// Size in bytes (same as the underlying BLAKE3 hash)
    const SIZE: usize = Blake3Hash::SIZE;

    /// Create a new sector ID by deriving it from public key hash, shard commitments root, sector
    /// index and history size.
    ///
    /// The ID is a keyed BLAKE3 hash (keyed by `public_key_hash`) over the buffer
    /// `sector_index (LE) || history_size (LE u64) || shard_commitments_root`.
    #[inline]
    pub fn new(
        public_key_hash: &Blake3Hash,
        shard_commitments_root: &ShardCommitmentHash,
        sector_index: SectorIndex,
        history_size: HistorySize,
    ) -> Self {
        // Fixed-size buffer holding the three fields back to back in the order documented above
        let mut bytes_to_hash =
            [0; SectorIndex::SIZE + HistorySize::SIZE as usize + ShardCommitmentHash::SIZE];
        bytes_to_hash[..SectorIndex::SIZE].copy_from_slice(&sector_index.to_bytes());
        bytes_to_hash[SectorIndex::SIZE..][..HistorySize::SIZE as usize]
            .copy_from_slice(&history_size.as_non_zero_u64().get().to_le_bytes());
        bytes_to_hash[SectorIndex::SIZE + HistorySize::SIZE as usize..]
            .copy_from_slice(shard_commitments_root.as_bytes());
        // TODO: Is keyed hash really needed here?
        // The buffer is smaller than one BLAKE3 block, hence `single_block_keyed_hash` and the
        // infallible `expect`
        Self(Blake3Hash::new(
            single_block_keyed_hash(public_key_hash, &bytes_to_hash)
                .expect("Less than a single block worth of bytes; qed"),
        ))
    }

    /// Derive piece index that should be stored in sector at `piece_offset` for specified size of
    /// blockchain history
    ///
    /// Odd piece offsets near the start of the sector are sampled from recent history only (once
    /// history is large enough); all other offsets are sampled uniformly from the whole history.
    pub fn derive_piece_index(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
        max_pieces_in_sector: u16,
        recent_segments: HistorySize,
        recent_history_fraction: (HistorySize, HistorySize),
    ) -> PieceIndex {
        let recent_segments_in_pieces = recent_segments.in_pieces().get();
        // Recent history must be at most `recent_history_fraction` of all history to use separate
        // policy for recent pieces
        let min_history_size_in_pieces = recent_segments_in_pieces
            * recent_history_fraction.1.in_pieces().get()
            / recent_history_fraction.0.in_pieces().get();
        // Pseudo-random 256-bit value derived from this sector ID, keyed by the piece offset
        // (zero-padded to the 32-byte key size)
        let input_hash = {
            let piece_offset_bytes = piece_offset.to_bytes();
            let mut key = [0; 32];
            key[..piece_offset_bytes.len()].copy_from_slice(&piece_offset_bytes);
            // TODO: Is keyed hash really needed here?
            NanoU256::from_le_bytes(
                single_block_keyed_hash(&key, self.as_ref())
                    .expect("Less than a single block worth of bytes; qed"),
            )
        };
        let history_size_in_pieces = history_size.in_pieces().get();
        // Number of leading piece offsets that interleave recent-history picks with uniform
        // picks; `1.max(...)` guarantees at least one even when the fraction rounds down to zero
        let num_interleaved_pieces = 1.max(
            u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
                / recent_history_fraction.1.in_pieces().get()
                * 2,
        );

        let piece_index = if history_size_in_pieces > min_history_size_in_pieces
            && u64::from(piece_offset) < num_interleaved_pieces
            && u16::from(piece_offset) % 2 == 1
        {
            // For odd piece offsets at the beginning of the sector pick pieces at random from
            // recent history only
            (input_hash % recent_segments_in_pieces)
                + (history_size_in_pieces - recent_segments_in_pieces)
        } else {
            input_hash % history_size_in_pieces
        };

        PieceIndex::from(piece_index)
    }

    /// Derive sector slot challenge for this sector from provided global challenge
    ///
    /// Computed as the bytewise XOR of the sector ID with the global challenge (done with SIMD
    /// over the 32 bytes).
    pub fn derive_sector_slot_challenge(
        &self,
        global_challenge: &Blake3Hash,
    ) -> SectorSlotChallenge {
        let sector_slot_challenge = Simd::from(*self.0) ^ Simd::from(**global_challenge);
        SectorSlotChallenge(sector_slot_challenge.to_array().into())
    }

    /// Derive evaluation seed
    ///
    /// BLAKE3 hash of `sector_id || piece_offset (LE)`, used as a proof-of-space seed.
    pub fn derive_evaluation_seed(&self, piece_offset: PieceOffset) -> PosSeed {
        let mut bytes_to_hash = [0; Self::SIZE + PieceOffset::SIZE];
        bytes_to_hash[..Self::SIZE].copy_from_slice(self.as_ref());
        bytes_to_hash[Self::SIZE..].copy_from_slice(&piece_offset.to_bytes());
        let evaluation_seed = single_block_hash(&bytes_to_hash)
            .expect("Less than a single block worth of bytes; qed");

        PosSeed::from(evaluation_seed)
    }

    /// Derive history size when sector created at `history_size` expires.
    ///
    /// Returns `None` on overflow.
    pub fn derive_expiration_history_size(
        &self,
        history_size: HistorySize,
        sector_expiration_check_segment_root: &SegmentRoot,
        min_sector_lifetime: HistorySize,
    ) -> Option<HistorySize> {
        // Earliest history size at which expiration may be checked; `None` here propagates as
        // overflow
        let sector_expiration_check_history_size = history_size
            .sector_expiration_check(min_sector_lifetime)?
            .as_non_zero_u64();

        // Pseudo-random 256-bit value from hashing `sector_id || segment_root` as one flat byte
        // slice
        let input_hash = NanoU256::from_le_bytes(
            single_block_hash([*self.0, **sector_expiration_check_segment_root].as_flattened())
                .expect("Less than a single block worth of bytes; qed"),
        );

        // Upper bound on expiration: `min_sector_lifetime + 4 * history_size`, with checked
        // arithmetic so any overflow yields `None`
        let last_possible_expiration = min_sector_lifetime
            .as_non_zero_u64()
            .checked_add(history_size.as_non_zero_u64().get().checked_mul(4u64)?)?;
        // Uniform offset within `[0, last_possible_expiration - check_history_size)`;
        // `checked_sub` returns `None` if the window is empty or negative
        let expires_in = input_hash
            % last_possible_expiration
                .get()
                .checked_sub(sector_expiration_check_history_size.get())?;

        let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
        let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
            "History size is not zero, so result is not zero even if expires immediately; qed",
        );
        Some(HistorySize::new(expiration_history_size))
    }
}
277
278/// S-bucket used in consensus
279#[derive(
280    Debug,
281    Display,
282    Default,
283    Copy,
284    Clone,
285    Ord,
286    PartialOrd,
287    Eq,
288    PartialEq,
289    Hash,
290    From,
291    Into,
292    Add,
293    AddAssign,
294    Sub,
295    SubAssign,
296    Mul,
297    MulAssign,
298    Div,
299    DivAssign,
300)]
301#[cfg_attr(feature = "scale-codec", derive(Encode, Decode, MaxEncodedLen))]
302#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
303#[repr(C)]
304pub struct SBucket(u16);
305
// Enables `SBucket` to be used in ranges (`start..end`); every method delegates to the underlying
// `u16` implementation, so iteration order and overflow behavior match `u16` exactly.
impl Step for SBucket {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u16::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u16::forward_checked(start.0, count).map(Self)
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u16::backward_checked(start.0, count).map(Self)
    }
}
322
323impl TryFrom<usize> for SBucket {
324    type Error = TryFromIntError;
325
326    #[inline]
327    fn try_from(value: usize) -> Result<Self, Self::Error> {
328        Ok(Self(u16::try_from(value)?))
329    }
330}
331
332impl From<SBucket> for u32 {
333    #[inline]
334    fn from(original: SBucket) -> Self {
335        u32::from(original.0)
336    }
337}
338
339impl From<SBucket> for usize {
340    #[inline]
341    fn from(original: SBucket) -> Self {
342        usize::from(original.0)
343    }
344}
345
impl SBucket {
    /// S-bucket 0.
    pub const ZERO: SBucket = SBucket(0);
    /// Max s-bucket index
    // The `as u16` cast cannot truncate: `Record::NUM_S_BUCKETS == 2^16` is asserted at compile
    // time in `SectorSlotChallenge::s_bucket_audit_index`, so `NUM_S_BUCKETS - 1 == u16::MAX`
    pub const MAX: SBucket = SBucket((Record::NUM_S_BUCKETS - 1) as u16);
}