
ab_farmer_components/shard_commitment.rs

//! Utilities related to shard commitments

use ab_core_primitives::hashes::Blake3Hash;
use ab_core_primitives::segments::{HistorySize, SegmentIndex};
use ab_core_primitives::shard::NumShards;
use ab_core_primitives::solutions::{
    ShardCommitmentHash, ShardMembershipEntropy, SolutionShardCommitment,
};
use ab_io_type::trivial_type::TrivialType;
use ab_merkle_tree::unbalanced::UnbalancedMerkleTree;
use blake3::Hasher;
use parking_lot::RwLock;
use schnellru::{ByLength, LruMap};
use std::iter;
use std::mem::MaybeUninit;
use std::sync::Arc;

const SHARD_COMMITMENTS_ROOT_CACHE_SIZE: ByLength = ByLength::new(32);

#[derive(Debug)]
struct Inner {
    shard_commitments_seed: Blake3Hash,
    lru: LruMap<HistorySize, ShardCommitmentHash, ByLength>,
}

/// Cache for shard commitments roots to avoid recomputing them repeatedly
#[derive(Debug, Clone)]
pub struct ShardCommitmentsRootsCache {
    inner: Arc<RwLock<Inner>>,
}

impl ShardCommitmentsRootsCache {
    /// Create a new instance
    pub fn new(shard_commitments_seed: Blake3Hash) -> Self {
        Self {
            inner: Arc::new(RwLock::new(Inner {
                shard_commitments_seed,
                lru: LruMap::new(SHARD_COMMITMENTS_ROOT_CACHE_SIZE),
            })),
        }
    }

    /// Seed used during instantiation
    pub fn shard_commitments_seed(&self) -> Blake3Hash {
        self.inner.read().shard_commitments_seed
    }

    /// Get the root for the specified history size.
    ///
    /// The root is computed and cached unless it is already known.
    pub fn get(&self, history_size: HistorySize) -> ShardCommitmentHash {
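        // Fast path: return the cached root under a read lock if it is already known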
        if let Some(root) = self.inner.read().lru.peek(&history_size).copied() {
            return root;
        }

        let inner = &mut *self.inner.write();
        // NOTE: See https://github.com/koute/schnellru/issues/7 for an explanation of the
        // `Option` return type
        *inner
            .lru
            .get_or_insert(history_size, || {
                derive_shard_commitments_root(&inner.shard_commitments_seed, history_size)
            })
            .expect("Not limited by memory; qed")
    }
}

/// Derive shard commitments root from the seed and history size
pub fn derive_shard_commitments_root(
    shard_commitments_seed: &Blake3Hash,
    history_size: HistorySize,
) -> ShardCommitmentHash {
    let mut stream = {
        let mut hasher = Hasher::new_keyed(shard_commitments_seed);
        hasher.update(SegmentIndex::from(history_size).as_bytes());
        hasher.finalize_xof()
    };

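    // Each leaf is drawn sequentially from the keyed BLAKE3 XOF stream above, so a given seed
    // and history size always produce the same leaves and, therefore, the same root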
    let mut index = 0;
    let leaves = iter::from_fn(|| {
        if index < SolutionShardCommitment::NUM_LEAVES {
            let mut bytes = [0; ShardCommitmentHash::SIZE];
            stream.fill(&mut bytes);

            index += 1;

            Some(bytes)
        } else {
            None
        }
    });

    // NOTE: Using unbalanced implementation since balanced implementation requires allocation of
    // leaves
    const NUM_LEAVES_U64: u64 = SolutionShardCommitment::NUM_LEAVES as u64;
    let root = UnbalancedMerkleTree::compute_root_only::<NUM_LEAVES_U64, _, _>(leaves)
        .expect("List of leaves is not empty; qed");

    ShardCommitmentHash::new(root)
}

/// Derive solution shard commitment
pub fn derive_solution_shard_commitment(
    public_key_hash: &Blake3Hash,
    shard_commitments_seed: &Blake3Hash,
    shard_commitments_root: &ShardCommitmentHash,
    history_size: HistorySize,
    shard_membership_entropy: &ShardMembershipEntropy,
    num_shards: NumShards,
) -> SolutionShardCommitment {
    let mut stream = {
        let mut hasher = Hasher::new_keyed(shard_commitments_seed);
        hasher.update(SegmentIndex::from(history_size).as_bytes());
        hasher.finalize_xof()
    };

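    // Pick which of the `NUM_LEAVES` commitments belongs to this public key for the given root,
    // membership entropy and history size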
    let leaf_index = num_shards.derive_shard_commitment_index(
        public_key_hash,
        shard_commitments_root,
        shard_membership_entropy,
        history_size,
    ) as usize;

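    // Stream the same leaves as in `derive_shard_commitments_root()`, capturing the leaf at
    // `leaf_index` along the way so it can be returned together with its proof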
    let mut leaf = [0; _];
    let mut index = 0;
    let leaves = iter::from_fn(|| {
        if index < SolutionShardCommitment::NUM_LEAVES {
            let mut bytes = [0; ShardCommitmentHash::SIZE];
            stream.fill(&mut bytes);

            if index == leaf_index {
                leaf = bytes;
            }

            index += 1;

            Some(bytes)
        } else {
            None
        }
    });

    const NUM_LEAVES_U64: u64 = SolutionShardCommitment::NUM_LEAVES as u64;
    let mut proof = [MaybeUninit::uninit(); _];
    // NOTE: Using unbalanced implementation since balanced implementation requires an allocation
    // and uses a lot more RAM
    let (_root, computed_proof) =
        UnbalancedMerkleTree::compute_root_and_proof_in::<NUM_LEAVES_U64, _, _>(
            leaves, leaf_index, &mut proof,
        )
        .expect("Index is always within the list of leaves; qed");
    debug_assert_eq!(computed_proof.len(), proof.len());

    // SAFETY: Checked above that it is fully initialized
    let proof = unsafe { MaybeUninit::array_assume_init(proof) };

    SolutionShardCommitment {
        root: *shard_commitments_root,
        proof: ShardCommitmentHash::array_from_repr(proof),
        leaf: ShardCommitmentHash::new(leaf),
    }
}
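
A minimal usage sketch, not part of the file above: it shows how a caller might combine the cache with `derive_solution_shard_commitment()`, relying only on the public items shown here. The wrapper name `commit_for_solution` is hypothetical, and the primitive values (`Blake3Hash`, `HistorySize`, `ShardMembershipEntropy`, `NumShards`) are taken as parameters because their construction lives in `ab_core_primitives` rather than in this module.

fn commit_for_solution(
    cache: &ShardCommitmentsRootsCache,
    public_key_hash: &Blake3Hash,
    history_size: HistorySize,
    shard_membership_entropy: &ShardMembershipEntropy,
    num_shards: NumShards,
) -> SolutionShardCommitment {
    // The root is looked up in (or inserted into) the LRU cache, so repeated solutions for the
    // same history size do not recompute it
    let shard_commitments_root = cache.get(history_size);

    derive_solution_shard_commitment(
        public_key_hash,
        &cache.shard_commitments_seed(),
        &shard_commitments_root,
        history_size,
        shard_membership_entropy,
        num_shards,
    )
}

Since `ShardCommitmentsRootsCache` derives `Clone` over an `Arc`-wrapped interior, the same cache instance can be shared cheaply across farming tasks.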