Skip to content

Commit f0ba4d8

Browse files
committed
Refactor pruning proof least common ancestor in store logic
1 parent f736436 commit f0ba4d8

File tree: 1 file changed (+48, −32 lines)

  • consensus/src/processes/pruning_proof/build.rs

consensus/src/processes/pruning_proof/build.rs

Lines changed: 48 additions & 32 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -28,6 +28,9 @@ use crate::{

 use super::{PruningProofManager, PruningProofManagerInternalResult};

+// The amount of extra depth we try to create per level after the usual 2M
+const SAFETY_MARGIN: u64 = 100;
+
 #[derive(Clone)]
 struct RelationsStoreInFutureOfRoot<T: RelationsStoreReader, U: ReachabilityService> {
     relations_store: T,
```
```diff
@@ -96,19 +99,14 @@ impl PruningProofManager {
                 .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m)
                 .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err))
                 .unwrap();
-            if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) {
-                block_at_depth_m_at_next_level
-            } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) {
-                block_at_depth_2m
-            } else {
-                self.find_common_ancestor_in_chain_of_a(
-                    &*ghostdag_stores[level],
-                    block_at_depth_m_at_next_level,
-                    block_at_depth_2m,
-                )
-                .map_err(|err| format!("level: {}, err: {}", level, err))
-                .unwrap()
-            }
+
+            self.find_latest_common_ancestor_in_store(
+                &*ghostdag_stores[level],
+                block_at_depth_m_at_next_level,
+                block_at_depth_2m,
+            )
+            .map_err(|err| format!("level: {}, err: {}", level, err))
+            .unwrap()
         } else {
             block_at_depth_2m
         };
```
```diff
@@ -209,6 +207,33 @@ impl PruningProofManager {
         )
     }

+    /// Given two hashes (both are assumed to exist in the passed ghostdag store),
+    /// find their latest common ancestor. The latest common ancestor is minimum block
+    /// such that both 'a' and 'b' are either equal to it or in its future.
+    /// This ancestor must also be in the selected parent chain of 'a'.
+    ///
+    /// Additional Notes:
+    /// - 'a' and 'b' are expected to be hashes acquired via block_at_depth.
+    ///   By virtue of this, they are both chain block hashes.
+    fn find_latest_common_ancestor_in_store(
+        &self,
+        ghostdag_store: &DbGhostdagStore,
+        a: Hash,
+        b: Hash,
+    ) -> Result<Hash, PruningProofManagerInternalError> {
+        // These two hashes are expected to be acquired from the passed ghostdag store
+        assert!(ghostdag_store.has(a).unwrap());
+        assert!(ghostdag_store.has(b).unwrap());
+
+        if self.reachability_service.is_dag_ancestor_of(a, b) {
+            Ok(a)
+        } else if self.reachability_service.is_dag_ancestor_of(b, a) {
+            Ok(b)
+        } else {
+            self.find_common_ancestor_in_chain_of_a(ghostdag_store, a, b)
+        }
+    }
+
     /// Find a sufficient root at a given level by going through the headers store and looking
     /// for a deep enough level block
     /// For each root candidate, fill in the ghostdag data to see if it actually is deep enough.
```
```diff
@@ -221,7 +246,7 @@ impl PruningProofManager {
         &self,
         pp_header: &HeaderWithBlockLevel,
         level: BlockLevel,
-        _current_dag_level: BlockLevel,
+        current_dag_level: BlockLevel,
         required_block: Option<Hash>,
         temp_db: Arc<DB>,
     ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> {
```
```diff
@@ -237,17 +262,15 @@ impl PruningProofManager {

         // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from.
         // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level
-        // TODO: uncomment when the full fix to minimize proof sizes comes.
-        // let mut required_base_level_depth = self.estimated_blue_depth_at_level_0(
-        //     level,
-        //     required_level_depth + 100, // We take a safety margin
-        //     current_dag_level,
-        // );
+        let mut required_base_level_depth = self.estimated_blue_depth_at_level_0(
+            level,
+            required_level_depth + SAFETY_MARGIN, // We take a safety margin
+            current_dag_level,
+        );
         // NOTE: Starting from required_level_depth (a much lower starting point than normal) will typically require O(N) iterations
         // for level L + N where L is the current dag level. This is fine since the steps per iteration are still exponential
         // and so we will complete each level in not much more than N iterations per level.
         // We start here anyway so we can try to minimize the proof size when the current dag level goes down significantly.
-        let mut required_base_level_depth = required_level_depth + 100;

         let mut is_last_level_header;
         let mut tries = 0;
```
```diff
@@ -307,23 +330,16 @@ impl PruningProofManager {
             if root != self.genesis_hash {
                 // Try to adjust the root forward with the new known block_at_depth_2m_buffered
                 let block_at_depth_2m_buffered =
-                    self.block_at_depth(&*ghostdag_store, selected_tip, required_level_depth + 100).unwrap();
+                    self.block_at_depth(&*ghostdag_store, selected_tip, required_level_depth + SAFETY_MARGIN).unwrap();

-                root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m_buffered, block_at_depth_m_at_next_level)
-                {
-                    block_at_depth_2m_buffered
-                } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m_buffered)
-                {
-                    block_at_depth_m_at_next_level
-                } else {
-                    self.find_common_ancestor_in_chain_of_a(
+                root = self
+                    .find_latest_common_ancestor_in_store(
                         &*ghostdag_store,
                         block_at_depth_m_at_next_level,
                         block_at_depth_2m_buffered,
                     )
                     .map_err(|err| format!("level: {}, err: {}", level, err))
-                    .unwrap()
-                };
+                    .unwrap();
             }

             break Ok((ghostdag_store, selected_tip, root));
```

0 commit comments

Comments (0)