
Commit 986614d

node: Request blocks in an uneven distribution
0-200k blocks are essentially empty. Abuse this fact
1 parent 964806f commit 986614d

3 files changed: +50, -11 lines
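To make the intended distribution concrete, here is a rough, self-contained sketch of the batch sizes implied by the new hashes_from_chain() chunking below. MAX_GETDATA = 50_000 and the 200k/100k/100k epoch cutoffs come from this commit; the tip height of 850_000 and the task count of 128 (the new `tasks` default) are assumptions for illustration only.

// Illustration of the uneven batch sizes produced by this commit's chunking.
// Only MAX_GETDATA and the epoch cutoffs come from the diff below; the tip
// height and task count are assumed values.
const MAX_GETDATA: usize = 50_000;

fn main() {
    let tip_height: usize = 850_000; // assumed for the example
    let jobs: usize = 128; // the new `tasks` default in node/config_spec.toml

    // Oldest 200k blocks are nearly empty: request them in the largest batches.
    let first_epoch = 200_000 / MAX_GETDATA; // 4 batches of 50_000 hashes
    // Next 100k blocks start to fill up: half-sized batches.
    let next_epoch = 100_000 / (MAX_GETDATA / 2); // 4 batches of 25_000
    // Next 100k blocks, up to roughly the segwit era: quarter-sized batches.
    let to_segwit = 100_000 / (MAX_GETDATA / 4); // 8 batches of 12_500
    // Everything newer is split evenly across the download tasks.
    let per_task = (tip_height - 400_000) / jobs; // about 3_500 hashes per batch

    println!("{first_epoch} + {next_epoch} + {to_segwit} large batches for old blocks");
    println!("{jobs} batches of roughly {per_task} recent blocks each");
}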

node/config_spec.toml

Lines changed: 3 additions & 3 deletions
@@ -19,7 +19,7 @@ doc = "The bitcoin network to operate on. Default `bitcoin`. Options are `bitcoi
 [[param]]
 name = "ping_timeout"
 type = "u64"
-default = "15"
+default = "60"
 doc = "The time (seconds) a peer has to respond to a `ping` message. Pings are sent aggressively throughout IBD to find slow peers."

 [[param]]
@@ -43,11 +43,11 @@ doc = "The maximum time (seconds) to write to a TCP stream until the connection
 [[param]]
 name = "min_blocks_per_sec"
 type = "f64"
-default = "1."
+default = "0.20"
 doc = "The minimum rate a peer has to respond to block requests."

 [[param]]
 name = "tasks"
 type = "usize"
-default = "64"
+default = "128"
 doc = "The number of tasks to download blocks. Default is 64. Each task uses two OS threads."

node/src/bin/ibd.rs

Lines changed: 1 addition & 2 deletions
@@ -74,8 +74,7 @@ fn main() {
     let acc_task = std::thread::spawn(move || accumulator_state.verify());
     let peers = Arc::new(Mutex::new(peers));
     let mut tasks = Vec::new();
-    let chunk_size = chain.best_header().height() as usize / task_num;
-    let hashes = hashes_from_chain(Arc::clone(&chain), chunk_size);
+    let hashes = hashes_from_chain(Arc::clone(&chain), task_num);
     for (task_id, chunk) in hashes.into_iter().enumerate() {
         let chain = Arc::clone(&chain);
         let tx = tx.clone();

node/src/lib.rs

Lines changed: 46 additions & 6 deletions
@@ -34,6 +34,7 @@ use p2p::{
 };

 const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::WTXID_RELAY_VERSION;
+const MAX_GETDATA: usize = 50_000;

 pub fn elapsed_time(then: Instant) {
     let duration_sec = then.elapsed().as_secs_f64();
@@ -79,11 +80,12 @@ pub fn sync_block_headers(
     hosts: &[SocketAddr],
     chainman: Arc<ChainstateManager>,
     network: Network,
-    timeout_params: TimeoutParams,
+    mut timeout_params: TimeoutParams,
 ) {
     let mut rng = thread_rng();
     let then = Instant::now();
     tracing::info!("Syncing block headers to assume valid hash");
+    timeout_params.ping_interval(Duration::from_secs(30));
     loop {
         let random = hosts
             .choose(&mut rng)
@@ -162,6 +164,7 @@ pub fn get_blocks_for_range(
     updater: Sender<AccumulatorUpdate>,
     mut batch: Vec<BlockHash>,
 ) {
+    tracing::info!("{task_id} assigned {} blocks", batch.len());
     let mut rng = thread_rng();
     loop {
         let peer = {
@@ -207,7 +210,15 @@ pub fn get_blocks_for_range(
             hints.get_block_offsets(block_height).into_iter().collect();
         // tracing::info!("{task_id} -> {block_height}:{hash}");
         let file_path = block_dir.join(format!("{hash}.block"));
-        let mut file = File::create_new(file_path).expect("duplicate block file");
+        let file = File::create_new(file_path);
+        let mut file = match file {
+            Ok(file) => file,
+            Err(e) => {
+                tracing::warn!("Conflicting open files at: {}", block_height);
+                tracing::warn!("{e}");
+                panic!("files cannot conflict");
+            }
+        };
         let block_bytes = consensus::serialize(&block);
         file.write_all(&block_bytes)
             .expect("failed to write block file");
@@ -278,7 +289,7 @@ pub fn get_blocks_for_range(
             }
         }
         if metrics.ping_timed_out(ping_timeout) {
-            tracing::info!("{task_id} failed to respond to a ping");
+            tracing::warn!("{task_id} failed to respond to a ping");
             break;
         }
     }
@@ -289,21 +300,50 @@ pub fn get_blocks_for_range(
     tracing::info!("All block ranges fetched: {task_id}");
 }

-pub fn hashes_from_chain(chain: Arc<ChainstateManager>, chunks: usize) -> Vec<Vec<BlockHash>> {
+pub fn hashes_from_chain(chain: Arc<ChainstateManager>, jobs: usize) -> Vec<Vec<BlockHash>> {
     let height = chain.best_header().height();
     let mut hashes = Vec::with_capacity(height as usize);
     let mut curr = chain.best_header();
     let tip_hash = BlockHash::from_byte_array(curr.block_hash().hash);
     hashes.push(tip_hash);
+    let mut out = Vec::new();
     while let Ok(next) = curr.prev() {
         if next.height() == 0 {
-            return hashes.chunks(chunks).map(|slice| slice.to_vec()).collect();
+            break;
         }
         let hash = BlockHash::from_byte_array(next.block_hash().hash);
         hashes.push(hash);
         curr = next;
     }
-    hashes.chunks(chunks).map(|slice| slice.to_vec()).collect()
+    // These blocks are empty. Fetch the maximum amount of blocks.
+    let first_epoch = hashes.split_off(hashes.len() - 200_000);
+    let first_chunks: Vec<Vec<BlockHash>> = first_epoch
+        .chunks(MAX_GETDATA)
+        .map(|slice| slice.to_vec())
+        .collect();
+    out.extend(first_chunks);
+    // These start to get larger, but are still small
+    let next_epoch = hashes.split_off(hashes.len() - 100_000);
+    let next_chunks: Vec<Vec<BlockHash>> = next_epoch
+        .chunks(MAX_GETDATA / 2)
+        .map(|slice| slice.to_vec())
+        .collect();
+    out.extend(next_chunks);
+    // Still not entirely full, but almost there
+    let to_segwit = hashes.split_off(hashes.len() - 100_000);
+    let to_segwit_chunks: Vec<Vec<BlockHash>> = to_segwit
+        .chunks(MAX_GETDATA / 4)
+        .map(|slice| slice.to_vec())
+        .collect();
+    out.extend(to_segwit_chunks);
+    // Now divide the rest among jobs
+    let chunk_size = hashes.len() / jobs;
+    let rest: Vec<Vec<BlockHash>> = hashes
+        .chunks(chunk_size)
+        .map(|slice| slice.to_vec())
+        .collect();
+    out.extend(rest);
+    out
 }

 pub trait ChainExt {
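For context, a minimal, runnable sketch of how the uneven batches returned by hashes_from_chain() fan out to download threads, mirroring the call site changed in node/src/bin/ibd.rs above. The BlockHash alias and fetch_batch() stub are stand-ins so the example compiles on its own; they are not the real types or functions from this repository.

// One thread per batch, as in ibd.rs. Early epochs arrive as a few huge
// batches; recent blocks are split evenly across tasks.
type BlockHash = [u8; 32]; // stand-in for bitcoin::BlockHash

fn fetch_batch(task_id: usize, batch: Vec<BlockHash>) {
    // Placeholder for get_blocks_for_range(): request `batch` from a peer.
    println!("task {task_id} requesting {} blocks", batch.len());
}

fn main() {
    // Pretend hashes_from_chain() returned these uneven batches.
    let batches: Vec<Vec<BlockHash>> = vec![
        vec![[0u8; 32]; 50_000],
        vec![[0u8; 32]; 25_000],
        vec![[0u8; 32]; 3_500],
    ];
    let mut tasks = Vec::new();
    for (task_id, batch) in batches.into_iter().enumerate() {
        tasks.push(std::thread::spawn(move || fetch_batch(task_id, batch)));
    }
    for task in tasks {
        task.join().expect("download task panicked");
    }
}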
