
Commit 72442b0

[sui-tool] limit sui-tool download concurrency (#24450)
## Description

Attempting to mitigate increased errors with:

```
Generic HTTP error: error decoding response body

Caused by:
    0: error decoding response body
    1: request or response body error
    2: error reading a body from connection
    3: peer closed connection without sending TLS close_notify: https://docs.rs/rustls/latest/rustls/manual/_03_howto/index.html#unexpected-eof

exit_code=1
failed at Wed Nov 26 03:17:20 PM UTC 2025
```

## Test plan

How did you test the new or updated feature?

---

## Release notes

Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates.

- [ ] Protocol:
- [ ] Nodes (Validators and Full nodes):
- [ ] gRPC:
- [ ] JSON-RPC:
- [ ] GraphQL:
- [ ] CLI:
- [ ] Rust SDK:
- [ ] Indexing Framework:
1 parent 72d6f9f commit 72442b0

File tree: 2 files changed (+9, -15 lines)


crates/sui-tool/src/commands.rs

Lines changed: 4 additions & 14 deletions
```diff
@@ -227,8 +227,7 @@ pub enum ToolCommand {
         /// skip downloading indexes dir
         #[clap(long = "skip-indexes")]
         skip_indexes: bool,
-        /// Number of parallel downloads to perform. Defaults to a reasonable
-        /// value based on number of available logical cores.
+        /// Number of parallel downloads to perform. Defaults to 50, max 200.
         #[clap(long = "num-parallel-downloads")]
         num_parallel_downloads: Option<usize>,
         /// Network to download snapshot for. Defaults to "mainnet".
@@ -293,8 +292,7 @@ pub enum ToolCommand {
         genesis: PathBuf,
         #[clap(long = "path")]
         path: PathBuf,
-        /// Number of parallel downloads to perform. Defaults to a reasonable
-        /// value based on number of available logical cores.
+        /// Number of parallel downloads to perform. Defaults to 50, max 200.
         #[clap(long = "num-parallel-downloads")]
         num_parallel_downloads: Option<usize>,
         /// Verification mode to employ.
@@ -691,11 +689,7 @@ impl ToolCommand {
                         .update_log("off")
                         .expect("Failed to update log level");
                 }
-                let num_parallel_downloads = num_parallel_downloads.unwrap_or_else(|| {
-                    num_cpus::get()
-                        .checked_sub(1)
-                        .expect("Failed to get number of CPUs")
-                });
+                let num_parallel_downloads = num_parallel_downloads.unwrap_or(50).min(200);
                 let snapshot_bucket =
                     snapshot_bucket.or_else(|| match (network, no_sign_request) {
                         (Chain::Mainnet, false) => Some(
@@ -846,11 +840,7 @@ impl ToolCommand {
                         .update_log("off")
                         .expect("Failed to update log level");
                 }
-                let num_parallel_downloads = num_parallel_downloads.unwrap_or_else(|| {
-                    num_cpus::get()
-                        .checked_sub(1)
-                        .expect("Failed to get number of CPUs")
-                });
+                let num_parallel_downloads = num_parallel_downloads.unwrap_or(50).min(200);
                 let snapshot_bucket =
                     snapshot_bucket.or_else(|| match (network, no_sign_request) {
                         (Chain::Mainnet, false) => Some(
```
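Both call sites now use the same fixed default and cap instead of deriving the value from the host's logical core count. A minimal sketch of the resulting clamping behaviour; the helper name is illustrative and not part of sui-tool's API:

```rust
/// Hypothetical helper mirroring `num_parallel_downloads.unwrap_or(50).min(200)`:
/// an unset flag falls back to 50, and any user-supplied value is capped at 200.
fn effective_parallel_downloads(requested: Option<usize>) -> usize {
    requested.unwrap_or(50).min(200)
}

fn main() {
    assert_eq!(effective_parallel_downloads(None), 50); // default when the flag is unset
    assert_eq!(effective_parallel_downloads(Some(128)), 128); // within the cap, passed through
    assert_eq!(effective_parallel_downloads(Some(1000)), 200); // clamped to the maximum
    println!("clamping behaves as documented");
}
```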

crates/sui-tool/src/lib.rs

Lines changed: 5 additions & 1 deletion
```diff
@@ -1056,9 +1056,13 @@ async fn backfill_epoch_transaction_digests(
         }
     });
 
+    // Use reduced concurrency for backfill to avoid overwhelming the remote server
+    // when running in parallel with snapshot download
+    let backfill_concurrency = (concurrency / 4).max(1);
+
     futures::stream::iter(checkpoints_to_fetch)
         .map(|sq| CheckpointReader::fetch_from_object_store(&client, sq))
-        .buffer_unordered(concurrency)
+        .buffer_unordered(backfill_concurrency)
         .try_for_each(|checkpoint| {
             let perpetual_db = perpetual_db.clone();
             let tx_counter = tx_counter.clone();
```
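For context, `buffer_unordered(n)` keeps at most `n` fetch futures in flight at once, so quartering the configured concurrency directly reduces the number of simultaneous requests against the remote store. A self-contained sketch of that pattern, assuming only the `futures` crate; the `fetch` function and the checkpoint range are placeholders rather than sui-tool code:

```rust
use futures::executor::block_on;
use futures::stream::{self, StreamExt};

// Placeholder for a remote fetch; in the diff above this role is played by
// CheckpointReader::fetch_from_object_store.
async fn fetch(seq: u64) -> u64 {
    seq
}

fn main() {
    let concurrency: usize = 50;
    // Mirror the change: run the backfill at a quarter of the configured
    // concurrency, but never drop below one in-flight request.
    let backfill_concurrency = (concurrency / 4).max(1);

    let results: Vec<u64> = block_on(
        stream::iter(0u64..16)
            .map(fetch)
            .buffer_unordered(backfill_concurrency)
            .collect(),
    );

    println!(
        "fetched {} checkpoints with at most {} in flight",
        results.len(),
        backfill_concurrency
    );
}
```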
