# config.example.toml — forked from firedancer-io/radiance
# Mithril Configuration File
# Generate a starter config with: mithril config init
# CLI flags override these values when both are specified.
#
# Quick start:
# 1. Run: mithril config init
# 2. Edit [storage] paths for your setup
# 3. Run: mithril run --config config.toml
#
# Everything else has sensible defaults.
# ============================================================================
# GENERAL SETTINGS
# ============================================================================
# Name of this Mithril instance (used in logs and metrics)
# Give each instance a distinct name if you run more than one, so their
# log and metric streams can be told apart.
name = "mithril"
# ============================================================================
# [bootstrap] - Startup Mode
# ============================================================================
#
# Controls how Mithril initializes its state on startup.
#
# Modes:
# "auto" - Use existing AccountsDB if valid, otherwise download snapshot.
# If AccountsDB is significantly behind chain tip (>full_threshold
# slots), prompts interactively to continue or rebuild.
# "snapshot" - Rebuild AccountsDB from snapshot. Reuses existing downloaded
# snapshot if within full_threshold slots, otherwise downloads new.
# "new-snapshot" - Always download a fresh snapshot, ignoring all existing data.
# Cleans AccountsDB and snapshot directories before starting.
# "accountsdb" - Require existing valid AccountsDB, fail fast if missing.
#
# The default is "auto" which tries to resume from existing AccountsDB if valid,
# or downloads a fresh snapshot if needed. Use "snapshot" to always check freshness.
# (full_threshold referenced above is configured in the [snapshot] section below.)
[bootstrap]
mode = "auto"
# ============================================================================
# [storage] - Storage Paths
# ============================================================================
#
# Mithril stores data in these locations:
#
# AccountsDB (~500GB)
# In Solana, "accounts" are the fundamental unit of state - every wallet,
# token balance, program, and piece of on-chain data is an account.
# AccountsDB is Mithril's index of all ~500M accounts on the network.
# Heavy random I/O - put this on your FASTEST NVMe.
#
# Snapshots (~100GB)
# Periodic dumps of account state from the network. Mithril downloads
# one on first run to bootstrap AccountsDB.
#
# Blockstore (varies)
# Incoming blocks from RPC/Lightbringer. Size depends on how much history
# you want to keep (0 to hundreds of GB).
# NOTE: Block download to disk is TEMPORARILY DISABLED. Blocks are
# currently streamed directly from RPC and not persisted to disk.
# This path is reserved for future use when block persistence is
# re-implemented.
#
# Recommended setup (two NVMe drives):
# /mnt/mithril-accounts/ <- Fast NVMe (~500GB for AccountsDB)
# /mnt/mithril-ledger/ <- Larger NVMe for everything else
# ├── blockstore/
# └── snapshots/
#
# See scripts/disk-setup.sh for automated setup.
[storage]
# AccountsDB - the main account state database (~500GB)
# Put this on your fastest NVMe due to heavy random I/O.
accounts = "/mnt/mithril-accounts"
# Blockstore - incoming blocks from RPC/Lightbringer
# Size depends on how much block history you keep.
# NOTE: Block persistence is temporarily disabled. Blocks are streamed
# directly from RPC without saving to disk. This setting is reserved
# for when block persistence is re-implemented.
blockstore = "/mnt/mithril-ledger/blockstore"
# Snapshots - downloaded on first run (~100GB for full + incremental)
# Retention is controlled by [snapshot].max_full_snapshots below.
snapshots = "/mnt/mithril-ledger/snapshots"
# Logs - runtime output for debugging and monitoring
# (rotation and retention for this directory are configured in [log] below)
logs = "/mnt/mithril-logs"
# ============================================================================
# [network] - RPC Endpoints
# ============================================================================
#
# Solana RPC endpoints for all cluster access (blocks, leader schedule, etc.)
# Endpoints are listed in PRIORITY ORDER:
# - First endpoint is PRIMARY (used for all calls when healthy)
# - Remaining endpoints are FALLBACKS (used when primary fails)
#
# On hard connectivity errors (connection refused, DNS failure, etc.), Mithril
# automatically fails over to the next endpoint in the list. It periodically
# probes the primary and restores to it when healthy.
[network]
# Solana cluster (required): "mainnet-beta", "testnet", or "devnet"
# This is validated against the RPC's genesis hash to prevent accidentally
# running mainnet state against testnet, or vice versa.
cluster = "mainnet-beta"
# RPC endpoints in priority order (first = primary, rest = fallbacks)
# These same endpoints also serve block streaming when [block].source = "rpc".
#
# Example with primary + fallback:
# rpc = [
# "https://your-rpc.example.com", # Primary
# "https://api.mainnet-beta.solana.com" # Fallback
# ]
rpc = ["https://api.mainnet-beta.solana.com"]
# ============================================================================
# [block] - Block Streaming
# ============================================================================
[block]
# Where to stream new blocks from:
# "rpc" - Fetch blocks via RPC (uses endpoints from [network].rpc)
# "lightbringer" - Stream blocks from a Lightbringer endpoint (faster, lower latency)
# NOTE: Lightbringer mode is TEMPORARILY DISABLED and will fall back to RPC.
# The background downloader was removed; Lightbringer streaming will be
# re-implemented in a future release.
source = "rpc"
# Lightbringer endpoint address (only used when source = "lightbringer")
# NOTE: Currently unused - Lightbringer mode is temporarily disabled (see above)
# lightbringer_endpoint = "localhost:9000"
# =========================================================================
# Global Fetch Tuning
# =========================================================================
# These settings control parallel block fetching.
# Tune based on your RPC provider's rate limits.
# Maximum RPC requests per second for block fetching.
# Check your RPC provider's rate limits. Private nodes can go higher.
max_rps = 8
# Maximum concurrent block fetch requests (workers).
# Should generally match max_rps.
max_inflight = 8
# How often to poll for chain tip in CATCHUP mode (milliseconds).
tip_poll_interval_ms = 1000
# Don't fetch within this many slots of the confirmed tip.
# Prevents "slot not available" errors. 32 slots ≈ 13 seconds.
# NOTE: Only applied in CATCHUP mode when gap > catchup_tip_gate_threshold.
tip_safety_margin = 32
# =========================================================================
# Mode Thresholds (Hysteresis)
# =========================================================================
# Mithril uses two modes for block fetching:
#
# CATCHUP: Far from tip - aggressive prefetching, fills buffer to ~100 slots
# NEAR-TIP: Close to tip - JIT scheduling with small lookahead
#
# Hysteresis prevents mode thrashing: different thresholds for entering vs exiting.
# Example: enter near-tip at gap <= 32, exit back to catchup at gap >= 64.
# Enter NEAR-TIP mode when gap to confirmed tip <= this.
near_tip_threshold = 32
# Exit NEAR-TIP mode (back to CATCHUP) when gap >= this.
# Keep this larger than near_tip_threshold; if the two are equal the
# hysteresis band disappears and fetching can thrash between modes.
catchup_threshold = 64
# Only apply tip_safety_margin when gap > this threshold.
# When gap is smaller (transitioning to near-tip), the margin is too restrictive
# and causes "beyond tip" storms. Set high enough to give plenty of headroom.
catchup_tip_gate_threshold = 128
# =========================================================================
# Near-Tip Tuning
# =========================================================================
# These settings only apply in NEAR-TIP mode (when close to chain tip).
# Note: tip_safety_margin is NOT applied in near-tip mode by design.
# Near-tip relies on fast retries for "slot not available" instead of margin.
# How often to poll for chain tip in NEAR-TIP mode (milliseconds).
# Faster than catchup to stay responsive to new slots.
near_tip_poll_interval_ms = 500
# How many slots ahead to schedule in NEAR-TIP mode.
# Provides enough buffer to hide RPC latency (~300ms) behind execution (~100ms).
near_tip_lookahead = 2
# ============================================================================
# [replay] - Block Replay
# ============================================================================
#
# Controls how Mithril processes and verifies blocks.
[replay]
# Transaction parallelism. Set to 0 for sequential execution,
# or >0 to execute a topsort tx plan with the given number of workers.
# Recommended: 2x your CPU core count (e.g., 192 for a 96-core machine)
txpar = 24
# Finite replay: stop after N slots or at a specific slot (optional)
# Leave commented out for unbounded replay; uncomment to enable a limit.
# num_slots = 0
# end_slot = -1
# ============================================================================
# [rpc] - Mithril RPC Server
# ============================================================================
#
# Mithril exposes a Solana-compatible JSON-RPC interface on all interfaces,
# allowing other tools to query Mithril's account state directly.
#
# Query with: curl http://localhost:8899 -X POST -H "Content-Type: application/json" \
# -d '{"jsonrpc":"2.0","id":1,"method":"getBlockHeight"}'
[rpc]
# Port for Mithril's RPC interface (0 = disabled)
# NOTE(review): the server binds on all interfaces (see above) - firewall
# this port if the host is publicly reachable.
port = 8899
# ============================================================================
# [tuning] - Performance Tuning & Profiling
# ============================================================================
#
# Advanced settings for optimizing Mithril's performance.
# The defaults work well for most deployments.
[tuning]
# Zstd decoder concurrency (defaults to NumCPU)
# Uncomment to override the default.
# zstd_decoder_concurrency = 16
# Bound for number of log shards to flush to Accounts DB Index at once
max_concurrent_flushers = 16
# Size in MB for serialized parameter arena (0 to disable)
param_arena_size_mb = 512
# Number of borrowed accounts to preallocate in arena (0 to disable)
borrowed_account_arena_size = 1024
# Enable/disable pool allocator for slices
use_pool = true
# Number of goroutines to store modified accounts at the end of each block
store_accounts_workers = 128
# [tuning.pprof] - CPU/Memory Profiling
#
# NOTE(review): the pprof HTTP endpoint exposes internal runtime state;
# keep the port firewalled / local-only on shared or public hosts.
#
# Usage (assuming port = 6060):
#
# View in browser:
# http://localhost:6060/debug/pprof/
#
# CPU profile (30 sec sample, opens interactive terminal):
# go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
#
# Heap profile (current memory usage):
# go tool pprof http://localhost:6060/debug/pprof/heap
#
# Alloc profile (total allocations since start):
# go tool pprof http://localhost:6060/debug/pprof/allocs
#
# View goroutines in terminal:
# curl http://localhost:6060/debug/pprof/goroutine?debug=1
#
# View all goroutines with full stack traces:
# curl http://localhost:6060/debug/pprof/goroutine?debug=2
#
[tuning.pprof]
# Port to serve HTTP pprof endpoint (-1 to disable)
# port = 6060
# Filename to write CPU profile (for offline analysis with go tool pprof)
# cpu_profile_path = "/mnt/mithril-data/profiling/cpu.pprof"
# ============================================================================
# [debug] - Debug Logging
# ============================================================================
#
# Options for debugging specific transactions or accounts.
# Both lists are off by default; uncomment and fill in real values to enable.
[debug]
# Transaction signatures to enable debug logging for
# transaction_signatures = ["sig1", "sig2"]
# Account pubkeys to enable debug logging of transactions that modify them
# account_writes = ["pubkey1", "pubkey2"]
# ============================================================================
# [reporting] - Metrics & Reporting
# ============================================================================
[reporting]
# Replay timings (JSONL latency records per block) are automatically written
# to replay_timings.jsonl in each run's log directory.
# No configurable keys at present; this section is reserved.
# ============================================================================
# [snapshot] - Snapshot Download Settings
# ============================================================================
[snapshot]
# -------------------------------------------------------------------------
# Snapshot Storage
# -------------------------------------------------------------------------
# Snapshots are downloaded to the path specified in [storage].snapshots above.
# Maximum snapshots to keep on disk (controls both saving and retention)
# 0 = Stream-only mode (don't save snapshots, saves disk space)
# 1 = Save one snapshot, delete previous before downloading new
# 2+ = Keep N snapshots, delete oldest when limit exceeded
#
# Saved snapshots are valuable for debugging and reproducing issues -
# the dev team can start from a snapshot to investigate problems.
# Set to 0 for stream-only mode (saves disk space but requires re-download
# if interrupted).
max_full_snapshots = 1
# Enable verbose output showing detailed node discovery statistics
verbose = false
# -------------------------------------------------------------------------
# Snapshot Age Threshold
# -------------------------------------------------------------------------
# Maximum age for full snapshots in slots (~100,000 slots ≈ 11 hours at 400ms/slot)
#
# This threshold is used for:
# - Snapshot download: Skip nodes with snapshots older than this
# - Snapshot reuse: In mode=snapshot, reuse existing downloaded snapshot
# if within this age instead of downloading new
# - Stale AccountsDB detection: In mode=auto, prompt user if AccountsDB
# is this far behind the latest available snapshot
full_threshold = 100000
# Maximum age for incremental snapshots (slots)
# Incrementals are always downloaded (they're small and provide latest state)
incremental_threshold = 1000
# Safety margin - warn if snapshot is this close to expiration (slots)
safety_margin_slots = 5000
# -------------------------------------------------------------------------
# Stage 1: Fast Parallel Triage
# Quickly tests many nodes in parallel to eliminate slow ones
# -------------------------------------------------------------------------
# Warmup data before timing (KiB)
stage1_warm_kib = 512
# Size of each measurement window (KiB)
stage1_window_kib = 512
# Number of measurement windows (total data = stage1_windows * stage1_window_kib)
stage1_windows = 4
# Timeout for stage 1 testing per node (milliseconds)
stage1_timeout_ms = 3000
# Number of concurrent downloads in stage 1 (0 = auto, uses CPU cores)
stage1_concurrency = 0
# -------------------------------------------------------------------------
# Stage 2: Sustained Speed Test
# Tests top candidates from stage 1 with longer downloads
# -------------------------------------------------------------------------
# Number of top candidates from stage 1 to test in stage 2
stage2_top_k = 8
# Warmup duration before measurement (seconds)
# Recommended: 3 seconds for home internet (more variable), 1-2 for datacenter
stage2_warm_sec = 3
# Measurement duration (seconds)
# Recommended: 3 seconds for home internet (more variable), 1-2 for datacenter
stage2_measure_sec = 3
# Minimum speed ratio (collapse if speed drops below this * peak)
stage2_min_ratio = 0.6
# Minimum absolute speed (MB/s, 0 = disabled)
stage2_min_abs_mbs = 0.0
# -------------------------------------------------------------------------
# Node Filtering
# -------------------------------------------------------------------------
# Maximum RTT to consider a node (milliseconds, 0 = disabled)
max_rtt_ms = 200
# TCP connection timeout for pre-check (milliseconds)
tcp_timeout_ms = 1000
# Minimum Solana version required (e.g., "3.0.0", empty = no filter)
# min_node_version = "3.0.0"
# Allowed Solana versions (empty = all versions allowed)
# Example: allowed_node_versions = ["2.2.0", "3.0.0"]
# allowed_node_versions = []
# -------------------------------------------------------------------------
# Performance
# -------------------------------------------------------------------------
# Number of concurrent workers for node evaluation
worker_count = 100
# -------------------------------------------------------------------------
# Fallback Resilience
# -------------------------------------------------------------------------
# Maximum number of ranked snapshot sources to try before giving up
# This is a resilience mechanism - if the #1 fastest node's HTTP endpoint
# is down or snapshot was deleted, we try the next best ranked nodes.
# Set to 0 to try all available nodes.
max_snapshot_url_attempts = 3
# -------------------------------------------------------------------------
# Incremental Snapshot Selection
# -------------------------------------------------------------------------
# Minimum download speed for incremental snapshot sources (MB/s)
# Incrementals are ~1GB, so this filters out nodes that would take too long.
# At 2 MB/s: ~8 minutes for 1GB (acceptable)
# Set to 0 to disable speed filtering.
min_incremental_speed_mbs = 2.0
# ============================================================================
# [log] - Logging Configuration
# ============================================================================
#
# Mithril writes logs to files for easy debugging and sharing.
# Each run creates a new log file with timestamp and run ID.
[log]
# Log directory is set in [storage].logs above
# Log level: "debug" | "info" | "warn" | "error"
level = "info"
# Also write to stdout (set to false for daemon/service mode)
to_stdout = true
# Maximum log file size in MB before rotation (0 = no limit)
max_size_mb = 100
# Delete log files older than this many days (0 = never delete)
max_age_days = 30
# Maximum number of old log files to keep (0 = unlimited)
# NOTE(review): precedence between max_backups and max_age_days when both
# are set is not documented here - confirm against the implementation.
max_backups = 100