|
12 | 12 | #include <fs.h>
|
13 | 13 | #include <hash.h>
|
14 | 14 | #include <pow.h>
|
| 15 | +#include <reverse_iterator.h> |
15 | 16 | #include <shutdown.h>
|
16 | 17 | #include <signet.h>
|
17 | 18 | #include <streams.h>
|
@@ -47,6 +48,374 @@ static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
|
47 | 48 | static FlatFileSeq BlockFileSeq();
|
48 | 49 | static FlatFileSeq UndoFileSeq();
|
49 | 50 |
|
| 51 | +CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const |
| 52 | +{ |
| 53 | + AssertLockHeld(cs_main); |
| 54 | + BlockMap::const_iterator it = m_block_index.find(hash); |
| 55 | + return it == m_block_index.end() ? nullptr : it->second; |
| 56 | +} |
| 57 | + |
// Create (or return the existing) in-memory index entry for a block header,
// wiring it to its parent and updating chain-work bookkeeping. Requires cs_main.
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
{
    AssertLockHeld(cs_main);

    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = m_block_index.find(hash);
    if (it != m_block_index.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    // The map key owns the hash storage; phashBlock aliases it, so the pointer
    // remains valid for as long as the map entry exists.
    BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    // Link to the parent if its header is already indexed; otherwise pprev
    // stays null and the height/work fields below treat this as a root.
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
    if (miPrev != m_block_index.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();
    }
    // nTimeMax: running maximum timestamp along the chain; nChainWork:
    // cumulative proof-of-work including this header.
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    // Track the best (most-work) header seen so far.
    if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    // Queue the new entry for the next flush to the block tree database.
    setDirtyBlockIndex.insert(pindexNew);

    return pindexNew;
}
| 93 | + |
// Mark every block stored in blk/rev file `fileNumber` as no longer on disk
// and reset the file's bookkeeping. The index entries themselves are kept.
// Requires cs_main; takes cs_LastBlockFile for the vinfoBlockFile update.
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
    AssertLockHeld(cs_main);
    LOCK(cs_LastBlockFile);

    for (const auto& entry : m_block_index) {
        CBlockIndex* pindex = entry.second;
        if (pindex->nFile == fileNumber) {
            // Clear the on-disk location flags and positions for this block.
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
            pindex->nFile = 0;
            pindex->nDataPos = 0;
            pindex->nUndoPos = 0;
            setDirtyBlockIndex.insert(pindex);

            // Prune from m_blocks_unlinked -- any block we prune would have
            // to be downloaded again in order to consider its chain, at which
            // point it would be considered as a candidate for
            // m_blocks_unlinked or setBlockIndexCandidates.
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
            while (range.first != range.second) {
                // Advance the iterator before a possible erase so it stays valid.
                std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                range.first++;
                if (_it->second == pindex) {
                    m_blocks_unlinked.erase(_it);
                }
            }
        }
    }

    // Zero the file's size/height stats and queue the change for flushing.
    vinfoBlockFile[fileNumber].SetNull();
    setDirtyFileInfo.insert(fileNumber);
}
| 127 | + |
| 128 | +void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height) |
| 129 | +{ |
| 130 | + assert(fPruneMode && nManualPruneHeight > 0); |
| 131 | + |
| 132 | + LOCK2(cs_main, cs_LastBlockFile); |
| 133 | + if (chain_tip_height < 0) { |
| 134 | + return; |
| 135 | + } |
| 136 | + |
| 137 | + // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip) |
| 138 | + unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP); |
| 139 | + int count = 0; |
| 140 | + for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { |
| 141 | + if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) { |
| 142 | + continue; |
| 143 | + } |
| 144 | + PruneOneBlockFile(fileNumber); |
| 145 | + setFilesToPrune.insert(fileNumber); |
| 146 | + count++; |
| 147 | + } |
| 148 | + LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); |
| 149 | +} |
| 150 | + |
// Select block files to delete so total blk/rev usage falls back under
// nPruneTarget, never pruning above prune_height or within
// MIN_BLOCKS_TO_KEEP of the tip. Selected file numbers are added to
// setFilesToPrune; the caller performs the actual deletion.
void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
{
    LOCK2(cs_main, cs_LastBlockFile);
    // No tip yet, or pruning disabled (target of 0): nothing to do.
    if (chain_tip_height < 0 || nPruneTarget == 0) {
        return;
    }
    if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
        return;
    }

    // Highest block we may prune: keep at least MIN_BLOCKS_TO_KEEP below the tip.
    // NOTE(review): if chain_tip_height < MIN_BLOCKS_TO_KEEP the subtraction is
    // negative and the unsigned cast wraps to a huge value; presumably the
    // nPruneAfterHeight guard above keeps us from getting here with such a
    // short chain — confirm against callers.
    unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
    uint64_t nCurrentUsage = CalculateCurrentUsage();
    // We don't check to prune until after we've allocated new space for files
    // So we should leave a buffer under our target to account for another allocation
    // before the next pruning.
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
    uint64_t nBytesToPrune;
    int count = 0;

    if (nCurrentUsage + nBuffer >= nPruneTarget) {
        // On a prune event, the chainstate DB is flushed.
        // To avoid excessive prune events negating the benefit of high dbcache
        // values, we should not prune too rapidly.
        // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
        if (is_ibd) {
            // Since this is only relevant during IBD, we use a fixed 10%
            nBuffer += nPruneTarget / 10;
        }

        for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
            // Size this file contributes (block data plus undo data).
            nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;

            if (vinfoBlockFile[fileNumber].nSize == 0) {
                continue;
            }

            if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
                break;
            }

            // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
            if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                continue;
            }

            PruneOneBlockFile(fileNumber);
            // Queue up the files for removal
            setFilesToPrune.insert(fileNumber);
            nCurrentUsage -= nBytesToPrune;
            count++;
        }
    }

    LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
           nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
           ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
           nLastBlockWeCanPrune, count);
}
| 209 | + |
| 210 | +CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash) |
| 211 | +{ |
| 212 | + AssertLockHeld(cs_main); |
| 213 | + |
| 214 | + if (hash.IsNull()) |
| 215 | + return nullptr; |
| 216 | + |
| 217 | + // Return existing |
| 218 | + BlockMap::iterator mi = m_block_index.find(hash); |
| 219 | + if (mi != m_block_index.end()) |
| 220 | + return (*mi).second; |
| 221 | + |
| 222 | + // Create new |
| 223 | + CBlockIndex* pindexNew = new CBlockIndex(); |
| 224 | + mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first; |
| 225 | + pindexNew->phashBlock = &((*mi).first); |
| 226 | + |
| 227 | + return pindexNew; |
| 228 | +} |
| 229 | + |
// Rebuild the in-memory block index from the block tree database: load all
// entries, then walk them in height order computing chain work, linking
// parents, and seeding each chainstate's tip-candidate set. Returns false on
// DB failure or shutdown request.
bool BlockManager::LoadBlockIndex(
    const Consensus::Params& consensus_params,
    ChainstateManager& chainman)
{
    // Populate m_block_index from disk; InsertBlockIndex creates a bare entry
    // per hash which the loader then fills in.
    if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
        return false;
    }

    // Calculate nChainWork
    // Sort by height so every block is processed after its parent.
    std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(m_block_index.size());
    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
    }
    sort(vSortedByHeight.begin(), vSortedByHeight.end());

    // Find start of assumed-valid region.
    int first_assumed_valid_height = std::numeric_limits<int>::max();

    for (const auto& [height, block] : vSortedByHeight) {
        if (block->IsAssumedValid()) {
            auto chainstates = chainman.GetAll();

            // If we encounter an assumed-valid block index entry, ensure that we have
            // one chainstate that tolerates assumed-valid entries and another that does
            // not (i.e. the background validation chainstate), since assumed-valid
            // entries should always be pending validation by a fully-validated chainstate.
            auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
            assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
            assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));

            first_assumed_valid_height = height;
            break;
        }
    }

    for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
    {
        if (ShutdownRequested()) return false;
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);

        // We can link the chain of blocks for which we've received transactions at some point, or
        // blocks that are assumed-valid on the basis of snapshot load (see
        // PopulateAndValidateSnapshot()).
        // Pruned nodes may have deleted the block.
        if (pindex->nTx > 0) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx > 0) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent chain not yet connected: remember this block so it
                    // can be linked in once the parent's data becomes available.
                    pindex->nChainTx = 0;
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        // Propagate failure from a failed ancestor to this descendant.
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
            pindex->nStatus |= BLOCK_FAILED_CHILD;
            setDirtyBlockIndex.insert(pindex);
        }
        if (pindex->IsAssumedValid() ||
                (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                 (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {

            // Fill each chainstate's block candidate set. Only add assumed-valid
            // blocks to the tip candidate set if the chainstate is allowed to rely on
            // assumed-valid blocks.
            //
            // If all setBlockIndexCandidates contained the assumed-valid blocks, the
            // background chainstate's ActivateBestChain() call would add assumed-valid
            // blocks to the chain (based on how FindMostWorkChain() works). Obviously
            // we don't want this since the purpose of the background validation chain
            // is to validate assumed-valid blocks.
            //
            // Note: This is considering all blocks whose height is greater or equal to
            // the first assumed-valid block to be assumed-valid blocks, and excluding
            // them from the background chainstate's setBlockIndexCandidates set. This
            // does mean that some blocks which are not technically assumed-valid
            // (later blocks on a fork beginning before the first assumed-valid block)
            // might not get added to the background chainstate, but this is ok,
            // because they will still be attached to the active chainstate if they
            // actually contain more work.
            //
            // Instead of this height-based approach, an earlier attempt was made at
            // detecting "holistically" whether the block index under consideration
            // relied on an assumed-valid ancestor, but this proved to be too slow to
            // be practical.
            for (CChainState* chainstate : chainman.GetAll()) {
                if (chainstate->reliesOnAssumedValid() ||
                        pindex->nHeight < first_assumed_valid_height) {
                    chainstate->setBlockIndexCandidates.insert(pindex);
                }
            }
        }
        // Track the most-work invalid block seen, for diagnostics/reorg logic.
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
            chainman.m_best_invalid = pindex;
        }
        if (pindex->pprev)
            pindex->BuildSkip();
        // Keep pindexBestHeader pointing at the best valid-tree header.
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    return true;
}
| 340 | + |
| 341 | +void BlockManager::Unload() { |
| 342 | + m_blocks_unlinked.clear(); |
| 343 | + |
| 344 | + for (const BlockMap::value_type& entry : m_block_index) { |
| 345 | + delete entry.second; |
| 346 | + } |
| 347 | + |
| 348 | + m_block_index.clear(); |
| 349 | +} |
| 350 | + |
// Load the block index plus per-file metadata and flags from the block tree
// database. Returns false if the index fails to load or a referenced block
// file cannot be opened.
bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
{
    if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
        return false;
    }

    // Load block file info
    m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Also pick up any file records beyond nLastBlockFile, reading until the
    // DB has no more. NOTE(review): presumably these can exist after an
    // interrupted write/reindex — confirm.
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    std::set<int> setBlkDataFiles;
    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    // Fail hard if any block file referenced by the index cannot be opened.
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
    {
        FlatFilePos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    // Check whether we have ever pruned block & undo files
    m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
    if (fHavePruned)
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    m_block_tree_db->ReadReindexing(fReindexing);
    if(fReindexing) fReindex = true;

    return true;
}
| 403 | + |
| 404 | +CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data) |
| 405 | +{ |
| 406 | + const MapCheckpoints& checkpoints = data.mapCheckpoints; |
| 407 | + |
| 408 | + for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) |
| 409 | + { |
| 410 | + const uint256& hash = i.second; |
| 411 | + CBlockIndex* pindex = LookupBlockIndex(hash); |
| 412 | + if (pindex) { |
| 413 | + return pindex; |
| 414 | + } |
| 415 | + } |
| 416 | + return nullptr; |
| 417 | +} |
| 418 | + |
50 | 419 | bool IsBlockPruned(const CBlockIndex* pblockindex)
|
51 | 420 | {
|
52 | 421 | return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
|
|
0 commit comments