// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <headerssync.h>
#include <logging.h>
#include <pow.h>
#include <timedata.h>
#include <util/check.h>

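// Overview (descriptive summary of the code below): headers sync runs in two
// phases (see the State enum in headerssync.h).
// - PRESYNC: headers are downloaded and minimally validated (connectivity and
//   difficulty transitions), and a 1-bit salted commitment is stored every
//   HEADER_COMMITMENT_PERIOD blocks, until the claimed chain reaches
//   m_minimum_required_work.
// - REDOWNLOAD: the same headers are requested again, checked against the
//   stored commitments, buffered, and only handed to full validation once
//   enough additional headers have been verified on top of them.
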
// The two constants below are computed using the simulation script on
// https://gist.github.com/sipa/016ae445c132cdf65a2791534dfb7ae1

//! Store a commitment to a header every HEADER_COMMITMENT_PERIOD blocks.
constexpr size_t HEADER_COMMITMENT_PERIOD{584};

//! Only feed headers to validation once this many headers on top have been
//! received and validated against commitments.
constexpr size_t REDOWNLOAD_BUFFER_SIZE{13959}; // 13959/584 = ~23.9 commitments

// Our memory analysis assumes 48 bytes for a CompressedHeader (so we should
// re-calculate parameters if we compress further)
static_assert(sizeof(CompressedHeader) == 48);
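// For a sense of scale: a full redownload buffer holds REDOWNLOAD_BUFFER_SIZE
// CompressedHeaders, i.e. 13959 * 48 bytes (roughly 670 kB) per
// HeadersSyncState instance, in addition to one commitment bit per
// HEADER_COMMITMENT_PERIOD headers received during the presync phase.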

HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
        const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) :
    m_id(id), m_consensus_params(consensus_params),
    m_chain_start(chain_start),
    m_minimum_required_work(minimum_required_work),
    m_current_chain_work(chain_start->nChainWork),
    m_commit_offset(GetRand<unsigned>(HEADER_COMMITMENT_PERIOD)),
    m_last_header_received(m_chain_start->GetBlockHeader()),
    m_current_height(chain_start->nHeight)
{
    // Estimate the number of blocks that could possibly exist on the peer's
    // chain *right now* using 6 blocks/second (fastest blockrate given the MTP
    // rule) times the number of seconds from the last allowed block until
    // today. This serves as a memory bound on how many commitments we might
    // store from this peer, and we can safely give up syncing if the peer
    // exceeds this bound, because it's not possible for a consensus-valid
    // chain to be longer than this (at the current time -- in the future we
    // could try again, if necessary, to sync a longer chain).
    m_max_commitments = 6*(Ticks<std::chrono::seconds>(GetAdjustedTime() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
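    // (The 6 blocks/second bound follows from the MTP rule: each header's
    // timestamp must exceed the median of its 11 predecessors, which forces
    // timestamps to advance by at least one second for every six headers;
    // MAX_FUTURE_BLOCK_TIME accounts for the peer's tip being allowed to sit
    // slightly ahead of our adjusted time.)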

    LogPrint(BCLog::HEADERSSYNC, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
}

/** Free any memory in use, and mark this object as no longer usable. This is
 * required to guarantee that we won't reuse this object with the same
 * SaltedTxidHasher for another sync. */
void HeadersSyncState::Finalize()
{
    Assume(m_download_state != State::FINAL);
    m_header_commitments = {};
    m_last_header_received.SetNull();
    m_redownloaded_headers = {};
    m_redownload_buffer_last_hash.SetNull();
    m_redownload_buffer_first_prev_hash.SetNull();
    m_process_all_remaining_headers = false;
    m_current_height = 0;

    m_download_state = State::FINAL;
}

/** Process the next batch of headers received from our peer.
 * Validate and store commitments, and compare total chainwork to our target to
 * see if we can switch to REDOWNLOAD mode. */
HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const
    std::vector<CBlockHeader>& received_headers, const bool full_headers_message)
{
    ProcessingResult ret;

    Assume(!received_headers.empty());
    if (received_headers.empty()) return ret;

    Assume(m_download_state != State::FINAL);
    if (m_download_state == State::FINAL) return ret;

    if (m_download_state == State::PRESYNC) {
        // During PRESYNC, we minimally validate block headers and
        // occasionally add commitments to them, until we reach our work
        // threshold (at which point m_download_state is updated to REDOWNLOAD).
        ret.success = ValidateAndStoreHeadersCommitments(received_headers);
        if (ret.success) {
            if (full_headers_message || m_download_state == State::REDOWNLOAD) {
                // A full headers message means the peer may have more to give us;
                // also if we just switched to REDOWNLOAD then we need to re-request
                // headers from the beginning.
                ret.request_more = true;
            } else {
                Assume(m_download_state == State::PRESYNC);
                // If we're in PRESYNC and we get a non-full headers
                // message, then the peer's chain has ended and definitely doesn't
                // have enough work, so we can stop our sync.
                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (presync phase)\n", m_id, m_current_height);
            }
        }
    } else if (m_download_state == State::REDOWNLOAD) {
        // During REDOWNLOAD, we compare our stored commitments to what we
        // receive, and add headers to our redownload buffer. When the buffer
        // gets big enough (meaning that we've checked enough commitments),
        // we'll return a batch of headers to the caller for processing.
        ret.success = true;
        for (const auto& hdr : received_headers) {
            if (!ValidateAndStoreRedownloadedHeader(hdr)) {
                // Something went wrong -- the peer gave us an unexpected chain.
                // We could consider looking at the reason for failure and
                // punishing the peer, but for now just give up on sync.
                ret.success = false;
                break;
            }
        }

        if (ret.success) {
            // Return any headers that are ready for acceptance.
            ret.pow_validated_headers = PopHeadersReadyForAcceptance();

            // If we hit our target blockhash, then all remaining headers will be
            // returned and we can clear any leftover internal state.
            if (m_redownloaded_headers.empty() && m_process_all_remaining_headers) {
                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync complete with peer=%d: releasing all at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
            } else if (full_headers_message) {
                // If the headers message is full, we need to request more.
                ret.request_more = true;
            } else {
                // For some reason our peer gave us a high-work chain, but is now
                // declining to serve us that full chain again. Give up.
                // Note that there's no more processing to be done with these
                // headers, so we can still return success.
                LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
            }
        }
    }

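    // Any outcome other than "succeeded, and more headers are expected" ends
    // this sync, so release all internal state now.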
    if (!(ret.success && ret.request_more)) Finalize();
    return ret;
}

bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector<CBlockHeader>& headers)
{
    // The caller should not give us an empty set of headers.
    Assume(headers.size() > 0);
    if (headers.size() == 0) return true;

    Assume(m_download_state == State::PRESYNC);
    if (m_download_state != State::PRESYNC) return false;

    if (headers[0].hashPrevBlock != m_last_header_received.GetHash()) {
        // Somehow our peer gave us a header that doesn't connect.
        // This might be benign -- perhaps our peer reorged away from the chain
        // they were on. Give up on this sync for now (likely we will start a
        // new sync with a new starting point).
        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (presync phase)\n", m_id, m_current_height);
        return false;
    }

    // If it does connect, (minimally) validate and occasionally store
    // commitments.
    for (const auto& hdr : headers) {
        if (!ValidateAndProcessSingleHeader(hdr)) {
            return false;
        }
    }

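    // If the claimed chain has now reached the required amount of work, switch
    // to REDOWNLOAD: reset the redownload bookkeeping to the chain start so
    // that the headers are fetched a second time and verified against the
    // stored commitments before being handed to validation.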
    if (m_current_chain_work >= m_minimum_required_work) {
        m_redownloaded_headers.clear();
        m_redownload_buffer_last_height = m_chain_start->nHeight;
        m_redownload_buffer_first_prev_hash = m_chain_start->GetBlockHash();
        m_redownload_buffer_last_hash = m_chain_start->GetBlockHash();
        m_redownload_chain_work = m_chain_start->nChainWork;
        m_download_state = State::REDOWNLOAD;
        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
    }
    return true;
}

bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& current)
{
    Assume(m_download_state == State::PRESYNC);
    if (m_download_state != State::PRESYNC) return false;

    int next_height = m_current_height + 1;

    // Verify that the difficulty isn't growing too fast; an adversary with
    // limited hashing capability has a greater chance of producing a high
    // work chain if they compress the work into as few blocks as possible,
    // so don't let anyone give us a chain that would violate the difficulty
    // adjustment maximum.
    if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                m_last_header_received.nBits, current.nBits)) {
        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (presync phase)\n", m_id, next_height);
        return false;
    }

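    // Commitments are a single bit of the salted hash of the header, stored at
    // a per-sync random offset within each HEADER_COMMITMENT_PERIOD window.
    // During REDOWNLOAD, a peer substituting a different chain does not know
    // the salt or the offset, so a differing header at a commitment height
    // matches its stored bit only with probability 1/2.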
    if (next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) {
        // Add a commitment.
        m_header_commitments.push_back(m_hasher(current.GetHash()) & 1);
        if (m_header_commitments.size() > m_max_commitments) {
            // The peer's chain is too long; give up.
            // It's possible the chain grew since we started the sync; so
            // potentially we could succeed in syncing the peer's chain if we
            // try again later.
            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: exceeded max commitments at height=%i (presync phase)\n", m_id, next_height);
            return false;
        }
    }

    m_current_chain_work += GetBlockProof(CBlockIndex(current));
    m_last_header_received = current;
    m_current_height = next_height;

    return true;
}

bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& header)
{
    Assume(m_download_state == State::REDOWNLOAD);
    if (m_download_state != State::REDOWNLOAD) return false;

    int64_t next_height = m_redownload_buffer_last_height + 1;

    // Ensure that we're working on a header that connects to the chain we're
    // downloading.
    if (header.hashPrevBlock != m_redownload_buffer_last_hash) {
        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (redownload phase)\n", m_id, next_height);
        return false;
    }

    // Check that the difficulty adjustments are within our tolerance:
    uint32_t previous_nBits{0};
    if (!m_redownloaded_headers.empty()) {
        previous_nBits = m_redownloaded_headers.back().nBits;
    } else {
        previous_nBits = m_chain_start->nBits;
    }

    if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                previous_nBits, header.nBits)) {
        LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (redownload phase)\n", m_id, next_height);
        return false;
    }

    // Track work on the redownloaded chain
    m_redownload_chain_work += GetBlockProof(CBlockIndex(header));

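    // Once the redownloaded chain has claimed sufficient work, all remaining
    // headers can be released to validation as soon as they arrive (see
    // PopHeadersReadyForAcceptance) and no further commitment checks are
    // needed.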
    if (m_redownload_chain_work >= m_minimum_required_work) {
        m_process_all_remaining_headers = true;
    }

    // If we're at a header for which we previously stored a commitment, verify
    // it is correct. Failure will result in aborting download.
    // Also, don't check commitments once we've gotten to our target blockhash;
    // it's possible our peer has extended its chain between our first sync and
    // our second, and we don't want to return failure after we've seen our
    // target blockhash just because we ran out of commitments.
    if (!m_process_all_remaining_headers && next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) {
        if (m_header_commitments.size() == 0) {
            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
            // Somehow our peer managed to feed us a different chain and
            // we've run out of commitments.
            return false;
        }
        bool commitment = m_hasher(header.GetHash()) & 1;
        bool expected_commitment = m_header_commitments.front();
        m_header_commitments.pop_front();
        if (commitment != expected_commitment) {
            LogPrint(BCLog::HEADERSSYNC, "Initial headers sync aborted with peer=%d: commitment mismatch at height=%i (redownload phase)\n", m_id, next_height);
            return false;
        }
    }

    // Store this header for later processing.
    m_redownloaded_headers.push_back(header);
    m_redownload_buffer_last_height = next_height;
    m_redownload_buffer_last_hash = header.GetHash();

    return true;
}

std::vector<CBlockHeader> HeadersSyncState::PopHeadersReadyForAcceptance()
{
    std::vector<CBlockHeader> ret;

    Assume(m_download_state == State::REDOWNLOAD);
    if (m_download_state != State::REDOWNLOAD) return ret;

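    // Each stored CompressedHeader omits hashPrevBlock; reconstruct the full
    // header using the hash of the previously returned header (initially the
    // chain start), advancing m_redownload_buffer_first_prev_hash as we go.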
    while (m_redownloaded_headers.size() > REDOWNLOAD_BUFFER_SIZE ||
            (m_redownloaded_headers.size() > 0 && m_process_all_remaining_headers)) {
        ret.emplace_back(m_redownloaded_headers.front().GetFullHeader(m_redownload_buffer_first_prev_hash));
        m_redownloaded_headers.pop_front();
        m_redownload_buffer_first_prev_hash = ret.back().GetHash();
    }
    return ret;
}

CBlockLocator HeadersSyncState::NextHeadersRequestLocator() const
{
    Assume(m_download_state != State::FINAL);
    if (m_download_state == State::FINAL) return {};

    auto chain_start_locator = LocatorEntries(m_chain_start);
    std::vector<uint256> locator;

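    // The locator leads with the most recent hash for the current phase and
    // then falls back to entries walking back from the chain start, so the
    // peer can still find a common block if it no longer has that hash.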
    if (m_download_state == State::PRESYNC) {
        // During pre-synchronization, we continue from the last header received.
        locator.push_back(m_last_header_received.GetHash());
    }

    if (m_download_state == State::REDOWNLOAD) {
        // During redownload, we will download from the last received header that we stored.
        locator.push_back(m_redownload_buffer_last_hash);
    }

    locator.insert(locator.end(), chain_start_locator.begin(), chain_start_locator.end());

    return CBlockLocator{std::move(locator)};
}