// Copyright (c) 2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <map>

#include <index/blockfilterindex.h>
#include <util/system.h>
#include <validation.h>

/* The index database stores three items for each block: the disk location of the encoded filter,
 * its dSHA256 hash, and the header. Those belonging to blocks on the active chain are indexed by
 * height, and those belonging to blocks that have been reorganized out of the active chain are
 * indexed by block hash. This ensures that filter data for any block that becomes part of the
 * active chain can always be retrieved, alleviating timing concerns.
 *
 * The filters themselves are stored in flat files and referenced by the LevelDB entries. This
 * minimizes the amount of data written to LevelDB and keeps the database values constant size. The
 * disk location of the next block filter to be written (represented as a FlatFilePos) is stored
 * under the DB_FILTER_POS key.
 *
 * Keys for the height index have the type [DB_BLOCK_HEIGHT, uint32 (BE)]. The height is
 * represented as big-endian so that sequential reads of filters by height are fast.
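 * For example, the key for height 500000 serializes as the byte 't' (0x74) followed by
 * 0x0007a120, so LevelDB's lexicographic key ordering coincides with height ordering.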
 * Keys for the hash index have the type [DB_BLOCK_HASH, uint256].
 */
constexpr char DB_BLOCK_HASH = 's';
constexpr char DB_BLOCK_HEIGHT = 't';
constexpr char DB_FILTER_POS = 'P';

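/** The maximum size of a fltr?????.dat file; writes roll over to a new file rather than exceed it */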
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for fltr?????.dat files */
constexpr unsigned int FLTR_FILE_CHUNK_SIZE = 0x100000; // 1 MiB

namespace {

struct DBVal {
    uint256 hash;
    uint256 header;
    FlatFilePos pos;

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream& s, Operation ser_action) {
        READWRITE(hash);
        READWRITE(header);
        READWRITE(pos);
    }
};

struct DBHeightKey {
    int height;

    DBHeightKey() : height(0) {}
    DBHeightKey(int height_in) : height(height_in) {}

    template<typename Stream>
    void Serialize(Stream& s) const
    {
        ser_writedata8(s, DB_BLOCK_HEIGHT);
        ser_writedata32be(s, height);
    }

    template<typename Stream>
    void Unserialize(Stream& s)
    {
        char prefix = ser_readdata8(s);
        if (prefix != DB_BLOCK_HEIGHT) {
            throw std::ios_base::failure("Invalid format for block filter index DB height key");
        }
        height = ser_readdata32be(s);
    }
};

struct DBHashKey {
    uint256 hash;

    DBHashKey(const uint256& hash_in) : hash(hash_in) {}

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream& s, Operation ser_action) {
        char prefix = DB_BLOCK_HASH;
        READWRITE(prefix);
        if (prefix != DB_BLOCK_HASH) {
            throw std::ios_base::failure("Invalid format for block filter index DB hash key");
        }

        READWRITE(hash);
    }
};
} // namespace

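// The index lives in <datadir>/indexes/blockfilter/<filter name>/, with the LevelDB
// database under db/ and the filter data in the flat fltr?????.dat files.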
BlockFilterIndex::BlockFilterIndex(BlockFilterType filter_type,
                                   size_t n_cache_size, bool f_memory, bool f_wipe)
    : m_filter_type(filter_type)
{
    const std::string& filter_name = BlockFilterTypeName(filter_type);
    if (filter_name.empty()) throw std::invalid_argument("unknown filter_type");

    fs::path path = GetDataDir() / "indexes" / "blockfilter" / filter_name;
    fs::create_directories(path);

    m_name = filter_name + " block filter index";
    m_db = MakeUnique<BaseIndex::DB>(path / "db", n_cache_size, f_memory, f_wipe);
    m_filter_fileseq = MakeUnique<FlatFileSeq>(std::move(path), "fltr", FLTR_FILE_CHUNK_SIZE);
}

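// Loads the next filter write position from the database; a missing DB_FILTER_POS key
// means the index is starting from scratch, so the position defaults to the beginning
// of the first file.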
bool BlockFilterIndex::Init()
{
    if (!m_db->Read(DB_FILTER_POS, m_next_filter_pos)) {
        // Check that the cause of the read failure is that the key does not exist. Any other
        // errors indicate database corruption or a disk failure, and starting the index would
        // cause further corruption.
        if (m_db->Exists(DB_FILTER_POS)) {
            return error("%s: Cannot read current %s state; index may be corrupted",
                         __func__, GetName());
        }

        // If the DB_FILTER_POS is not set, then initialize to the first location.
        m_next_filter_pos.nFile = 0;
        m_next_filter_pos.nPos = 0;
    }
    return BaseIndex::Init();
}

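// Flushes the in-progress filter file to disk and stores the next write position in the
// batch, so that it is committed atomically with the base index state.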
bool BlockFilterIndex::CommitInternal(CDBBatch& batch)
{
    const FlatFilePos& pos = m_next_filter_pos;

    // Flush current filter file to disk.
    CAutoFile file(m_filter_fileseq->Open(pos), SER_DISK, CLIENT_VERSION);
    if (file.IsNull()) {
        return error("%s: Failed to open filter file %d", __func__, pos.nFile);
    }
    if (!FileCommit(file.Get())) {
        return error("%s: Failed to commit filter file %d", __func__, pos.nFile);
    }

    batch.Write(DB_FILTER_POS, pos);
    return BaseIndex::CommitInternal(batch);
}

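// Appends the block hash and encoded filter to the flat file at pos, truncating and
// starting a new file if the write would push the current one past MAX_FLTR_FILE_SIZE.
// Returns the number of bytes written, or 0 on failure.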
size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos& pos, const BlockFilter& filter)
{
    assert(filter.GetFilterType() == GetFilterType());

    size_t data_size =
        GetSerializeSize(filter.GetBlockHash(), CLIENT_VERSION) +
        GetSerializeSize(filter.GetEncodedFilter(), CLIENT_VERSION);

    // If writing the filter would overflow the file, flush and move to the next one.
    if (pos.nPos + data_size > MAX_FLTR_FILE_SIZE) {
        CAutoFile last_file(m_filter_fileseq->Open(pos), SER_DISK, CLIENT_VERSION);
        if (last_file.IsNull()) {
            LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile);
            return 0;
        }
        if (!TruncateFile(last_file.Get(), pos.nPos)) {
            LogPrintf("%s: Failed to truncate filter file %d\n", __func__, pos.nFile);
            return 0;
        }
        if (!FileCommit(last_file.Get())) {
            LogPrintf("%s: Failed to commit filter file %d\n", __func__, pos.nFile);
            return 0;
        }

        pos.nFile++;
        pos.nPos = 0;
    }

    // Pre-allocate sufficient space for filter data.
    bool out_of_space;
    m_filter_fileseq->Allocate(pos, data_size, out_of_space);
    if (out_of_space) {
        LogPrintf("%s: out of disk space\n", __func__);
        return 0;
    }

    CAutoFile fileout(m_filter_fileseq->Open(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull()) {
        LogPrintf("%s: Failed to open filter file %d\n", __func__, pos.nFile);
        return 0;
    }

    fileout << filter.GetBlockHash() << filter.GetEncodedFilter();
    return data_size;
}

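// Builds the filter for the newly connected block, appends it to the filter files, and
// writes the (block hash, DBVal) pair under the block's height key. The filter header
// is chained onto the previous block's header, which is read back from the height index.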
bool BlockFilterIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
{
    CBlockUndo block_undo;
    uint256 prev_header;

    if (pindex->nHeight > 0) {
        if (!UndoReadFromDisk(block_undo, pindex)) {
            return false;
        }

        std::pair<uint256, DBVal> read_out;
        if (!m_db->Read(DBHeightKey(pindex->nHeight - 1), read_out)) {
            return false;
        }

        uint256 expected_block_hash = pindex->pprev->GetBlockHash();
        if (read_out.first != expected_block_hash) {
            return error("%s: previous block header belongs to unexpected block %s; expected %s",
                         __func__, read_out.first.ToString(), expected_block_hash.ToString());
        }

        prev_header = read_out.second.header;
    }

    BlockFilter filter(m_filter_type, block, block_undo);

    size_t bytes_written = WriteFilterToDisk(m_next_filter_pos, filter);
    if (bytes_written == 0) return false;

    std::pair<uint256, DBVal> value;
    value.first = pindex->GetBlockHash();
    value.second.hash = filter.GetHash();
    value.second.header = filter.ComputeHeader(prev_header);
    value.second.pos = m_next_filter_pos;

    if (!m_db->Write(DBHeightKey(pindex->nHeight), value)) {
        return false;
    }

    m_next_filter_pos.nPos += bytes_written;
    return true;
}

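// Copies the DBVal entries for heights [start_height, stop_height] from the height index
// into the hash index, keyed by block hash, so that the entries for disconnected blocks
// remain retrievable after their height slots are overwritten.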
static bool CopyHeightIndexToHashIndex(CDBIterator& db_it, CDBBatch& batch,
                                       const std::string& index_name,
                                       int start_height, int stop_height)
{
    DBHeightKey key(start_height);
    db_it.Seek(key);

    for (int height = start_height; height <= stop_height; ++height) {
        if (!db_it.GetKey(key) || key.height != height) {
            return error("%s: unexpected key in %s: expected (%c, %d)",
                         __func__, index_name, DB_BLOCK_HEIGHT, height);
        }

        std::pair<uint256, DBVal> value;
        if (!db_it.GetValue(value)) {
            return error("%s: unable to read value in %s at key (%c, %d)",
                         __func__, index_name, DB_BLOCK_HEIGHT, height);
        }

        batch.Write(DBHashKey(value.first), std::move(value.second));

        db_it.Next();
    }
    return true;
}

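// Handles a reorg by re-indexing the filters of the disconnected blocks under their block
// hashes before the base class rewinds the index's best block to new_tip.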
bool BlockFilterIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip)
{
    assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);

    CDBBatch batch(*m_db);
    std::unique_ptr<CDBIterator> db_it(m_db->NewIterator());

    // During a reorg, we need to copy all filters for blocks that are getting disconnected from
    // the height index to the hash index so we can still find them when the height index entries
    // are overwritten.
    if (!CopyHeightIndexToHashIndex(*db_it, batch, m_name, new_tip->nHeight, current_tip->nHeight)) {
        return false;
    }

    // The latest filter position gets written in Commit by the call to BaseIndex::Rewind.
    // But since this batch creates new references to the filter data, the position should
    // also be updated here atomically, in case Commit fails.
    batch.Write(DB_FILTER_POS, m_next_filter_pos);
    if (!m_db->WriteBatch(batch)) return false;

    return BaseIndex::Rewind(current_tip, new_tip);
}