|
| 1 | +import * as path from 'path'; |
| 2 | +import * as fs from 'fs'; |
| 3 | +import { cycleMigrations, dangerousDropAllTables, PgDataStore } from '../datastore/postgres-store'; |
| 4 | +import { startEventServer } from '../event-stream/event-server'; |
| 5 | +import { getApiConfiguredChainID, httpPostRequest, logger } from '../helpers'; |
| 6 | +import { findTsvBlockHeight, getDbBlockHeight } from './helpers'; |
| 7 | + |
/** Controls how much of the TSV event history is replayed into the database. */
enum EventImportMode {
  /**
   * The Event Server will ingest and process every single Stacks node event contained in the TSV file
   * from block 0 to the latest block. This is the default mode.
   */
  archival = 'archival',
  /**
   * The Event Server will ignore certain "prunable" events (see `PRUNABLE_EVENT_PATHS`) from
   * the imported TSV file if they are received outside of a block window, usually set to
   * TSV's `block_height` - 256.
   * This allows the import to be faster at the expense of historical blockchain information.
   */
  pruned = 'pruned',
}
| 22 | + |
| 23 | +/** |
| 24 | + * Event paths that will be ignored during `EventImportMode.pruned` if received outside of the |
| 25 | + * pruned block window. |
| 26 | + */ |
| 27 | +const PRUNABLE_EVENT_PATHS = ['/new_mempool_tx', '/drop_mempool_tx', '/new_microblocks']; |
| 28 | + |
| 29 | +/** |
| 30 | + * Exports all Stacks node events stored in the `event_observer_requests` table to a TSV file. |
| 31 | + * @param filePath - Path to TSV file to write |
| 32 | + * @param overwriteFile - If we should overwrite the file |
| 33 | + */ |
| 34 | +export async function exportEventsAsTsv( |
| 35 | + filePath?: string, |
| 36 | + overwriteFile: boolean = false |
| 37 | +): Promise<void> { |
| 38 | + if (!filePath) { |
| 39 | + throw new Error(`A file path should be specified with the --file option`); |
| 40 | + } |
| 41 | + const resolvedFilePath = path.resolve(filePath); |
| 42 | + if (fs.existsSync(resolvedFilePath) && overwriteFile !== true) { |
| 43 | + throw new Error( |
| 44 | + `A file already exists at ${resolvedFilePath}. Add --overwrite-file to truncate an existing file` |
| 45 | + ); |
| 46 | + } |
| 47 | + console.log(`Export event data to file: ${resolvedFilePath}`); |
| 48 | + const writeStream = fs.createWriteStream(resolvedFilePath); |
| 49 | + console.log(`Export started...`); |
| 50 | + await PgDataStore.exportRawEventRequests(writeStream); |
| 51 | + console.log('Export successful.'); |
| 52 | +} |
| 53 | + |
| 54 | +/** |
| 55 | + * Imports Stacks node events from a TSV file and ingests them through the Event Server. |
| 56 | + * @param filePath - Path to TSV file to read |
| 57 | + * @param importMode - Event import mode |
| 58 | + * @param wipeDb - If we should wipe the DB before importing |
| 59 | + * @param force - If we should force drop all tables |
| 60 | + */ |
| 61 | +export async function importEventsFromTsv( |
| 62 | + filePath?: string, |
| 63 | + importMode?: string, |
| 64 | + wipeDb: boolean = false, |
| 65 | + force: boolean = false |
| 66 | +): Promise<void> { |
| 67 | + if (!filePath) { |
| 68 | + throw new Error(`A file path should be specified with the --file option`); |
| 69 | + } |
| 70 | + const resolvedFilePath = path.resolve(filePath); |
| 71 | + if (!fs.existsSync(resolvedFilePath)) { |
| 72 | + throw new Error(`File does not exist: ${resolvedFilePath}`); |
| 73 | + } |
| 74 | + let eventImportMode: EventImportMode; |
| 75 | + switch (importMode) { |
| 76 | + case 'pruned': |
| 77 | + eventImportMode = EventImportMode.pruned; |
| 78 | + break; |
| 79 | + case 'archival': |
| 80 | + case undefined: |
| 81 | + eventImportMode = EventImportMode.archival; |
| 82 | + break; |
| 83 | + default: |
| 84 | + throw new Error(`Invalid event import mode: ${importMode}`); |
| 85 | + } |
| 86 | + const hasData = await PgDataStore.containsAnyRawEventRequests(); |
| 87 | + if (!wipeDb && hasData) { |
| 88 | + throw new Error(`Database contains existing data. Add --wipe-db to drop the existing tables.`); |
| 89 | + } |
| 90 | + if (force) { |
| 91 | + await dangerousDropAllTables({ acknowledgePotentialCatastrophicConsequences: 'yes' }); |
| 92 | + } |
| 93 | + |
| 94 | + // This performs a "migration down" which drops the tables, then re-creates them. |
| 95 | + // If there's a breaking change in the migration files, this will throw, and the pg database needs wiped manually, |
| 96 | + // or the `--force` option can be used. |
| 97 | + await cycleMigrations({ dangerousAllowDataLoss: true }); |
| 98 | + |
| 99 | + // Look for the TSV's block height and determine the prunable block window. |
| 100 | + const tsvBlockHeight = await findTsvBlockHeight(resolvedFilePath); |
| 101 | + const blockWindowSize = parseInt( |
| 102 | + process.env['STACKS_MEMPOOL_TX_GARBAGE_COLLECTION_THRESHOLD'] ?? '256' |
| 103 | + ); |
| 104 | + const prunedBlockHeight = Math.max(tsvBlockHeight - blockWindowSize, 0); |
| 105 | + console.log(`Event file's block height: ${tsvBlockHeight}`); |
| 106 | + console.log(`Starting event import and playback in ${eventImportMode} mode`); |
| 107 | + if (eventImportMode === EventImportMode.pruned) { |
| 108 | + console.log(`Ignoring all prunable events before block height: ${prunedBlockHeight}`); |
| 109 | + } |
| 110 | + |
| 111 | + const db = await PgDataStore.connect({ |
| 112 | + usageName: 'import-events', |
| 113 | + skipMigrations: true, |
| 114 | + withNotifier: false, |
| 115 | + eventReplay: true, |
| 116 | + }); |
| 117 | + const eventServer = await startEventServer({ |
| 118 | + datastore: db, |
| 119 | + chainId: getApiConfiguredChainID(), |
| 120 | + serverHost: '127.0.0.1', |
| 121 | + serverPort: 0, |
| 122 | + httpLogLevel: 'debug', |
| 123 | + }); |
| 124 | + |
| 125 | + const readStream = fs.createReadStream(resolvedFilePath); |
| 126 | + const rawEventsIterator = PgDataStore.getRawEventRequests(readStream, status => { |
| 127 | + console.log(status); |
| 128 | + }); |
| 129 | + // Set logger to only output for warnings/errors, otherwise the event replay will result |
| 130 | + // in the equivalent of months/years of API log output. |
| 131 | + logger.level = 'warn'; |
| 132 | + // Disable this feature so a redundant export file isn't created while importing from an existing one. |
| 133 | + delete process.env['STACKS_EXPORT_EVENTS_FILE']; |
| 134 | + // The current import block height. Will be updated with every `/new_block` event. |
| 135 | + let blockHeight = 0; |
| 136 | + let isPruneFinished = false; |
| 137 | + for await (const rawEvents of rawEventsIterator) { |
| 138 | + for (const rawEvent of rawEvents) { |
| 139 | + if (eventImportMode === EventImportMode.pruned) { |
| 140 | + if (PRUNABLE_EVENT_PATHS.includes(rawEvent.event_path) && blockHeight < prunedBlockHeight) { |
| 141 | + // Prunable events are ignored here. |
| 142 | + continue; |
| 143 | + } |
| 144 | + if (blockHeight == prunedBlockHeight && !isPruneFinished) { |
| 145 | + isPruneFinished = true; |
| 146 | + console.log(`Resuming prunable event import...`); |
| 147 | + } |
| 148 | + } |
| 149 | + await httpPostRequest({ |
| 150 | + host: '127.0.0.1', |
| 151 | + port: eventServer.serverAddress.port, |
| 152 | + path: rawEvent.event_path, |
| 153 | + headers: { 'Content-Type': 'application/json' }, |
| 154 | + body: Buffer.from(rawEvent.payload, 'utf8'), |
| 155 | + throwOnNotOK: true, |
| 156 | + }); |
| 157 | + if (rawEvent.event_path === '/new_block') { |
| 158 | + blockHeight = await getDbBlockHeight(db); |
| 159 | + } |
| 160 | + } |
| 161 | + } |
| 162 | + await db.finishEventReplay(); |
| 163 | + console.log(`Event import and playback successful.`); |
| 164 | + await eventServer.closeAsync(); |
| 165 | + await db.close(); |
| 166 | +} |
0 commit comments