Skip to content

Commit 6cec20d

Browse files
authored
Merge pull request #1357 from hirosystems/6.1.0
6.1.0
2 parents 08887ac + 46ccf06 commit 6cec20d

File tree

8 files changed

+122
-80
lines changed

8 files changed

+122
-80
lines changed

.env

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,10 @@ PG_PASSWORD=postgres
55
PG_DATABASE=stacks_blockchain_api
66
PG_SCHEMA=public
77
PG_SSL=false
8+
# Idle connection timeout in seconds, defaults to 30
9+
# PG_IDLE_TIMEOUT=30
10+
# Max connection lifetime in seconds, defaults to 60
11+
# PG_MAX_LIFETIME=60
812

913
# Can be any string, used to specify a use case specific to a deployment
1014
PG_APPLICATION_NAME=stacks-blockchain-api
@@ -27,12 +31,13 @@ PG_APPLICATION_NAME=stacks-blockchain-api
2731
# PG_PRIMARY_DATABASE=
2832
# PG_PRIMARY_SCHEMA=
2933
# PG_PRIMARY_SSL=
34+
# PG_PRIMARY_IDLE_TIMEOUT=
35+
# PG_PRIMARY_MAX_LIFETIME=
3036
# The connection URI below can be used in place of the PG variables above,
3137
# but if used, it must be defined on its own (do not combine it with the PG variables above).
3238
# PG_PRIMARY_CONNECTION_URI=
3339

3440
# Limit to how many concurrent connections can be created, defaults to 10
35-
# See https://node-postgres.com/api/pool
3641
# PG_CONNECTION_POOL_MAX=10
3742

3843
# If specified, controls the Stacks Blockchain API mode. The possible values are:

.gitpod.Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
FROM blockstack/stacks-blockchain:2.05.0.3.0-stretch as corenode
1+
FROM blockstack/stacks-blockchain:2.05.0.4.0 as corenode
22

33
FROM gitpod/workspace-postgres
44

docker/stx-rosetta.Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
ARG STACKS_API_VERSION=v0.71.2
2-
ARG STACKS_NODE_VERSION=2.05.0.3.0
2+
ARG STACKS_NODE_VERSION=2.05.0.4.0
33
ARG STACKS_API_REPO=blockstack/stacks-blockchain-api
44
ARG STACKS_NODE_REPO=blockstack/stacks-blockchain
55
ARG PG_VERSION=12

src/datastore/connection.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,8 @@ export function getPostgres({
138138
ssl: pgEnvValue('SSL'),
139139
schema: pgEnvValue('SCHEMA'),
140140
applicationName: pgEnvValue('APPLICATION_NAME'),
141+
idleTimeout: parseInt(pgEnvValue('IDLE_TIMEOUT') ?? '30'),
142+
maxLifetime: parseInt(pgEnvValue('MAX_LIFETIME') ?? '60'),
141143
poolMax: parseInt(process.env['PG_CONNECTION_POOL_MAX'] ?? '10'),
142144
};
143145
const defaultAppName = 'stacks-blockchain-api';
@@ -180,6 +182,8 @@ export function getPostgres({
180182
host: pgEnvVars.host,
181183
port: parsePort(pgEnvVars.port),
182184
ssl: parseArgBoolean(pgEnvVars.ssl),
185+
idle_timeout: pgEnvVars.idleTimeout,
186+
max_lifetime: pgEnvVars.maxLifetime,
183187
max: pgEnvVars.poolMax,
184188
types: PG_TYPE_MAPPINGS,
185189
connection: {

src/datastore/pg-write-store.ts

Lines changed: 63 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -188,6 +188,8 @@ export class PgWriteStore extends PgStore {
188188
async update(data: DataStoreBlockUpdateData): Promise<void> {
189189
const tokenMetadataQueueEntries: DbTokenMetadataQueueEntry[] = [];
190190
let garbageCollectedMempoolTxs: string[] = [];
191+
let batchedTxData: DataStoreTxEventData[] = [];
192+
191193
await this.sql.begin(async sql => {
192194
const chainTip = await this.getChainTip(sql, false);
193195
await this.handleReorg(sql, data.block, chainTip.blockHeight);
@@ -257,7 +259,7 @@ export class PgWriteStore extends PgStore {
257259
data.block.execution_cost_write_count = totalCost.execution_cost_write_count;
258260
data.block.execution_cost_write_length = totalCost.execution_cost_write_length;
259261

260-
let batchedTxData: DataStoreTxEventData[] = data.txs;
262+
batchedTxData = data.txs;
261263

262264
// Find microblocks that weren't already inserted via the unconfirmed microblock event.
263265
// This happens when a stacks-node is syncing and receives confirmed microblocks with their anchor block at the same time.
@@ -358,8 +360,6 @@ export class PgWriteStore extends PgStore {
358360
await this.updateNames(sql, entry.tx, bnsName);
359361
}
360362
}
361-
await this.refreshNftCustody(sql, batchedTxData);
362-
await this.refreshMaterializedView(sql, 'chain_tip');
363363
const mempoolGarbageResults = await this.deleteGarbageCollectedMempoolTxs(sql);
364364
if (mempoolGarbageResults.deletedTxs.length > 0) {
365365
logger.verbose(
@@ -399,6 +399,10 @@ export class PgWriteStore extends PgStore {
399399
}
400400
});
401401

402+
await this.refreshNftCustody(batchedTxData);
403+
await this.refreshMaterializedView('chain_tip');
404+
await this.refreshMaterializedView('mempool_digest');
405+
402406
// Skip sending `PgNotifier` updates altogether if we're in the genesis block since this block is the
403407
// event replay of the v1 blockchain.
404408
if ((data.block.block_height > 1 || !isProdEnv) && this.notifier) {
@@ -529,6 +533,9 @@ export class PgWriteStore extends PgStore {
529533
}
530534

531535
async updateMicroblocksInternal(data: DataStoreMicroblockUpdateData): Promise<void> {
536+
const txData: DataStoreTxEventData[] = [];
537+
let dbMicroblocks: DbMicroblock[] = [];
538+
532539
await this.sql.begin(async sql => {
533540
// Sanity check: ensure incoming microblocks have a `parent_index_block_hash` that matches the API's
534541
// current known canonical chain tip. We assume this holds true so incoming microblock data is always
@@ -550,7 +557,7 @@ export class PgWriteStore extends PgStore {
550557

551558
// The block height is just one after the current chain tip height
552559
const blockHeight = chainTip.blockHeight + 1;
553-
const dbMicroblocks = data.microblocks.map(mb => {
560+
dbMicroblocks = data.microblocks.map(mb => {
554561
const dbMicroBlock: DbMicroblock = {
555562
canonical: true,
556563
microblock_canonical: true,
@@ -570,8 +577,6 @@ export class PgWriteStore extends PgStore {
570577
return dbMicroBlock;
571578
});
572579

573-
const txs: DataStoreTxEventData[] = [];
574-
575580
for (const entry of data.txs) {
576581
// Note: the properties block_hash and burn_block_time are empty here because the anchor block with that data doesn't yet exist.
577582
const dbTx: DbTx = {
@@ -582,7 +587,7 @@ export class PgWriteStore extends PgStore {
582587

583588
// Set all the `block_height` properties for the related tx objects, since it wasn't known
584589
// when creating the objects using only the stacks-node message payload.
585-
txs.push({
590+
txData.push({
586591
tx: dbTx,
587592
stxEvents: entry.stxEvents.map(e => ({ ...e, block_height: blockHeight })),
588593
contractLogEvents: entry.contractLogEvents.map(e => ({
@@ -598,7 +603,7 @@ export class PgWriteStore extends PgStore {
598603
});
599604
}
600605

601-
await this.insertMicroblockData(sql, dbMicroblocks, txs);
606+
await this.insertMicroblockData(sql, dbMicroblocks, txData);
602607

603608
// Find any microblocks that have been orphaned by this latest microblock chain tip.
604609
// This function also checks that each microblock parent hash points to an existing microblock in the db.
@@ -646,24 +651,25 @@ export class PgWriteStore extends PgStore {
646651
);
647652
}
648653

649-
await this.refreshNftCustody(sql, txs, true);
650-
await this.refreshMaterializedView(sql, 'chain_tip');
651-
652654
if (!this.isEventReplay) {
653655
const mempoolStats = await this.getMempoolStatsInternal({ sql });
654656
this.eventEmitter.emit('mempoolStatsUpdate', mempoolStats);
655657
}
658+
});
656659

657-
if (this.notifier) {
658-
for (const microblock of dbMicroblocks) {
659-
await this.notifier.sendMicroblock({ microblockHash: microblock.microblock_hash });
660-
}
661-
for (const tx of txs) {
662-
await this.notifier.sendTx({ txId: tx.tx.tx_id });
663-
}
664-
await this.emitAddressTxUpdates(txs);
660+
await this.refreshNftCustody(txData, true);
661+
await this.refreshMaterializedView('chain_tip');
662+
await this.refreshMaterializedView('mempool_digest');
663+
664+
if (this.notifier) {
665+
for (const microblock of dbMicroblocks) {
666+
await this.notifier.sendMicroblock({ microblockHash: microblock.microblock_hash });
665667
}
666-
});
668+
for (const tx of txData) {
669+
await this.notifier.sendTx({ txId: tx.tx.tx_id });
670+
}
671+
await this.emitAddressTxUpdates(txData);
672+
}
667673
}
668674

669675
async updateStxLockEvent(sql: PgSqlClient, tx: DbTx, event: DbStxLockEvent) {
@@ -1313,13 +1319,12 @@ export class PgWriteStore extends PgStore {
13131319
updatedTxs.push(tx);
13141320
}
13151321
}
1316-
await this.refreshMaterializedView(sql, 'mempool_digest');
1317-
13181322
if (!this.isEventReplay) {
13191323
const mempoolStats = await this.getMempoolStatsInternal({ sql });
13201324
this.eventEmitter.emit('mempoolStatsUpdate', mempoolStats);
13211325
}
13221326
});
1327+
await this.refreshMaterializedView('mempool_digest');
13231328
for (const tx of updatedTxs) {
13241329
await this.notifier?.sendTx({ txId: tx.tx_id });
13251330
}
@@ -1335,8 +1340,8 @@ export class PgWriteStore extends PgStore {
13351340
RETURNING ${sql(MEMPOOL_TX_COLUMNS)}
13361341
`;
13371342
updatedTxs = updateResults.map(r => parseMempoolTxQueryResult(r));
1338-
await this.refreshMaterializedView(sql, 'mempool_digest');
13391343
});
1344+
await this.refreshMaterializedView('mempool_digest');
13401345
for (const tx of updatedTxs) {
13411346
await this.notifier?.sendTx({ txId: tx.tx_id });
13421347
}
@@ -1964,7 +1969,6 @@ export class PgWriteStore extends PgStore {
19641969
WHERE tx_id IN ${sql(txIds)}
19651970
RETURNING tx_id
19661971
`;
1967-
await this.refreshMaterializedView(sql, 'mempool_digest');
19681972
const restoredTxs = updateResults.map(r => r.tx_id);
19691973
return { restoredTxs: restoredTxs };
19701974
}
@@ -1988,7 +1992,6 @@ export class PgWriteStore extends PgStore {
19881992
WHERE tx_id IN ${sql(txIds)}
19891993
RETURNING tx_id
19901994
`;
1991-
await this.refreshMaterializedView(sql, 'mempool_digest');
19921995
const removedTxs = updateResults.map(r => r.tx_id);
19931996
return { removedTxs: removedTxs };
19941997
}
@@ -2002,7 +2005,9 @@ export class PgWriteStore extends PgStore {
20022005
// Get threshold block.
20032006
const blockThreshold = process.env['STACKS_MEMPOOL_TX_GARBAGE_COLLECTION_THRESHOLD'] ?? 256;
20042007
const cutoffResults = await sql<{ block_height: number }[]>`
2005-
SELECT (block_height - ${blockThreshold}) AS block_height FROM chain_tip
2008+
SELECT (MAX(block_height) - ${blockThreshold}) AS block_height
2009+
FROM blocks
2010+
WHERE canonical = TRUE
20062011
`;
20072012
if (cutoffResults.length != 1) {
20082013
return { deletedTxs: [] };
@@ -2016,7 +2021,6 @@ export class PgWriteStore extends PgStore {
20162021
WHERE pruned = FALSE AND receipt_block_height < ${cutoffBlockHeight}
20172022
RETURNING tx_id
20182023
`;
2019-
await this.refreshMaterializedView(sql, 'mempool_digest');
20202024
const deletedTxs = deletedTxResults.map(r => r.tx_id);
20212025
for (const txId of deletedTxs) {
20222026
await this.notifier?.sendTx({ txId: txId });
@@ -2437,11 +2441,12 @@ export class PgWriteStore extends PgStore {
24372441

24382442
/**
24392443
* Refreshes a Postgres materialized view.
2440-
* @param sql - Pg Client
24412444
* @param viewName - Materialized view name
2445+
# @param sql - Pg scoped client. Will use the default client if none is specified
24422446
* @param skipDuringEventReplay - If we should skip refreshing during event replay
24432447
*/
2444-
async refreshMaterializedView(sql: PgSqlClient, viewName: string, skipDuringEventReplay = true) {
2448+
async refreshMaterializedView(viewName: string, sql?: PgSqlClient, skipDuringEventReplay = true) {
2449+
sql = sql ?? this.sql;
24452450
if (this.isEventReplay && skipDuringEventReplay) {
24462451
return;
24472452
}
@@ -2454,34 +2459,32 @@ export class PgWriteStore extends PgStore {
24542459
* @param txs - Transaction event data
24552460
* @param unanchored - If this refresh is requested from a block or microblock
24562461
*/
2457-
async refreshNftCustody(
2458-
sql: PgSqlClient,
2459-
txs: DataStoreTxEventData[],
2460-
unanchored: boolean = false
2461-
) {
2462-
const newNftEventCount = txs
2463-
.map(tx => tx.nftEvents.length)
2464-
.reduce((prev, cur) => prev + cur, 0);
2465-
if (newNftEventCount > 0) {
2466-
// Always refresh unanchored view since even if we're in a new anchored block we should update the
2467-
// unanchored state to the current one.
2468-
await this.refreshMaterializedView(sql, 'nft_custody_unanchored');
2469-
if (!unanchored) {
2470-
await this.refreshMaterializedView(sql, 'nft_custody');
2471-
}
2472-
} else if (!unanchored) {
2473-
// Even if we didn't receive new NFT events in a new anchor block, we should check if we need to
2474-
// update the anchored view to reflect any changes made by previous microblocks.
2475-
const result = await sql<{ outdated: boolean }[]>`
2476-
WITH anchored_height AS (SELECT MAX(block_height) AS anchored FROM nft_custody),
2477-
unanchored_height AS (SELECT MAX(block_height) AS unanchored FROM nft_custody_unanchored)
2478-
SELECT unanchored > anchored AS outdated
2479-
FROM anchored_height CROSS JOIN unanchored_height
2480-
`;
2481-
if (result.length > 0 && result[0].outdated) {
2482-
await this.refreshMaterializedView(sql, 'nft_custody');
2462+
async refreshNftCustody(txs: DataStoreTxEventData[], unanchored: boolean = false) {
2463+
await this.sql.begin(async sql => {
2464+
const newNftEventCount = txs
2465+
.map(tx => tx.nftEvents.length)
2466+
.reduce((prev, cur) => prev + cur, 0);
2467+
if (newNftEventCount > 0) {
2468+
// Always refresh unanchored view since even if we're in a new anchored block we should update the
2469+
// unanchored state to the current one.
2470+
await this.refreshMaterializedView('nft_custody_unanchored', sql);
2471+
if (!unanchored) {
2472+
await this.refreshMaterializedView('nft_custody', sql);
2473+
}
2474+
} else if (!unanchored) {
2475+
// Even if we didn't receive new NFT events in a new anchor block, we should check if we need to
2476+
// update the anchored view to reflect any changes made by previous microblocks.
2477+
const result = await sql<{ outdated: boolean }[]>`
2478+
WITH anchored_height AS (SELECT MAX(block_height) AS anchored FROM nft_custody),
2479+
unanchored_height AS (SELECT MAX(block_height) AS unanchored FROM nft_custody_unanchored)
2480+
SELECT unanchored > anchored AS outdated
2481+
FROM anchored_height CROSS JOIN unanchored_height
2482+
`;
2483+
if (result.length > 0 && result[0].outdated) {
2484+
await this.refreshMaterializedView('nft_custody', sql);
2485+
}
24832486
}
2484-
}
2487+
});
24852488
}
24862489

24872490
/**
@@ -2492,10 +2495,10 @@ export class PgWriteStore extends PgStore {
24922495
return;
24932496
}
24942497
await this.sql.begin(async sql => {
2495-
await this.refreshMaterializedView(sql, 'nft_custody', false);
2496-
await this.refreshMaterializedView(sql, 'nft_custody_unanchored', false);
2497-
await this.refreshMaterializedView(sql, 'chain_tip', false);
2498-
await this.refreshMaterializedView(sql, 'mempool_digest', false);
2498+
await this.refreshMaterializedView('nft_custody', sql, false);
2499+
await this.refreshMaterializedView('nft_custody_unanchored', sql, false);
2500+
await this.refreshMaterializedView('chain_tip', sql, false);
2501+
await this.refreshMaterializedView('mempool_digest', sql, false);
24992502
});
25002503
}
25012504
}

0 commit comments

Comments
 (0)