diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index e679a61c98..781d0191dc 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -108,6 +108,7 @@ pub struct MinerTenureInfo<'a> { pub cause: Option, pub active_reward_set: boot::RewardSet, pub tenure_block_commit_opt: Option, + pub ephemeral: bool, } /// Structure returned from `NakamotoBlockBuilder::build_nakamoto_block` with @@ -216,14 +217,27 @@ impl NakamotoBlockBuilder { /// This function should be called before `tenure_begin`. /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles - /// expire. + /// expire. This is used for normal blocks. pub fn load_tenure_info<'a>( &self, chainstate: &'a mut StacksChainState, burn_dbconn: &'a SortitionHandleConn, cause: Option, ) -> Result, Error> { - self.inner_load_tenure_info(chainstate, burn_dbconn, cause, false) + self.inner_load_tenure_info(chainstate, burn_dbconn, cause, false, false) + } + + /// This function should be called before `tenure_begin`. + /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition + /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles + /// expire. This is used for ephemeral blocks + pub fn load_ephemeral_tenure_info<'a>( + &self, + chainstate: &'a mut StacksChainState, + burn_dbconn: &'a SortitionHandleConn, + cause: Option, + ) -> Result, Error> { + self.inner_load_tenure_info(chainstate, burn_dbconn, cause, false, true) } /// This function should be called before `tenure_begin`. 
@@ -236,8 +250,9 @@ impl NakamotoBlockBuilder { burn_dbconn: &'a SortitionHandleConn, cause: Option, shadow_block: bool, + ephemeral: bool, ) -> Result, Error> { - debug!("Nakamoto miner tenure begin"; "shadow" => shadow_block, "tenure_change" => ?cause); + debug!("Nakamoto miner tenure begin"; "shadow" => shadow_block, "tenure_change" => ?cause, "ephemeral" => ephemeral); let Some(tenure_election_sn) = SortitionDB::get_block_snapshot_consensus(burn_dbconn, &self.header.consensus_hash)? @@ -372,6 +387,7 @@ impl NakamotoBlockBuilder { coinbase_height, active_reward_set, tenure_block_commit_opt, + ephemeral, }) } @@ -395,24 +411,45 @@ impl NakamotoBlockBuilder { clarity_tx, matured_miner_rewards_opt, .. - } = NakamotoChainState::setup_block( - &mut info.chainstate_tx, - info.clarity_instance, - burn_dbconn, - burn_dbconn.context.first_block_height, - &burn_dbconn.context.pox_constants, - info.parent_consensus_hash, - info.parent_header_hash, - info.parent_burn_block_height, - info.burn_tip, - info.burn_tip_height, - info.cause == Some(TenureChangeCause::BlockFound), - info.coinbase_height, - info.cause == Some(TenureChangeCause::Extended), - &self.header.pox_treatment, - block_commit, - &info.active_reward_set, - )?; + } = if info.ephemeral { + NakamotoChainState::setup_ephemeral_block( + &mut info.chainstate_tx, + info.clarity_instance, + burn_dbconn, + burn_dbconn.context.first_block_height, + &burn_dbconn.context.pox_constants, + info.parent_consensus_hash, + info.parent_header_hash, + info.parent_burn_block_height, + info.burn_tip, + info.burn_tip_height, + info.cause == Some(TenureChangeCause::BlockFound), + info.coinbase_height, + info.cause == Some(TenureChangeCause::Extended), + &self.header.pox_treatment, + block_commit, + &info.active_reward_set, + ) + } else { + NakamotoChainState::setup_block( + &mut info.chainstate_tx, + info.clarity_instance, + burn_dbconn, + burn_dbconn.context.first_block_height, + &burn_dbconn.context.pox_constants, + 
info.parent_consensus_hash, + info.parent_header_hash, + info.parent_burn_block_height, + info.burn_tip, + info.burn_tip_height, + info.cause == Some(TenureChangeCause::BlockFound), + info.coinbase_height, + info.cause == Some(TenureChangeCause::Extended), + &self.header.pox_treatment, + block_commit, + &info.active_reward_set, + ) + }?; self.matured_miner_rewards_opt = matured_miner_rewards_opt; Ok(clarity_tx) } @@ -458,10 +495,11 @@ impl NakamotoBlockBuilder { }; test_debug!( - "\n\nMined Nakamoto block {}, {} transactions, state root is {}\n", + "\n\nMined Nakamoto block {}, {} transactions, state root is {}\nBlock: {:?}", block.header.block_hash(), block.txs.len(), - state_root_hash + state_root_hash, + &block ); debug!( diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e66b0bae59..99e005f289 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3782,7 +3782,8 @@ impl NakamotoChainState { } /// Begin block-processing for a normal block and return all of the pre-processed state within a - /// `SetupBlockResult`. Used by the Nakamoto miner, and called by Self::setup_normal_block() + /// `SetupBlockResult`. Used by the Nakamoto miner, and called by + /// Self::setup_normal_block_processing() pub fn setup_block<'a, 'b>( chainstate_tx: &'b mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, @@ -3817,6 +3818,47 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, + false, + ) + } + + /// Begin block-processing for a replay of a normal block and return all of the pre-processed state within a + /// `SetupBlockResult`. 
Used by the block replay logic, and called by Self::setup_normal_block_processing() + pub fn setup_ephemeral_block<'a, 'b>( + chainstate_tx: &'b mut ChainstateTx, + clarity_instance: &'a mut ClarityInstance, + sortition_dbconn: &'b dyn SortitionDBRef, + first_block_height: u64, + pox_constants: &PoxConstants, + parent_consensus_hash: ConsensusHash, + parent_header_hash: BlockHeaderHash, + parent_burn_height: u32, + burn_header_hash: BurnchainHeaderHash, + burn_header_height: u32, + new_tenure: bool, + coinbase_height: u64, + tenure_extend: bool, + block_bitvec: &BitVec<4000>, + tenure_block_commit: &LeaderBlockCommitOp, + active_reward_set: &RewardSet, + ) -> Result, ChainstateError> { + // this block's bitvec header must match the miner's block commit punishments + Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?; + Self::inner_setup_block( + chainstate_tx, + clarity_instance, + sortition_dbconn, + first_block_height, + pox_constants, + parent_consensus_hash, + parent_header_hash, + parent_burn_height, + burn_header_hash, + burn_header_height, + new_tenure, + coinbase_height, + tenure_extend, + true, ) } @@ -3942,6 +3984,7 @@ impl NakamotoChainState { /// * coinbase_height: the number of tenures that this block confirms (including epoch2 blocks) /// (this is equivalent to the number of coinbases) /// * tenure_extend: whether or not to reset the tenure's ongoing execution cost + /// * ephemeral: whether or not to begin an ephemeral block (i.e. 
which won't hit disk) /// /// Returns clarity_tx, list of receipts, microblock execution cost, /// microblock fees, microblock burns, list of microblock tx receipts, @@ -3961,6 +4004,7 @@ impl NakamotoChainState { new_tenure: bool, coinbase_height: u64, tenure_extend: bool, + ephemeral: bool, ) -> Result, ChainstateError> { let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); let parent_sortition_id = sortition_dbconn @@ -4010,15 +4054,27 @@ impl NakamotoChainState { parent_cost_total }; - let mut clarity_tx = StacksChainState::chainstate_block_begin( - chainstate_tx, - clarity_instance, - sortition_dbconn.as_burn_state_db(), - &parent_consensus_hash, - &parent_header_hash, - &MINER_BLOCK_CONSENSUS_HASH, - &MINER_BLOCK_HEADER_HASH, - ); + let mut clarity_tx = if ephemeral { + StacksChainState::chainstate_ephemeral_block_begin( + chainstate_tx, + clarity_instance, + sortition_dbconn.as_burn_state_db(), + &parent_consensus_hash, + &parent_header_hash, + &MINER_BLOCK_CONSENSUS_HASH, + &MINER_BLOCK_HEADER_HASH, + ) + } else { + StacksChainState::chainstate_block_begin( + chainstate_tx, + clarity_instance, + sortition_dbconn.as_burn_state_db(), + &parent_consensus_hash, + &parent_header_hash, + &MINER_BLOCK_CONSENSUS_HASH, + &MINER_BLOCK_HEADER_HASH, + ) + }; // now that we have access to the ClarityVM, we can account for reward deductions from // PoisonMicroblocks if we have new rewards scheduled diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 59bcd2854a..90dd9264c9 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -414,6 +414,7 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, + false, ) } } @@ -431,7 +432,7 @@ impl NakamotoBlockBuilder { burn_dbconn: &'a SortitionHandleConn, cause: Option, ) -> Result, Error> { - self.inner_load_tenure_info(chainstate, burn_dbconn, cause, true) + 
self.inner_load_tenure_info(chainstate, burn_dbconn, cause, true, false) } /// Begin/resume mining a shadow tenure's transactions. diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 900a560eac..05acb5df2e 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -31,7 +31,9 @@ use crate::chainstate::stacks::boot::{ use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::chainstate::stacks::{C32_ADDRESS_VERSION_TESTNET_SINGLESIG, *}; use crate::clarity_vm::clarity::{ClarityBlockConnection, Error as ClarityError}; -use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; +use crate::clarity_vm::database::marf::{ + ClarityMarfStore, ClarityMarfStoreTransaction, MarfedKV, WritableMarfStore, +}; use crate::core::{ StacksEpoch, StacksEpochId, BITCOIN_REGTEST_FIRST_BLOCK_HASH, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, @@ -141,10 +143,10 @@ impl ClarityTestSim { F: FnOnce(&mut ClarityBlockConnection) -> R, { let r = { - let mut store = self.marf.begin( + let mut store: Box = Box::new(self.marf.begin( &StacksBlockId(test_sim_height_to_hash(self.block_height, self.fork)), &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), - ); + )); self.block_height += 1; if new_tenure { @@ -193,10 +195,10 @@ impl ClarityTestSim { where F: FnOnce(&mut OwnedEnvironment) -> R, { - let mut store = self.marf.begin( + let mut store: Box = Box::new(self.marf.begin( &StacksBlockId(test_sim_height_to_hash(self.block_height, self.fork)), &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), - ); + )); self.block_height += 1; if new_tenure { @@ -240,8 +242,8 @@ impl ClarityTestSim { self.execute_next_block_with_tenure(true, f) } - fn check_and_bump_epoch( - store: &mut WritableMarfStore, + fn check_and_bump_epoch<'a>( + store: &mut Box, headers_db: 
&TestSimHeadersDB, burn_db: &dyn BurnStateDB, ) -> StacksEpochId { @@ -268,10 +270,10 @@ impl ClarityTestSim { where F: FnOnce(&mut OwnedEnvironment) -> R, { - let mut store = self.marf.begin( + let mut store: Box = Box::new(self.marf.begin( &StacksBlockId(test_sim_height_to_hash(parent_height, self.fork)), &StacksBlockId(test_sim_height_to_hash(parent_height + 1, self.fork + 1)), - ); + )); let r = { let headers_db = TestSimHeadersDB { diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index a21e022c4f..c541099104 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2037,6 +2037,30 @@ impl StacksChainState { ) } + /// Begin processing an epoch's transactions within the context of a chainstate transaction, + /// but do so in a way that will not cause them to be persisted. Used for replaying blocks. + pub fn chainstate_ephemeral_block_begin<'a, 'b>( + chainstate_tx: &'b ChainstateTx<'b>, + clarity_instance: &'a mut ClarityInstance, + burn_dbconn: &'b dyn BurnStateDB, + parent_consensus_hash: &ConsensusHash, + parent_block: &BlockHeaderHash, + new_consensus_hash: &ConsensusHash, + new_block: &BlockHeaderHash, + ) -> ClarityTx<'a, 'b> { + let conf = chainstate_tx.config.clone(); + StacksChainState::inner_ephemeral_clarity_tx_begin( + conf, + chainstate_tx, + clarity_instance, + burn_dbconn, + parent_consensus_hash, + parent_block, + new_consensus_hash, + new_block, + ) + } + /// Begin a transaction against the Clarity VM, _outside of_ the context of a chainstate /// transaction. Used by the miner for producing blocks. pub fn block_begin<'a>( @@ -2060,6 +2084,30 @@ impl StacksChainState { ) } + /// Begin an ephemeral transaction against the Clarity VM, _outside of_ the context of a chainstate + /// transaction. The block will not be stored to disk, even if it is committed. + /// Used by code paths which need to replay blocks. 
+ pub fn ephemeral_block_begin<'a>( + &'a mut self, + burn_dbconn: &'a dyn BurnStateDB, + parent_consensus_hash: &ConsensusHash, + parent_block: &BlockHeaderHash, + new_consensus_hash: &ConsensusHash, + new_block: &BlockHeaderHash, + ) -> ClarityTx<'a, 'a> { + let conf = self.config(); + StacksChainState::inner_ephemeral_clarity_tx_begin( + conf, + &self.state_index, + &mut self.clarity_state, + burn_dbconn, + parent_consensus_hash, + parent_block, + new_consensus_hash, + new_block, + ) + } + /// Begin a transaction against the Clarity VM for initiating the genesis block /// the genesis block is special cased because it must be evaluated _before_ the /// cost contract is loaded in the boot code. @@ -2328,6 +2376,59 @@ impl StacksChainState { } } + /// Create an ephemeral Clarity VM database transaction. + /// The child block, identified by `new_consensus_hash` and `new_block`, will be treated as + /// ephemeral. + fn inner_ephemeral_clarity_tx_begin<'a, 'b>( + conf: DBConfig, + headers_db: &'b dyn HeadersDB, + clarity_instance: &'a mut ClarityInstance, + burn_dbconn: &'b dyn BurnStateDB, + parent_consensus_hash: &ConsensusHash, + parent_block: &BlockHeaderHash, + new_consensus_hash: &ConsensusHash, + new_block: &BlockHeaderHash, + ) -> ClarityTx<'a, 'b> { + // mix consensus hash and stacks block header hash together, since the stacks block hash + // is not guaranteed to be globally unique (but the pair is) + let parent_index_block = + StacksChainState::get_parent_index_block(parent_consensus_hash, parent_block); + + let new_index_block = + StacksBlockHeader::make_index_block_hash(new_consensus_hash, new_block); + + test_debug!( + "Begin processing ephemeral Stacks block off of {}/{}", + parent_consensus_hash, + parent_block + ); + test_debug!( + "Child ephemeral MARF index root: {} = {} + {}", + new_index_block, + new_consensus_hash, + new_block + ); + test_debug!( + "Parent ephemeral MARF index root: {} = {} + {}", + parent_index_block, + parent_consensus_hash, + 
parent_block + ); + + let inner_clarity_tx = clarity_instance.begin_ephemeral( + &parent_index_block, + &new_index_block, + headers_db, + burn_dbconn, + ); + + test_debug!("Got ephemeral clarity TX!"); + ClarityTx { + block: inner_clarity_tx, + config: conf, + } + } + /// Create a Clarity VM transaction connection for testing in 2.1 #[cfg(test)] pub fn test_genesis_block_begin_2_1<'a>( diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 63e1c66890..b588025e37 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1652,4 +1652,9 @@ impl MARF { pub fn into_sqlite_conn(self) -> Connection { self.storage.into_sqlite_conn() } + + /// Get the underlying storage DB path + pub fn get_db_path(&self) -> &str { + &self.storage.db_path + } } diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 6b714771b2..af07de5aee 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -57,7 +57,9 @@ use crate::clarity::vm::{ analysis, ast, eval_all, ClarityVersion, ContractContext, ContractName, SymbolicExpression, Value, }; -use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; +use crate::clarity_vm::database::marf::{ + ClarityMarfStore, ClarityMarfStoreTransaction, MarfedKV, PersistentWritableMarfStore, +}; use crate::clarity_vm::database::MemoryBackingStore; use crate::core::{StacksEpochId, BLOCK_LIMIT_MAINNET_205, HELIUM_BLOCK_LIMIT_20}; use crate::util_lib::boot::{boot_code_addr, boot_code_id}; @@ -171,7 +173,7 @@ trait ClarityStorage { fn get_analysis_db(&mut self) -> AnalysisDatabase<'_>; } -impl ClarityStorage for WritableMarfStore<'_> { +impl ClarityStorage for PersistentWritableMarfStore<'_> { fn get_clarity_db<'a>( &'a mut self, headers_db: &'a dyn HeadersDB, @@ -345,7 +347,10 @@ fn in_block( f: F, ) -> (CLIHeadersDB, MarfedKV, R) where - F: FnOnce(CLIHeadersDB, WritableMarfStore) -> 
(CLIHeadersDB, WritableMarfStore, R), + F: FnOnce( + CLIHeadersDB, + PersistentWritableMarfStore, + ) -> (CLIHeadersDB, PersistentWritableMarfStore, R), { // need to load the last block let (from, to) = headers_db.advance_cli_chain_tip(); @@ -353,7 +358,7 @@ where let marf_tx = marf_kv.begin(&from, &to); let (headers_return, marf_return, result) = f(headers_db, marf_tx); marf_return - .commit_to(&to) + .commit_to_processed_block(&to) .expect("FATAL: failed to commit block"); (headers_return, result) }; @@ -364,7 +369,7 @@ where // chain tip itself. fn at_chaintip(db_path: &str, mut marf_kv: MarfedKV, f: F) -> R where - F: FnOnce(WritableMarfStore) -> (WritableMarfStore, R), + F: FnOnce(PersistentWritableMarfStore) -> (PersistentWritableMarfStore, R), { // store CLI data alongside the MARF database state let cli_db_path = get_cli_db_path(db_path); @@ -374,13 +379,13 @@ where let marf_tx = marf_kv.begin(&from, &to); let (marf_return, result) = f(marf_tx); - marf_return.rollback_block(); + marf_return.drop_current_trie(); result } fn at_block(blockhash: &str, mut marf_kv: MarfedKV, f: F) -> R where - F: FnOnce(WritableMarfStore) -> (WritableMarfStore, R), + F: FnOnce(PersistentWritableMarfStore) -> (PersistentWritableMarfStore, R), { // store CLI data alongside the MARF database state let from = StacksBlockId::from_hex(blockhash) @@ -389,7 +394,7 @@ where let marf_tx = marf_kv.begin(&from, &to); let (marf_return, result) = f(marf_tx); - marf_return.rollback_block(); + marf_return.drop_current_trie(); result } @@ -405,7 +410,7 @@ fn default_chain_id(mainnet: bool) -> u32 { fn with_env_costs( mainnet: bool, header_db: &CLIHeadersDB, - marf: &mut WritableMarfStore, + marf: &mut PersistentWritableMarfStore, coverage: Option<&mut CoverageReporter>, f: F, ) -> (R, ExecutionCost) diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index f4aa9a051e..6fa4008aab 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ 
b/stackslib/src/clarity_vm/clarity.rs @@ -53,7 +53,9 @@ use crate::chainstate::stacks::{ Error as ChainstateError, StacksMicroblockHeader, StacksTransaction, TransactionPayload, TransactionSmartContract, TransactionVersion, }; -use crate::clarity_vm::database::marf::{MarfedKV, ReadOnlyMarfStore, WritableMarfStore}; +use crate::clarity_vm::database::marf::{ + ClarityMarfStore, ClarityMarfStoreTransaction, MarfedKV, ReadOnlyMarfStore, WritableMarfStore, +}; use crate::core::{StacksEpoch, StacksEpochId, FIRST_STACKS_BLOCK_ID, GENESIS_EPOCH}; use crate::util_lib::boot::{boot_code_acc, boot_code_addr, boot_code_id, boot_code_tx_auth}; use crate::util_lib::db::Error as DatabaseError; @@ -101,7 +103,7 @@ pub struct ClarityInstance { /// issuring event dispatches, before the Clarity database commits. /// pub struct PreCommitClarityBlock<'a> { - datastore: WritableMarfStore<'a>, + datastore: Box, commit_to: StacksBlockId, } @@ -109,7 +111,7 @@ pub struct PreCommitClarityBlock<'a> { /// A high-level interface for Clarity VM interactions within a single block. /// pub struct ClarityBlockConnection<'a, 'b> { - datastore: WritableMarfStore<'a>, + datastore: Box, header_db: &'b dyn HeadersDB, burn_state_db: &'b dyn BurnStateDB, cost_track: Option, @@ -196,7 +198,7 @@ macro_rules! 
using { impl ClarityBlockConnection<'_, '_> { #[cfg(test)] pub fn new_test_conn<'a, 'b>( - datastore: WritableMarfStore<'a>, + datastore: Box, header_db: &'b dyn HeadersDB, burn_state_db: &'b dyn BurnStateDB, epoch: StacksEpochId, @@ -328,7 +330,7 @@ impl ClarityInstance { }; ClarityBlockConnection { - datastore, + datastore: Box::new(datastore), header_db, burn_state_db, cost_track, @@ -352,7 +354,7 @@ impl ClarityInstance { let cost_track = Some(LimitedCostTracker::new_free()); ClarityBlockConnection { - datastore, + datastore: Box::new(datastore), header_db, burn_state_db, cost_track, @@ -378,7 +380,7 @@ impl ClarityInstance { let cost_track = Some(LimitedCostTracker::new_free()); let mut conn = ClarityBlockConnection { - datastore: writable, + datastore: Box::new(writable), header_db, burn_state_db, cost_track, @@ -477,7 +479,7 @@ impl ClarityInstance { let cost_track = Some(LimitedCostTracker::new_free()); let mut conn = ClarityBlockConnection { - datastore: writable, + datastore: Box::new(writable), header_db, burn_state_db, cost_track, @@ -559,7 +561,7 @@ impl ClarityInstance { pub fn drop_unconfirmed_state(&mut self, block: &StacksBlockId) -> Result<(), Error> { let datastore = self.datastore.begin_unconfirmed(block); - datastore.rollback_unconfirmed()?; + datastore.drop_unconfirmed()?; Ok(()) } @@ -588,7 +590,47 @@ impl ClarityInstance { }; ClarityBlockConnection { - datastore, + datastore: Box::new(datastore), + header_db, + burn_state_db, + cost_track, + mainnet: self.mainnet, + chain_id: self.chain_id, + epoch: epoch.epoch_id, + } + } + + /// Begin an ephemeral block, which will not be persisted and which may even already exist in + /// the chainstate. 
+ pub fn begin_ephemeral<'a, 'b>( + &'a mut self, + base_tip: &StacksBlockId, + ephemeral_next: &StacksBlockId, + header_db: &'b dyn HeadersDB, + burn_state_db: &'b dyn BurnStateDB, + ) -> ClarityBlockConnection<'a, 'b> { + let mut datastore = self + .datastore + .begin_ephemeral(base_tip, ephemeral_next) + .expect("FATAL: failed to begin ephemeral block connection"); + + let epoch = Self::get_epoch_of(base_tip, header_db, burn_state_db); + let cost_track = { + let mut clarity_db = datastore.as_clarity_db(&NULL_HEADER_DB, &NULL_BURN_STATE_DB); + Some( + LimitedCostTracker::new( + self.mainnet, + self.chain_id, + epoch.block_limit.clone(), + &mut clarity_db, + epoch.epoch_id, + ) + .expect("FAIL: problem instantiating cost tracking"), + ) + }; + + ClarityBlockConnection { + datastore: Box::new(datastore), header_db, burn_state_db, cost_track, @@ -739,12 +781,12 @@ impl PreCommitClarityBlock<'_> { pub fn commit(self) { debug!("Committing Clarity block connection"; "index_block" => %self.commit_to); self.datastore - .commit_to(&self.commit_to) + .commit_to_processed_block(&self.commit_to) .expect("FATAL: failed to commit block"); } } -impl<'a> ClarityBlockConnection<'a, '_> { +impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// Rolls back all changes in the current block by /// (1) dropping all writes from the current MARF tip, /// (2) rolling back side-storage @@ -752,7 +794,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { // this is a "lower-level" rollback than the roll backs performed in // ClarityDatabase or AnalysisDatabase -- this is done at the backing store level. debug!("Rollback Clarity datastore"); - self.datastore.rollback_block(); + self.datastore.drop_current_trie(); } /// Rolls back all unconfirmed state in the current block by @@ -763,7 +805,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { // ClarityDatabase or AnalysisDatabase -- this is done at the backing store level. 
debug!("Rollback unconfirmed Clarity datastore"); self.datastore - .rollback_unconfirmed() + .drop_unconfirmed() .expect("FATAL: failed to rollback block"); } @@ -796,7 +838,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { pub fn commit_to_block(self, final_bhh: &StacksBlockId) -> LimitedCostTracker { debug!("Commit Clarity datastore to {}", final_bhh); self.datastore - .commit_to(final_bhh) + .commit_to_processed_block(final_bhh) .expect("FATAL: failed to commit block"); self.cost_track.unwrap() @@ -810,7 +852,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { /// a miner re-executes a constructed block. pub fn commit_mined_block(self, bhh: &StacksBlockId) -> Result { debug!("Commit mined Clarity datastore to {}", bhh); - self.datastore.commit_mined_block(bhh)?; + self.datastore.commit_to_mined_block(bhh)?; Ok(self.cost_track.unwrap()) } @@ -1758,10 +1800,10 @@ impl<'a> ClarityBlockConnection<'a, '_> { } pub fn seal(&mut self) -> TrieHash { - self.datastore.seal() + self.datastore.seal_trie() } - pub fn destruct(self) -> WritableMarfStore<'a> { + pub fn destruct(self) -> Box { self.datastore } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 80a8534af8..f00cb3f5ad 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -1,3 +1,20 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::mem; use std::path::PathBuf; use std::str::FromStr; @@ -15,11 +32,14 @@ use clarity::vm::errors::{ IncomparableError, InterpreterError, InterpreterResult, RuntimeErrorType, }; use clarity::vm::types::QualifiedContractIdentifier; +use rusqlite; use rusqlite::Connection; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; +use stacks_common::types::sqlite::NO_PARAMS; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MarfTransaction, MARF}; +use crate::chainstate::stacks::index::storage::{TrieFileStorage, TrieHashCalculationMode}; use crate::chainstate::stacks::index::{ClarityMarfTrieId, Error, MARFValue}; use crate::clarity_vm::special::handle_contract_call_special_cases; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; @@ -35,6 +55,7 @@ use crate::util_lib::db::{Error as DatabaseError, IndexDBConn}; pub struct MarfedKV { chain_tip: StacksBlockId, marf: MARF, + ephemeral_marf: Option>, } impl MarfedKV { @@ -92,7 +113,11 @@ impl MarfedKV { None => StacksBlockId::sentinel(), }; - Ok(MarfedKV { marf, chain_tip }) + Ok(MarfedKV { + marf, + chain_tip, + ephemeral_marf: None, + }) } pub fn open_unconfirmed( @@ -106,7 +131,11 @@ impl MarfedKV { None => StacksBlockId::sentinel(), }; - Ok(MarfedKV { marf, chain_tip }) + Ok(MarfedKV { + marf, + chain_tip, + ephemeral_marf: None, + }) } // used by benchmarks @@ -135,7 +164,11 @@ impl MarfedKV { let chain_tip = StacksBlockId::sentinel(); - MarfedKV { marf, chain_tip } + MarfedKV { + marf, + chain_tip, + ephemeral_marf: None, + } } pub fn begin_read_only<'a>( @@ -194,7 +227,7 @@ impl MarfedKV { &'a mut self, current: &StacksBlockId, next: &StacksBlockId, - ) -> WritableMarfStore<'a> { + ) -> PersistentWritableMarfStore<'a> { let mut tx = self.marf.begin_tx().unwrap_or_else(|_| 
{ panic!( "ERROR: Failed to begin new MARF block {} - {})", @@ -213,13 +246,16 @@ impl MarfedKV { .expect("ERROR: Failed to get open MARF") .clone(); - WritableMarfStore { + PersistentWritableMarfStore { chain_tip, marf: tx, } } - pub fn begin_unconfirmed<'a>(&'a mut self, current: &StacksBlockId) -> WritableMarfStore<'a> { + pub fn begin_unconfirmed<'a>( + &'a mut self, + current: &StacksBlockId, + ) -> PersistentWritableMarfStore<'a> { let mut tx = self.marf.begin_tx().unwrap_or_else(|_| { panic!( "ERROR: Failed to begin new unconfirmed MARF block for {})", @@ -238,12 +274,90 @@ impl MarfedKV { .expect("ERROR: Failed to get open MARF") .clone(); - WritableMarfStore { + PersistentWritableMarfStore { chain_tip, marf: tx, } } + /// Begin an ephemeral MARF block. + /// The data will never hit disk. + pub fn begin_ephemeral<'a>( + &'a mut self, + base_tip: &StacksBlockId, + ephemeral_next: &StacksBlockId, + ) -> InterpreterResult> { + // sanity check -- `base_tip` must be mapped + self.marf.open_block(&base_tip).map_err(|e| { + debug!( + "Failed to open read only connection at {}: {:?}", + &base_tip, &e + ); + InterpreterError::MarfFailure(Error::NotFoundError.to_string()) + })?; + + // set up ephemeral MARF + let ephemeral_marf_storage = TrieFileStorage::open( + ":memory:", + MARFOpenOpts::new(TrieHashCalculationMode::Deferred, "noop", false), + ) + .map_err(|e| { + InterpreterError::Expect(format!("Failed to instantiate ephemeral MARF: {:?}", &e)) + })?; + + let mut ephemeral_marf = MARF::from_storage(ephemeral_marf_storage); + let tx = ephemeral_marf + .storage_tx() + .map_err(|err| InterpreterError::DBError(err.to_string()))?; + + SqliteConnection::initialize_conn(&tx)?; + tx.commit() + .map_err(|err| InterpreterError::SqliteError(IncomparableError { err }))?; + + self.ephemeral_marf = Some(ephemeral_marf); + + let read_only_marf = ReadOnlyMarfStore { + chain_tip: base_tip.clone(), + marf: &mut self.marf, + }; + + let tx = if let Some(ephemeral_marf) = 
self.ephemeral_marf.as_mut() { + // attach the disk-backed MARF to the ephemeral MARF + EphemeralMarfStore::attach_read_only_marf(&ephemeral_marf, &read_only_marf).map_err( + |e| { + InterpreterError::Expect(format!( + "Failed to attach read-only MARF to ephemeral MARF: {:?}", + &e + )) + }, + )?; + + let mut tx = ephemeral_marf.begin_tx().map_err(|e| { + InterpreterError::Expect(format!("Failed to open ephemeral MARF tx: {:?}", &e)) + })?; + tx.begin(&StacksBlockId::sentinel(), ephemeral_next) + .map_err(|e| { + InterpreterError::Expect(format!( + "Failed to begin first ephemeral MARF block: {:?}", + &e + )) + })?; + tx + } else { + // unreachable since self.ephemeral_marf is already assigned + unreachable!(); + }; + + let ephemeral_marf_store = EphemeralMarfStore::new(read_only_marf, tx).map_err(|e| { + InterpreterError::Expect(format!( + "Failed to instantiate ephemeral MARF store: {:?}", + &e + )) + })?; + + Ok(ephemeral_marf_store) + } + pub fn get_chain_tip(&self) -> &StacksBlockId { &self.chain_tip } @@ -262,29 +376,380 @@ impl MarfedKV { } } -pub struct WritableMarfStore<'a> { +/// A wrapper around a MARF transaction which allows read/write access to the MARF's keys off of a +/// given chain tip. +pub struct PersistentWritableMarfStore<'a> { + /// The chain tip from which reads and writes will be indexed. chain_tip: StacksBlockId, + /// The transaction to the MARF instance marf: MarfTransaction<'a, StacksBlockId>, } +/// A wrapper around a MARF handle which allows only read access to the MARF's keys off of a given +/// chain tip. pub struct ReadOnlyMarfStore<'a> { + /// The chain tip from which reads will be indexed. chain_tip: StacksBlockId, + /// Handle to the MARF being read marf: &'a mut MARF, } -impl ReadOnlyMarfStore<'_> { - pub fn as_clarity_db<'b>( +/// Ephemeral MARF store. +/// +/// The implementation "chains" a read-only MARF and a RAM-backed MARF together, for the purposes +/// of giving the Clarity VM a backing store. 
Writes will be stored to the ephemeral MARF, and +/// reads will be carried out against either the ephemeral MARF or the read-only MARF, depending on +/// whether or not the opened chain tip refers to a block in the former or the latter. +pub struct EphemeralMarfStore<'a> { + /// The opened chain tip. It may refer to either a block in the ephemeral MARF or the + /// read-only MARF. + open_tip: EphemeralTip, + /// The tip upon which the ephemeral MARF is built + base_tip: StacksBlockId, + /// The height of the base tip in the disk-backed MARF + base_tip_height: u32, + /// Transaction on a RAM-backed MARF which will be discarded once this struct is dropped + ephemeral_marf: MarfTransaction<'a, StacksBlockId>, + /// Handle to on-disk MARF + read_only_marf: ReadOnlyMarfStore<'a>, +} + +/// A MARF store transaction for a chainstate block's trie. +/// This transaction instantiates a trie which builds atop an already-written trie in the +/// chainstate. Once committed, it will persist -- it may be built upon, and a subsequent attempt +/// to build the same trie will fail. +/// +/// The Stacks node commits tries for one of three purposes: +/// * It processed a block, and needs to persist its trie in the chainstate proper. +/// * It mined a block, and needs to persist its trie outside of the chainstate proper. The miner +/// may build on it later. +/// * It processed an unconfirmed microblock (Stacks 2.x only), and needs to persist the +/// unconfirmed chainstate outside of the chainstate proper so that the microblock miner can +/// continue to build on it and the network can service RPC requests on its state. +/// +/// These needs are each captured in distinct methods for committing this transaction. +pub trait ClarityMarfStoreTransaction { + /// Commit all inserted metadata and associate it with the block trie identified by `target`. + /// It can later be deleted via `drop_metadata_for()` if given the same taret. + /// Returns Ok(()) on success + /// Returns Err(..) 
on error + fn commit_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()>; + + /// Drop metadata for a particular block trie that was stored previously via `commit_metadata_to()`. + /// This function is idempotent. + /// + /// Returns Ok(()) if the metadata for the trie identified by `target` was dropped. + /// It will be possible to insert it again afterwards. + /// Returns Err(..) if the metadata was not successfully dropped. + fn drop_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()>; + + /// Compute the ID of the trie being built. + /// In Stacks, this will only be called once all key/value pairs are inserted (and will only be + /// called at most once in this transaction's lifetime). + fn seal_trie(&mut self) -> TrieHash; + + /// Drop the block trie that this transaction was creating. + /// Destroys the transaction. + fn drop_current_trie(self); + + /// Drop the unconfirmed state trie that this transaction was creating. + /// Destroys the transaction. + /// + /// Returns Ok(()) on successful deletion of the data + /// Returns Err(..) if the deletion failed (this usually isn't recoverable, but recovery is up + /// to the caller) + fn drop_unconfirmed(self) -> InterpreterResult<()>; + + /// Store the processed block's trie that this transaction was creating. + /// The trie's ID must be `target`, so that subsequent tries can be built on it (and so that + /// subsequent queries can read from it). `target` may not be known until it is time to write + /// the trie out, which is why it is provided here. + /// + /// Returns Ok(()) if the block trie was successfully persisted. + /// Returns Err(..) if there was an error in trying to persist this block trie. + fn commit_to_processed_block(self, target: &StacksBlockId) -> InterpreterResult<()>; + + /// Store a mined block's trie that this transaction was creating. 
+ /// This function is distinct from `commit_to_processed_block()` in that the stored block will + /// not be added to the chainstate. However, it must be persisted so that the node can later + /// build on it. + /// + /// Returns Ok(()) if the block trie was successfully persisted. + /// Returns Err(..) if there was an error trying to persist this MARF trie. + fn commit_to_mined_block(self, target: &StacksBlockId) -> InterpreterResult<()>; + + /// Persist the unconfirmed state trie so that other parts of the Stacks node can read from it + /// (such as to handle pending transactions or process RPC requests on it). + fn commit_unconfirmed(self); + + /// Commit to the current chain tip. + /// Used only for testing. + #[cfg(test)] + fn test_commit(self); +} + +/// Unified API common to all MARF stores +pub trait ClarityMarfStore: ClarityBackingStore { + /// Instantiate a `ClarityDatabase` out of this MARF store. + /// Takes a `HeadersDB` and `BurnStateDB` implementation which are both used by + /// `ClarityDatabase` to access Stacks's chainstate and sortition chainstate, respectively. + fn as_clarity_db<'b>( &'b mut self, headers_db: &'b dyn HeadersDB, burn_state_db: &'b dyn BurnStateDB, - ) -> ClarityDatabase<'b> { + ) -> ClarityDatabase<'b> + where + Self: Sized, + { ClarityDatabase::new(self, headers_db, burn_state_db) } - pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { + /// Instantiate an `AnalysisDatabase` out of this MARF store. + fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> + where + Self: Sized, + { AnalysisDatabase::new(self) } +} + +/// A MARF store which can be written to is both a ClarityMarfStore and a +/// ClarityMarfStoreTransaction (and thus also a ClarityBackingStore). 
+pub trait WritableMarfStore: + ClarityMarfStore + ClarityMarfStoreTransaction + BoxedClarityMarfStoreTransaction +{ +} + +impl ClarityMarfStore for ReadOnlyMarfStore<'_> {} +impl ClarityMarfStore for PersistentWritableMarfStore<'_> {} +impl ClarityMarfStore for EphemeralMarfStore<'_> {} + +impl ClarityMarfStoreTransaction for PersistentWritableMarfStore<'_> { + /// Commit metadata for a given `target` trie. In this MARF store, this just renames all + /// metadata rows with `self.chain_tip` as their block identifier to have `target` instead. + /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn commit_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()> { + SqliteConnection::commit_metadata_to(self.marf.sqlite_tx(), &self.chain_tip, target) + } + + /// Drop metadata for the given `target` trie. This just drops the metadata rows with `target` + /// as their block identifier. + /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn drop_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()> { + SqliteConnection::drop_metadata(self.marf.sqlite_tx(), target) + } + + /// Seal the trie -- compute the root hash. + /// NOTE: This is a one-time operation for this implementation -- a subsequent call will panic. + fn seal_trie(&mut self) -> TrieHash { + self.marf + .seal() + .expect("FATAL: failed to .seal() MARF transaction") + } + + /// Drop the trie being built. This just drops the data from RAM and aborts the underlying + /// sqlite transaction. This instance is consumed. + fn drop_current_trie(self) { + self.marf.drop_current(); + } + + /// Drop unconfirmed state being built. This will not only drop unconfirmed state in RAM, but + /// also any unconfirmed trie data from the sqlite DB as well as its associated metadata. 
+ /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn drop_unconfirmed(mut self) -> InterpreterResult<()> { + let chain_tip = self.chain_tip.clone(); + debug!("Drop unconfirmed MARF trie {}", &chain_tip); + self.drop_metadata_for_trie(&chain_tip)?; + self.marf.drop_unconfirmed(); + Ok(()) + } + /// Commit the outstanding trie and metadata to the set of processed-block tries, and call it + /// `target` in the DB. Future tries can be built atop it. This commits the transaction and + /// drops this MARF store. + /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn commit_to_processed_block(mut self, target: &StacksBlockId) -> InterpreterResult<()> { + debug!("commit_to({})", target); + self.commit_metadata_for_trie(target)?; + let _ = self.marf.commit_to(target).map_err(|e| { + error!("Failed to commit to MARF block {target}: {e:?}"); + InterpreterError::Expect("Failed to commit to MARF block".into()) + })?; + Ok(()) + } + + /// Commit the outstanding trie to the `mined_blocks` table in the underlying MARF. + /// The metadata will be dropped, since this won't be added to the chainstate. This commits + /// the transaction and drops this MARF store. + /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn commit_to_mined_block(mut self, target: &StacksBlockId) -> InterpreterResult<()> { + debug!("commit_mined_block: ({}->{})", &self.chain_tip, target); + // rollback the side_store + // the side_store shouldn't commit data for blocks that won't be + // included in the processed chainstate (like a block constructed during mining) + // _if_ for some reason, we do want to be able to access that mined chain state in the future, + // we should probably commit the data to a different table which does not have uniqueness constraints. 
+ let chain_tip = self.chain_tip.clone(); + self.drop_metadata_for_trie(&chain_tip)?; + let _ = self.marf.commit_mined(target).map_err(|e| { + error!("Failed to commit to mined MARF block {target}: {e:?}",); + InterpreterError::Expect("Failed to commit to MARF block".into()) + })?; + Ok(()) + } + + /// Commit the outstanding trie to unconfirmed state, so subsequent read I/O can be performed + /// on it (such as servicing RPC requests). This commits this transaction and drops this MARF + /// store + fn commit_unconfirmed(self) { + debug!("commit_unconfirmed()"); + // NOTE: Can omit commit_metadata_to, since the block header hash won't change + self.marf + .commit() + .expect("ERROR: Failed to commit MARF block"); + } + + #[cfg(test)] + fn test_commit(self) { + self.do_test_commit() + } +} + +impl ClarityMarfStoreTransaction for EphemeralMarfStore<'_> { + /// Commit metadata for a given `target` trie. In this MARF store, this just renames all + /// metadata rows with `self.chain_tip` as their block identifier to have `target` instead, + /// but only within the ephemeral MARF. None of the writes will hit disk, and they will + /// disappear when this instance is dropped + /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn commit_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()> { + if let Some(tip) = self.ephemeral_marf.get_open_chain_tip() { + self.teardown_views(); + let res = + SqliteConnection::commit_metadata_to(self.ephemeral_marf.sqlite_tx(), tip, target); + self.setup_views(); + res + } else { + Ok(()) + } + } + + /// Drop metadata for the given `target` trie. This just drops the metadata rows with `target` + /// as their block identifier. None of the data is disk-backed, so this should always succeed + /// unless the RAM-only sqlite DB is experiencing problems (which is probably not recoverable). 
+ /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn drop_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()> { + self.teardown_views(); + let res = SqliteConnection::drop_metadata(self.ephemeral_marf.sqlite_tx(), target); + self.setup_views(); + res + } + + /// Seal the trie -- compute the root hash. + /// NOTE: This is a one-time operation for this implementation -- a subsequent call will panic. + fn seal_trie(&mut self) -> TrieHash { + self.ephemeral_marf + .seal() + .expect("FATAL: failed to .seal() MARF") + } + + /// Drop the trie being built. This just drops the data from RAM and aborts the underlying + /// sqlite transaction. This MARF store instance is consumed. + fn drop_current_trie(self) { + self.ephemeral_marf.drop_current() + } + + /// Drop unconfirmed state being built. All data lives in RAM in the ephemeral MARF + /// transaction, so no disk I/O will be performed. + /// + /// Returns Ok(()) on success + /// Returns Err(InterpreterError(..)) on sqlite failure + fn drop_unconfirmed(mut self) -> InterpreterResult<()> { + if let Some(tip) = self.ephemeral_marf.get_open_chain_tip().cloned() { + debug!("Drop unconfirmed MARF trie {}", tip); + self.drop_metadata_for_trie(&tip)?; + self.ephemeral_marf.drop_unconfirmed(); + } + Ok(()) + } + + /// "Commit" the ephemeral MARF as if it were to be written to chainstate, + /// and consume this instance. This is effectively a no-op since + /// nothing will hit disk, and all written data will be dropped. However, we go through the + /// motions just in case any errors would be reported. 
+ /// Returns Ok(()) on success
+ /// Returns Err(InterpreterError(..)) on sqlite failure
+ fn commit_to_processed_block(mut self, target: &StacksBlockId) -> InterpreterResult<()> {
+ if self.ephemeral_marf.get_open_chain_tip().is_some() {
+ self.commit_metadata_for_trie(target)?;
+ let _ = self.ephemeral_marf.commit_to(target).map_err(|e| {
+ error!("Failed to commit to ephemeral MARF block {target}: {e:?}",);
+ InterpreterError::Expect("Failed to commit to MARF block".into())
+ })?;
+ }
+ Ok(())
+ }
+
+ /// "Commit" the ephemeral MARF as if it were to be written to the `mined_blocks` table,
+ /// and consume this instance. This is effectively a no-op since
+ /// nothing will hit disk, and all written data will be dropped. However, we go through the
+ /// motions just in case any errors would be reported.
+ ///
+ /// Returns Ok(()) on success
+ /// Returns Err(InterpreterError(..)) on sqlite failure
+ fn commit_to_mined_block(mut self, target: &StacksBlockId) -> InterpreterResult<()> {
+ if let Some(tip) = self.ephemeral_marf.get_open_chain_tip().cloned() {
+ // rollback the side_store
+ // the side_store shouldn't commit data for blocks that won't be
+ // included in the processed chainstate (like a block constructed during mining)
+ // _if_ for some reason, we do want to be able to access that mined chain state in the future,
+ // we should probably commit the data to a different table which does not have uniqueness constraints.
+ self.drop_metadata_for_trie(&tip)?;
+ let _ = self.ephemeral_marf.commit_mined(target).map_err(|e| {
+ error!("Failed to commit to mined MARF block {target}: {e:?}",);
+ InterpreterError::Expect("Failed to commit to MARF block".into())
+ })?;
+ }
+ Ok(())
+ }
+
+ /// "Commit" unconfirmed data to the ephemeral MARF as if it were to be written to unconfirmed
+ /// state, and consume this instance. This is effectively a no-op since nothing will be
+ /// written to disk, and all written data will be dropped.
However, we go through the motions + /// just in case any errors would be reported. + fn commit_unconfirmed(self) { + // NOTE: Can omit commit_metadata_to, since the block header hash won't change + self.ephemeral_marf + .commit() + .expect("ERROR: Failed to commit MARF block"); + } + + #[cfg(test)] + fn test_commit(self) { + self.do_test_commit() + } +} + +impl ReadOnlyMarfStore<'_> { + /// Determine if there is a trie in the underlying MARF with the given ID `bhh`. + /// + /// Return Ok(true) if so + /// Return Ok(false) if not + /// Return Err(..) if we encounter a sqlite error pub fn trie_exists_for_block(&mut self, bhh: &StacksBlockId) -> Result { self.marf .with_conn(|conn| conn.has_block(bhh).map_err(DatabaseError::IndexError)) @@ -438,24 +903,30 @@ impl ClarityBackingStore for ReadOnlyMarfStore<'_> { } fn get_data(&mut self, key: &str) -> InterpreterResult> { - trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf .get(&self.chain_tip, key) .or_else(|e| match e { Error::NotFoundError => { - trace!( - "MarfedKV get {:?} off of {:?}: not found", + test_debug!( + "ReadOnly MarfedKV get {:?} off of {:?}: not found", key, &self.chain_tip ); Ok(None) } - _ => Err(e), + _ => { + test_debug!( + "ReadOnly MarfedKV get {:?} off of {:?}: {:?}", + key, + &self.chain_tip, + &e + ); + Err(e) + } }) .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on GET".into()))? 
.map(|marf_value| { let side_key = marf_value.to_hex(); - trace!("MarfedKV get side-key for {:?}: {:?}", key, &side_key); SqliteConnection::get(self.get_side_store(), &side_key)?.ok_or_else(|| { InterpreterError::Expect(format!( "ERROR: MARF contained value_hash not found in side storage: {}", @@ -511,11 +982,12 @@ impl ClarityBackingStore for ReadOnlyMarfStore<'_> { fn insert_metadata( &mut self, - contract: &QualifiedContractIdentifier, - key: &str, - value: &str, + _contract: &QualifiedContractIdentifier, + _key: &str, + _value: &str, ) -> InterpreterResult<()> { - sqlite_insert_metadata(self, contract, key, value) + error!("Attempted to commit metadata changes to read-only MARF"); + panic!("BUG: attempted metadata commit to read-only MARF"); } fn get_metadata( @@ -536,86 +1008,15 @@ impl ClarityBackingStore for ReadOnlyMarfStore<'_> { } } -impl WritableMarfStore<'_> { - pub fn as_clarity_db<'b>( - &'b mut self, - headers_db: &'b dyn HeadersDB, - burn_state_db: &'b dyn BurnStateDB, - ) -> ClarityDatabase<'b> { - ClarityDatabase::new(self, headers_db, burn_state_db) - } - - pub fn as_analysis_db(&mut self) -> AnalysisDatabase<'_> { - AnalysisDatabase::new(self) - } - - pub fn rollback_block(self) { - self.marf.drop_current(); - } - - pub fn rollback_unconfirmed(self) -> InterpreterResult<()> { - debug!("Drop unconfirmed MARF trie {}", &self.chain_tip); - SqliteConnection::drop_metadata(self.marf.sqlite_tx(), &self.chain_tip)?; - self.marf.drop_unconfirmed(); - Ok(()) - } - - pub fn commit_to(self, final_bhh: &StacksBlockId) -> InterpreterResult<()> { - debug!("commit_to({})", final_bhh); - SqliteConnection::commit_metadata_to(self.marf.sqlite_tx(), &self.chain_tip, final_bhh)?; - - let _ = self.marf.commit_to(final_bhh).map_err(|e| { - error!("Failed to commit to MARF block {}: {:?}", &final_bhh, &e); - InterpreterError::Expect("Failed to commit to MARF block".into()) - })?; - Ok(()) - } - +impl PersistentWritableMarfStore<'_> { #[cfg(test)] - pub fn 
test_commit(self) { + fn do_test_commit(self) { let bhh = self.chain_tip.clone(); - self.commit_to(&bhh).unwrap(); - } - - pub fn commit_unconfirmed(self) { - debug!("commit_unconfirmed()"); - // NOTE: Can omit commit_metadata_to, since the block header hash won't change - // commit_metadata_to(&self.chain_tip, final_bhh); - self.marf - .commit() - .expect("ERROR: Failed to commit MARF block"); - } - - // This is used by miners - // so that the block validation and processing logic doesn't - // reprocess the same data as if it were already loaded - pub fn commit_mined_block(self, will_move_to: &StacksBlockId) -> InterpreterResult<()> { - debug!( - "commit_mined_block: ({}->{})", - &self.chain_tip, will_move_to - ); - // rollback the side_store - // the side_store shouldn't commit data for blocks that won't be - // included in the processed chainstate (like a block constructed during mining) - // _if_ for some reason, we do want to be able to access that mined chain state in the future, - // we should probably commit the data to a different table which does not have uniqueness constraints. 
- SqliteConnection::drop_metadata(self.marf.sqlite_tx(), &self.chain_tip)?; - let _ = self.marf.commit_mined(will_move_to).map_err(|e| { - error!( - "Failed to commit to mined MARF block {}: {:?}", - &will_move_to, &e - ); - InterpreterError::Expect("Failed to commit to MARF block".into()) - })?; - Ok(()) - } - - pub fn seal(&mut self) -> TrieHash { - self.marf.seal().expect("FATAL: failed to .seal() MARF") + self.commit_to_processed_block(&bhh).unwrap(); } } -impl ClarityBackingStore for WritableMarfStore<'_> { +impl ClarityBackingStore for PersistentWritableMarfStore<'_> { fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult { self.marf .check_ancestor_block_hash(&bhh) @@ -817,12 +1218,11 @@ impl ClarityBackingStore for WritableMarfStore<'_> { } fn put_all_data(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { - let mut keys = Vec::new(); - let mut values = Vec::new(); + let mut keys = Vec::with_capacity(items.len()); + let mut values = Vec::with_capacity(items.len()); for (key, value) in items.into_iter() { - trace!("MarfedKV put '{}' = '{}'", &key, &value); let marf_value = MARFValue::from_value(&value); - SqliteConnection::put(self.get_side_store(), &marf_value.to_hex(), &value)?; + SqliteConnection::put(self.marf.sqlite_tx(), &marf_value.to_hex(), &value)?; keys.push(key); values.push(marf_value); } @@ -864,3 +1264,868 @@ impl ClarityBackingStore for WritableMarfStore<'_> { sqlite_get_metadata_manual(self, at_height, contract, key) } } + +/// Enumeration of the possible types of open tips in an ephemeral MARF. +/// The tip can point to a block in the ephemeral RAM-backed MARF, or the on-disk MARF. 
+#[derive(Debug, PartialEq, Clone)] +enum EphemeralTip { + RAM(StacksBlockId), + Disk(StacksBlockId), +} + +impl EphemeralTip { + fn into_block_id(self) -> StacksBlockId { + match self { + Self::RAM(tip) => tip, + Self::Disk(tip) => tip, + } + } +} + +impl<'a> EphemeralMarfStore<'a> { + /// Attach the sqlite DB of the given read-only MARF store to the ephemeral MARF, so that reads + /// on the ephemeral MARF for non-ephemeral data will automatically fall back to the read-only + /// MARF's database. + /// + /// Returns Ok(()) on success + /// Returns Err(..) on sqlite error + pub fn attach_read_only_marf( + ephemeral_marf: &MARF, + read_only_marf: &ReadOnlyMarfStore<'a>, + ) -> Result<(), Error> { + let conn = ephemeral_marf.sqlite_conn(); + conn.execute( + "ATTACH DATABASE ?1 AS read_only_marf", + rusqlite::params![read_only_marf.marf.get_db_path()], + )?; + Ok(()) + } + + /// Instantiate. + /// The `base_tip` must be a valid tip in the given MARF. New writes in the ephemeral MARF will + /// descend from the block identified by `tip`. + /// + /// Returns Ok(Self) on success + /// Returns Err(..) if the ephemeral MARF tx was not opened. + pub fn new( + mut read_only_marf: ReadOnlyMarfStore<'a>, + ephemeral_marf_tx: MarfTransaction<'a, StacksBlockId>, + ) -> Result { + let base_tip_height = read_only_marf.get_current_block_height(); + let ephemeral_tip = ephemeral_marf_tx + .get_open_chain_tip() + .ok_or(Error::NotFoundError)? 
+ .clone(); + let ephemeral_marf_store = Self { + open_tip: EphemeralTip::RAM(ephemeral_tip), + base_tip: read_only_marf.chain_tip.clone(), + base_tip_height, + ephemeral_marf: ephemeral_marf_tx, + read_only_marf, + }; + + // setup views so that the ephemeral MARF's data and metadata tables show all MARF + // key/value data + ephemeral_marf_store.setup_views(); + + Ok(ephemeral_marf_store) + } + + /// Test to see if a given tip is in the ephemeral MARF + fn is_ephemeral_tip(&mut self, tip: &StacksBlockId) -> Result { + match self.ephemeral_marf.get_root_hash_at(tip) { + Ok(_) => Ok(true), + Err(Error::NotFoundError) => Ok(false), + Err(e) => Err(InterpreterError::MarfFailure(e.to_string())), + } + } + + /// Create a temporary view for `data_table` and `metadata_table` that merges the ephemeral + /// MARF's data with the disk-backed MARF. This must be done before reading anything out of + /// the side store, and must be undone before writing anything to the ephemeral MARF. + /// + /// This is infallible. Sqlite errors will panic. This is fine because all sqlite operations + /// are on the RAM-backed ephemeral MARF; if RAM exhaustion causes problems, then OOM failure + /// is not far behind. 
+ fn setup_views(&self) { + let conn = self.ephemeral_marf.sqlite_conn(); + conn.execute( + "ALTER TABLE data_table RENAME TO ephemeral_data_table", + NO_PARAMS, + ) + .expect("FATAL: failed to rename data_table to ephemeral_data_table"); + conn.execute( + "ALTER TABLE metadata_table RENAME TO ephemeral_metadata_table", + NO_PARAMS, + ) + .expect("FATAL: failed to rename metadata_table to ephemeral_metadata_table"); + conn.execute("CREATE TEMP VIEW data_table(key, value) AS SELECT * FROM main.ephemeral_data_table UNION SELECT * FROM read_only_marf.data_table", NO_PARAMS) + .expect("FATAL: failed to setup temp view data_table on ephemeral MARF DB"); + conn.execute("CREATE TEMP VIEW metadata_table(key, blockhash, value) AS SELECT * FROM main.ephemeral_metadata_table UNION SELECT * FROM read_only_marf.metadata_table", NO_PARAMS) + .expect("FATAL: failed to setup temp view metadata_table on ephemeral MARF DB"); + } + + /// Delete temporary views `data_table` and `metadata_table`, and restore + /// `data_table` and `metadata_table` table names. Do this prior to writing. + /// + /// This is infallible. Sqlite errors will panic. This is fine because all sqlite operations + /// are on the RAM-backed ephemeral MARF; if RAM exhaustion causes problems, then OOM failure + /// is not far behind. 
+ fn teardown_views(&self) { + let conn = self.ephemeral_marf.sqlite_conn(); + conn.execute("DROP VIEW data_table", NO_PARAMS) + .expect("FATAL: failed to drop data_table view"); + conn.execute("DROP VIEW metadata_table", NO_PARAMS) + .expect("FATAL: failed to drop metadata_table view"); + conn.execute( + "ALTER TABLE ephemeral_data_table RENAME TO data_table", + NO_PARAMS, + ) + .expect("FATAL: failed to restore data_table"); + conn.execute( + "ALTER TABLE ephemeral_metadata_table RENAME TO metadata_table", + NO_PARAMS, + ) + .expect("FATAL: failed to restore metadata_table"); + } + + /// Test helper to commit ephemeral MARF block data using the open chain tip as the final + /// identifier + #[cfg(test)] + fn do_test_commit(self) { + if let Some(tip) = self.ephemeral_marf.get_open_chain_tip() { + let bhh = tip.clone(); + self.commit_to_processed_block(&bhh).unwrap(); + } + } +} + +impl ClarityBackingStore for EphemeralMarfStore<'_> { + /// Seek to the given chain tip. This given tip will become the new tip from which + /// reads and writes will be indexed. + /// + /// Returns Ok(old-chain-tip) on success. + /// Returns Err(..) if the given chain tip does not exist or is on a different fork (e.g. is + /// not an ancestor of this struct's tip). + fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult { + if self.is_ephemeral_tip(&bhh)? { + // open the disk-backed MARF to the base tip, so we can carry out reads on disk-backed + // data in the event that a read on a key is `None` for the ephemeral MARF. + self.read_only_marf.set_block_hash(self.base_tip.clone())?; + + // update ephemeral MARF open tip + let old_tip = mem::replace(&mut self.open_tip, EphemeralTip::RAM(bhh)).into_block_id(); + self.open_tip = EphemeralTip::RAM(bhh); + return Ok(old_tip); + } + + // this bhh is not ephemeral, so it might be disk-backed. 
+ self.read_only_marf
+ .marf
+ .check_ancestor_block_hash(&bhh)
+ .map_err(|e| match e {
+ Error::NotFoundError => {
+ test_debug!("No such block {:?} (NotFoundError)", &bhh);
+ RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0))
+ }
+ Error::NonMatchingForks(_bh1, _bh2) => {
+ test_debug!(
+ "No such block {:?} (NonMatchingForks({}, {}))",
+ &bhh,
+ BlockHeaderHash(_bh1),
+ BlockHeaderHash(_bh2)
+ );
+ RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0))
+ }
+ _ => panic!("ERROR: Unexpected MARF failure: {}", e),
+ })?;
+
+ let old_tip = mem::replace(&mut self.open_tip, EphemeralTip::Disk(bhh));
+ Ok(old_tip.into_block_id())
+ }
+
+ /// Get the special-case contract-call handlers (e.g. for PoX and .costs-voting)
+ fn get_cc_special_cases_handler(&self) -> Option {
+ Some(&handle_contract_call_special_cases)
+ }
+
+ /// Load a value associated with the given key from the MARF and its side-store.
+ /// The key can be any string; it will be translated into a MARF key.
+ /// The caller must decode the resulting value.
+ ///
+ /// Returns Ok(Some(value)) if the key was mapped to the given value at the opened chain tip.
+ /// Returns Ok(None) if the key was not mapped to the given value at the opened chain tip.
+ /// Returns Err(..) on all other failures.
+ fn get_data(&mut self, key: &str) -> InterpreterResult> {
+ let value_res: InterpreterResult> = if let EphemeralTip::RAM(tip) =
+ &self.open_tip
+ {
+ // try the ephemeral MARF first
+ self.ephemeral_marf
+ .get(tip, key)
+ .or_else(|e| match e {
+ Error::NotFoundError => {
+ test_debug!(
+ "Ephemeral MarfedKV get {:?} off of {:?}: not found",
+ key,
+ tip
+ );
+ Ok(None)
+ }
+ _ => {
+ test_debug!(
+ "Ephemeral MarfedKV failed to get {:?} off of {:?}: {:?}",
+ key,
+ tip,
+ &e
+ );
+ Err(e)
+ }
+ })
+ .map_err(|_| InterpreterError::Expect("ERROR: Unexpected Ephemeral MARF Failure on GET".into()))?
+ .map(|marf_value| {
+ let side_key = marf_value.to_hex();
+ SqliteConnection::get(self.ephemeral_marf.sqlite_conn(), &side_key)?.ok_or_else(|| {
+ InterpreterError::Expect(format!(
+ "ERROR: Ephemeral MARF contained value_hash not found in side storage: {}",
+ side_key
+ ))
+ .into()
+ })
+ })
+ .transpose()
+ } else {
+ Ok(None)
+ };
+
+ if let Some(value) = value_res? {
+ // found in ephemeral MARF
+ return Ok(Some(value));
+ }
+
+ // Due to the way we implemented `.set_block_hash()`, the read-only
+ // MARF's tip will be set to `base_tip` if the open tip was ephemeral.
+ // Otherwise, it'll be set to the tip that was last opened. Either way,
+ // the correct tip has been set in `self.read_only_marf` so that `.get_data()`
+ // will work as expected.
+ self.read_only_marf.get_data(key)
+ }
+
+ /// Get data from the MARF given a trie hash.
+ /// Returns Ok(Some(value)) if the key was mapped to the given value at the opened chain tip.
+ /// Returns Ok(None) if the key was not mapped to the given value at the opened chain tip.
+ /// Returns Err(..) on all other failures
+ fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> {
+ trace!(
+ "Ephemeral MarfedKV get_from_hash: {:?} tip={:?}",
+ hash,
+ &self.open_tip
+ );
+ let value_res: InterpreterResult> = if let EphemeralTip::RAM(tip) =
+ &self.open_tip
+ {
+ // try the ephemeral MARF first
+ self.ephemeral_marf
+ .get_from_hash(tip, hash)
+ .or_else(|e| match e {
+ Error::NotFoundError => {
+ trace!(
+ "Ephemeral MarfedKV get {:?} off of {:?}: not found",
+ hash,
+ tip
+ );
+ Ok(None)
+ }
+ _ => Err(e),
+ })
+ .map_err(|_| InterpreterError::Expect("ERROR: Unexpected MARF Failure on get-by-path".into()))?
+ .map(|marf_value| { + let side_key = marf_value.to_hex(); + trace!("Ephemeral MarfedKV get side-key for {:?}: {:?}", hash, &side_key); + SqliteConnection::get(self.ephemeral_marf.sqlite_conn(), &side_key)?.ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: Ephemeral MARF contained value_hash not found in side storage: {}", + side_key + )) + .into() + }) + }) + .transpose() + } else { + Ok(None) + }; + + if let Some(value) = value_res? { + // found in ephemeral MARF + return Ok(Some(value)); + } + + // Due to the way we implemented `.set_block_hash()`, the read-only + // MARF's tip will be set to `base_tip` if the open tip was ephemeral. + // Otherwise, it'll be set to the tip that was last opeend. Either way, + // the correct tip has been set in `self.read_only_marf` that `.get_data_from_path()` + // will work as expected. + self.read_only_marf.get_data_from_path(hash) + } + + /// Get data from the MARF as well as a Merkle proof-of-inclusion. + /// Returns Ok(Some(value)) if the key was mapped to the given value at the opened chain tip. + /// Returns Ok(None) if the key was not mapped to the given value at the opened chain tip. + /// Returns Err(..) on all other failures + fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { + trace!( + "Ephemeral MarfedKV get_data_with_proof: '{}' tip={:?}", + key, + &self.open_tip + ); + let value_res: InterpreterResult)>> = + if let EphemeralTip::RAM(tip) = &self.open_tip { + // try the ephemeral MARF first + self.ephemeral_marf + .get_with_proof(tip, key) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "Ephemeral MarfedKV get-with-proof '{}' off of {:?}: not found", + key, + tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| { + InterpreterError::Expect( + "ERROR: Unexpected Ephemeral MARF Failure on get-with-proof".into(), + ) + })? 
+ .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.ephemeral_marf.sqlite_conn(), &side_key)? + .ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } else { + Ok(None) + }; + + if let Some(value) = value_res? { + // found in ephemeral MARF + return Ok(Some(value)); + } + + // Due to the way we implemented `.set_block_hash()`, the read-only + // MARF's tip will be set to `base_tip` if the open tip was ephemeral. + // Otherwise, it'll be set to the tip that was last opeend. Either way, + // the correct tip has been set in `self.read_only_marf` that `.get_data_with_proof()` + // will work as expected. + self.read_only_marf.get_data_with_proof(key) + } + + /// Get data and a Merkle proof-of-inclusion for it from the MARF given a trie hash. + /// Returns Ok(Some(value)) if the key was mapped to the given value at the opeend chain tip. + /// Returns Ok(None) if the key was not mapped to the given value at the opened chain tip. + /// Returns Err(..) on all other failures + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> { + trace!( + "Ephemeral MarfedKV get_data_with_proof_from_hash: {:?} tip={:?}", + hash, + &self.open_tip + ); + let value_res: InterpreterResult)>> = + if let EphemeralTip::RAM(tip) = &self.open_tip { + self.ephemeral_marf + .get_with_proof_from_hash(tip, hash) + .or_else(|e| match e { + Error::NotFoundError => { + trace!( + "Ephemeral MarfedKV get-with-proof {:?} off of {:?}: not found", + hash, + tip + ); + Ok(None) + } + _ => Err(e), + }) + .map_err(|_| { + InterpreterError::Expect( + "ERROR: Unexpected ephemeral MARF Failure on get-data-with-proof" + .into(), + ) + })? 
+ .map(|(marf_value, proof)| { + let side_key = marf_value.to_hex(); + let data = + SqliteConnection::get(self.ephemeral_marf.sqlite_conn(), &side_key)? + .ok_or_else(|| { + InterpreterError::Expect(format!( + "ERROR: MARF contained value_hash not found in side storage: {}", + side_key + )) + })?; + Ok((data, proof.serialize_to_vec())) + }) + .transpose() + } else { + Ok(None) + }; + + if let Some(value) = value_res? { + // found in ephemeral MARF + return Ok(Some(value)); + } + + // Due to the way we implemented `.set_block_hash()`, the read-only + // MARF's tip will be set to `base_tip` if the open tip was ephemeral. + // Otherwise, it'll be set to the tip that was last opeend. Either way, + // the correct tip has been set in `self.read_only_marf` that + // `.get_data_with_proof_from_path()` + // will work as expected. + self.read_only_marf.get_data_with_proof_from_path(hash) + } + + /// Get a sqlite connection to the MARF side-store. + /// Note that due to `setup_views()` and `teardown_views()`, the MARF DB will show key/value + /// pairs for both the ephemeral MARF and the disk-backed readonly MARF. + fn get_side_store(&mut self) -> &Connection { + self.ephemeral_marf.sqlite_conn() + } + + /// Get an ancestor block's ID at a given absolute height, off of the open tip. + /// Returns Some(block-id) if there is a block at the given height. + /// Returns None otherwise. 
+ fn get_block_at_height(&mut self, height: u32) -> Option { + let block_id_opt = if let EphemeralTip::RAM(tip) = &self.open_tip { + // careful -- the ephemeral MARF's height 0 corresponds to the base tip height + if height > self.base_tip_height { + self.ephemeral_marf + .get_block_at_height(height - self.base_tip_height, tip) + .unwrap_or_else(|_| { + panic!( + "Unexpected MARF failure: failed to get block at height {} off of {}.", + height, tip + ) + }) + } else { + None + } + } else { + None + }; + + if let Some(block_id) = block_id_opt { + return Some(block_id); + } + + // Due to the way we implemented `.set_block_hash()`, the read-only + // MARF's tip will be set to `base_tip` if the open tip was ephemeral. + // Otherwise, it'll be set to the tip that was last opeend. Either way, + // the correct tip has been set in `self.read_only_marf` that `.get_block_at_height()` + // will work as expected. + self.read_only_marf.get_block_at_height(height) + } + + /// Get the block ID of the inner MARF's open chain tip. + /// If the tip points to the ephemeral MARF, then use that MARF. + /// Otherwise, use the disk-backed one. + fn get_open_chain_tip(&mut self) -> StacksBlockId { + if let EphemeralTip::RAM(..) = &self.open_tip { + return self + .ephemeral_marf + .get_open_chain_tip() + .expect("Attempted to get the open chain tip from an unopened context.") + .clone(); + } + + self.read_only_marf.get_open_chain_tip() + } + + /// Get the height of the inner MARF's open chain tip. + /// If the tip points to the ephemeral MARF, then use that MARF. + /// Otherwise, use the disk-backed one. + fn get_open_chain_tip_height(&mut self) -> u32 { + if let EphemeralTip::RAM(..) = &self.open_tip { + return self + .ephemeral_marf + .get_open_chain_tip_height() + .expect("Attempted to get the open chain tip from an unopened context.") + + self.base_tip_height + + 1; + } + + self.read_only_marf.get_open_chain_tip_height() + } + + /// Get the block height of the current open chain tip. 
+ /// If the tip points to the ephemeral MARF, then use that MARF. + /// Otherwise, use the disk-backed one. + fn get_current_block_height(&mut self) -> u32 { + let height_opt = if let EphemeralTip::RAM(tip) = &self.open_tip { + match self.ephemeral_marf.get_block_height_of(tip, tip) { + Ok(Some(x)) => Some(x + self.base_tip_height + 1), + Ok(None) => { + let first_tip = StacksBlockId::new( + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + ); + if tip == &first_tip || tip == &StacksBlockId([0u8; 32]) { + // the current block height should always work, except if it's the first block + // height (in which case, the current chain tip should match the first-ever + // index block hash). + // In this case, this is the height of the base tip in the disk-backed MARF + return self.base_tip_height; + } + + // should never happen + let msg = format!( + "Failed to obtain current block height of {:?} (got None)", + &self.open_tip + ); + panic!("{}", &msg); + } + Err(e) => { + let msg = format!( + "Unexpected MARF failure: Failed to get current block height of {:?}: {:?}", + &self.open_tip, &e + ); + panic!("{}", &msg); + } + } + } else { + None + }; + + if let Some(height) = height_opt { + return height; + } + + self.read_only_marf.get_current_block_height() + } + + /// Write all (key, value) pairs to the ephemeral MARF. + /// Returns Ok(()) on success + /// Returns Err(..) on inner MARF errors. + fn put_all_data(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { + let mut keys = Vec::with_capacity(items.len()); + let mut values = Vec::with_capacity(items.len()); + + // we're only writing, so get rid of the temporary views and restore the data and metadata + // tables in the ephemeral MARF so this works. 
+ self.teardown_views(); + for (key, value) in items.into_iter() { + let marf_value = MARFValue::from_value(&value); + SqliteConnection::put( + self.ephemeral_marf.sqlite_tx(), + &marf_value.to_hex(), + &value, + ) + .unwrap_or_else(|e| { + panic!( + "FATAL: failed to insert side-store data {:?}: {:?}", + &value, &e + ) + }); + + keys.push(key); + values.push(marf_value); + } + self.ephemeral_marf + .insert_batch(&keys, values) + .unwrap_or_else(|e| { + panic!( + "FATAL: failed to insert ephemeral MARF key/value pairs: {:?}", + e + ) + }); + + // restore unified data and metadata views + self.setup_views(); + Ok(()) + } + + /// Get the hash of a contract and the block it was mined in, + /// given its fully-qualified identifier. + /// Returns Ok((block-id, sha512/256)) on success. + /// Returns Err(..) on DB error (including not-found) + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + sqlite_get_contract_hash(self, contract) + } + + /// Write contract metadata into the metadata table. + /// This method needs to tear down and restore the materialized view of the ephemeral marf's + /// metadata table in order to work correctly, since the ephemeral MARF will store the data. + /// + /// Returns Ok(()) on success + /// Returns Err(..) on failure. + fn insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + self.teardown_views(); + let res = sqlite_insert_metadata(self, contract, key, value); + self.setup_views(); + res + } + + /// Load up metadata from the metadata table (materialized view) in the ephemeral MARF + /// for a given contract and metadata key. + /// Returns Ok(Some(value)) if the metadata exists + /// Returns Ok(None) if the metadata does not exist + /// Returns Err(..) 
on failure + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata(self, contract, key) + } + + /// Load up metadata at a specific block height from the metadata table (materialized view) in + /// the ephemeral MARF for a given contract and metadata key. + /// Returns Ok(Some(value)) if the metadata exists + /// Returns Ok(None) if the metadata does not exist + /// Returns Err(..) on failure + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + sqlite_get_metadata_manual(self, at_height, contract, key) + } +} + +impl WritableMarfStore for PersistentWritableMarfStore<'_> {} +impl WritableMarfStore for EphemeralMarfStore<'_> {} + +/// This trait exists so we can implement `ClarityMarfStore`, `ClarityMarfStoreTransaction`, and +/// `WritableMarfStore` for `Box`. We need +/// `Box` because `dyn WritableMarfStore` doesn't have a size known at +/// compile time (so it cannot be Sized). But then we'd need it to implement `WritableMarfStore`, +/// which is tricky because some of `ClartyMarfStoreTransaction`'s functions take an instance +/// `self` instead of a reference. Because we don't know the size of `self` at compile-time, we +/// have to employ a layer of indirection. +/// +/// To work around this, `WritableMarfStore` is composed of `BoxedClarityMarfStoreTransaction` +/// below, and we have a blanket implementation of `BoxedClarityMarfStoreTransaction` for any +/// `T: ClarityMarfStoreTransaction`. This in turn allows us to implement +/// `ClarityMarfStoreTransaction for `Box` -- we cast to +/// `ClarityMarfStoreTransaction` to call functions that take a reference to `self`, and we cast to +/// `BoxedClarityMarfStoreTransaction` to call functions that take an instance of `self`. In the +/// latter case, the instance will have a compile-time size since it will be a Box. 
The +/// implementation of `BoxedClarityMarfStoreTransaction` just forwards the call to the +/// corresponding function in `ClarityMarfStoreTransaction` with a reference to the boxed instance. +pub trait BoxedClarityMarfStoreTransaction { + fn boxed_drop_current_trie(self: Box); + fn boxed_drop_unconfirmed(self: Box) -> InterpreterResult<()>; + fn boxed_commit_to_processed_block( + self: Box, + target: &StacksBlockId, + ) -> InterpreterResult<()>; + fn boxed_commit_to_mined_block( + self: Box, + target: &StacksBlockId, + ) -> InterpreterResult<()>; + fn boxed_commit_unconfirmed(self: Box); + + #[cfg(test)] + fn boxed_test_commit(self: Box); +} + +impl BoxedClarityMarfStoreTransaction for T { + fn boxed_drop_current_trie(self: Box) { + ::drop_current_trie(*self) + } + + fn boxed_drop_unconfirmed(self: Box) -> InterpreterResult<()> { + ::drop_unconfirmed(*self) + } + + fn boxed_commit_to_processed_block( + self: Box, + target: &StacksBlockId, + ) -> InterpreterResult<()> { + ::commit_to_processed_block(*self, target) + } + + fn boxed_commit_to_mined_block( + self: Box, + target: &StacksBlockId, + ) -> InterpreterResult<()> { + ::commit_to_mined_block(*self, target) + } + + fn boxed_commit_unconfirmed(self: Box) { + ::commit_unconfirmed(*self) + } + + #[cfg(test)] + fn boxed_test_commit(self: Box) { + ::test_commit(*self) + } +} + +impl<'a> ClarityMarfStoreTransaction for Box { + fn commit_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()> { + ::commit_metadata_for_trie( + &mut **self, + target, + ) + } + + fn drop_metadata_for_trie(&mut self, target: &StacksBlockId) -> InterpreterResult<()> { + ::drop_metadata_for_trie( + &mut **self, + target, + ) + } + + fn seal_trie(&mut self) -> TrieHash { + ::seal_trie(&mut **self) + } + + fn drop_current_trie(self) { + ::boxed_drop_current_trie(self) + } + + fn drop_unconfirmed(self) -> InterpreterResult<()> { + ::boxed_drop_unconfirmed(self) + } + fn commit_to_processed_block(self, target: 
&StacksBlockId) -> InterpreterResult<()> { + ::boxed_commit_to_processed_block( + self, target, + ) + } + + fn commit_to_mined_block(self, target: &StacksBlockId) -> InterpreterResult<()> { + ::boxed_commit_to_mined_block( + self, target, + ) + } + + fn commit_unconfirmed(self) { + ::boxed_commit_unconfirmed(self) + } + + #[cfg(test)] + fn test_commit(self) { + ::boxed_test_commit(self) + } +} + +impl<'a> ClarityBackingStore for Box { + fn put_all_data(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { + ::put_all_data(&mut **self, items) + } + + fn get_data(&mut self, key: &str) -> InterpreterResult> { + ::get_data(&mut **self, key) + } + + fn get_data_from_path(&mut self, hash: &TrieHash) -> InterpreterResult> { + ::get_data_from_path(&mut **self, hash) + } + + fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { + ::get_data_with_proof(&mut **self, key) + } + + fn get_data_with_proof_from_path( + &mut self, + hash: &TrieHash, + ) -> InterpreterResult)>> { + ::get_data_with_proof_from_path( + &mut **self, + hash, + ) + } + + fn set_block_hash(&mut self, bhh: StacksBlockId) -> InterpreterResult { + ::set_block_hash(&mut **self, bhh) + } + + fn get_block_at_height(&mut self, height: u32) -> Option { + ::get_block_at_height(&mut **self, height) + } + + fn get_current_block_height(&mut self) -> u32 { + ::get_current_block_height(&mut **self) + } + + fn get_open_chain_tip_height(&mut self) -> u32 { + ::get_open_chain_tip_height(&mut **self) + } + + fn get_open_chain_tip(&mut self) -> StacksBlockId { + ::get_open_chain_tip(&mut **self) + } + + fn get_side_store(&mut self) -> &Connection { + ::get_side_store(&mut **self) + } + + fn get_cc_special_cases_handler(&self) -> Option { + ::get_cc_special_cases_handler(&**self) + } + + fn get_contract_hash( + &mut self, + contract: &QualifiedContractIdentifier, + ) -> InterpreterResult<(StacksBlockId, Sha512Trunc256Sum)> { + ::get_contract_hash(&mut **self, contract) + } + + fn 
insert_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + value: &str, + ) -> InterpreterResult<()> { + ::insert_metadata( + &mut **self, + contract, + key, + value, + ) + } + + fn get_metadata( + &mut self, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + ::get_metadata(&mut **self, contract, key) + } + + fn get_metadata_manual( + &mut self, + at_height: u32, + contract: &QualifiedContractIdentifier, + key: &str, + ) -> InterpreterResult> { + ::get_metadata_manual( + &mut **self, + at_height, + contract, + key, + ) + } +} + +impl<'a> ClarityMarfStore for Box {} +impl<'a> WritableMarfStore for Box {} diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 6172141b19..a13702746a 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -39,7 +39,7 @@ use stacks_common::types::StacksEpochId; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::clarity_vm::clarity::ClarityInstance; -use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::marf::{ClarityMarfStore, ClarityMarfStoreTransaction, MarfedKV}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::util_lib::boot::boot_code_id; diff --git a/stackslib/src/clarity_vm/tests/ephemeral.rs b/stackslib/src/clarity_vm/tests/ephemeral.rs new file mode 100644 index 0000000000..0b77e753e6 --- /dev/null +++ b/stackslib/src/clarity_vm/tests/ephemeral.rs @@ -0,0 +1,808 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fs; + +use clarity::vm::ast::ASTRules; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::{ClarityName, ContractName}; +use proptest::prelude::*; +use rand::seq::SliceRandom; +use rand::thread_rng; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::types::chainstate::{ + StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, +}; +use stacks_common::types::Address; + +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::tests::node::TestStacker; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::index::marf::MARFOpenOpts; +use crate::chainstate::stacks::index::storage::TrieHashCalculationMode; +use crate::chainstate::stacks::index::ClarityMarfTrieId; +use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; +use crate::chainstate::stacks::{ + StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionVersion, +}; +use crate::clarity::vm::database::ClarityBackingStore; +use crate::clarity_vm::database::marf::{ClarityMarfStoreTransaction, MarfedKV}; +use crate::net::test::TestEventObserver; +use crate::net::tests::inv::nakamoto::make_nakamoto_peer_from_invs; +use crate::net::tests::{NakamotoBootPlan, 
NakamotoBootStep, NakamotoBootTenure}; +use crate::util_lib::strings::StacksString; + +/// Verify that an ephemeral MARF can be created off of an already-written block of an on-disk +/// MARF. +/// * Verify that keys inserted into the ephemeral MARF land in the RAM-backed MARF +/// * Verify that the ephemeral MARF store can read all keys inserted into the RAM-backed MARF, as +/// well as all keys in the disk-backed MARF. +/// * Verify that discarding the ephemeral MARF store leaves the disk-backed MARF unaltered (no new +/// keys) +#[test] +fn test_ephemeral_marf_store() { + let path = format!("/tmp/{}.marf", function_name!()); + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); + } + + let mut marfed_kv = MarfedKV::open( + &path, + None, + Some(MARFOpenOpts::new( + TrieHashCalculationMode::Deferred, + "noop", + false, + )), + ) + .unwrap(); + + // insert some key/value pairs into the disk-backed MARF + let mut blocks = vec![StacksBlockId::sentinel()]; + let mut block_data = vec![vec![]]; + + for blk in 0..10 { + let final_block_id = StacksBlockId([blk as u8; 32]); + let target_block_id = StacksBlockId([0xf0; 32]); + let mut keys_and_values = vec![]; + for k in 0..10 { + let key = format!("key-{}", blk * 10 + k); + let value = format!("value-{}", blk * 10 + k); + keys_and_values.push((key, value)); + } + let mut marf = marfed_kv.begin(blocks.last().as_ref().unwrap(), &target_block_id); + marf.put_all_data(keys_and_values.clone()).unwrap(); + marf.commit_to_processed_block(&final_block_id).unwrap(); + + blocks.push(final_block_id); + block_data.push(keys_and_values); + } + + // verify all keys are present at the right chain tips + for (i, block_id) in blocks.iter().enumerate() { + debug!("readonly: open block #{}: {}", i, block_id); + let mut marf_ro = marfed_kv.begin_read_only(Some(block_id)); + for j in 0..=i { + // all values up to those inserted in the block with this ID are present + let keys_and_values = &block_data[j]; + for (key, 
expected_value) in keys_and_values.iter() { + let value = marf_ro.get_data(key).unwrap().unwrap(); + assert_eq!(expected_value, &value); + debug!( + "readonly: at block #{} {}: {} == {}", + i, block_id, key, &value + ); + } + } + for j in i + 1..blocks.len() { + // all values afterwards are not present + let keys_and_values = &block_data[j]; + for (key, _) in keys_and_values.iter() { + assert!(marf_ro.get_data(key).unwrap().is_none()); + debug!("readonly: at block #{} {}: {} not mapped", i, block_id, key); + } + } + } + + // verify that we can read all keys with an ephemeral MARF opened to each block_id as its + // base_tip + for (i, block_id) in blocks.iter().enumerate() { + // skip opening ::sentinel() since it's not mapped + if i == 0 { + continue; + } + debug!("ephemeral: open block #{}: {}", i, block_id); + let ephemeral_tip = StacksBlockId([0xf0; 32]); + let mut marf_ephemeral = marfed_kv + .begin_ephemeral(&block_id, &ephemeral_tip) + .unwrap(); + for j in 0..=i { + // all values up to those inserted in the block with this ID are present + let keys_and_values = &block_data[j]; + for (key, expected_value) in keys_and_values.iter() { + let value = marf_ephemeral.get_data(key).unwrap().unwrap(); + assert_eq!(expected_value, &value); + debug!( + "ephemeral: at block #{} {}: {} == {}", + i, block_id, key, &value + ); + } + } + for j in i + 1..blocks.len() { + // all values afterwards are not present + let keys_and_values = &block_data[j]; + for (key, _) in keys_and_values.iter() { + assert!(marf_ephemeral.get_data(key).unwrap().is_none()); + debug!( + "ephemeral: at block #{} {}: {} not mapped", + i, block_id, key + ); + } + } + } + + // create one block at each base_tip and add ephemeral key/value pairs to it. + // verify that we can read them back as long as the ephemeral tx is open, and + // verify that we can read all other keys. 
+ // verify that the ephemeral data is dropped along with the ephemeral tx + for (i, block_id) in blocks.iter().enumerate() { + // skip opening ::sentinel() since it's not mapped + if i == 0 { + continue; + } + let ephemeral_tip = StacksBlockId([0xf0; 32]); + let final_block_id = StacksBlockId([i as u8; 32]); + + let mut keys_and_values = vec![]; + for k in 0..10 { + let key = format!("ephemeral-key-{}", i * 10 + k); + let value = format!("ephemeral-value-{}", i * 10 + k); + keys_and_values.push((key, value)); + } + debug!( + "ephemeral: open block #{}: {} --> {}", + i, block_id, &ephemeral_tip + ); + let mut marf_ephemeral = marfed_kv + .begin_ephemeral(&block_id, &ephemeral_tip) + .unwrap(); + marf_ephemeral + .put_all_data(keys_and_values.clone()) + .unwrap(); + + // can read back all ephemeral data + for (key, expected_value) in keys_and_values.iter() { + let value = marf_ephemeral.get_data(key).unwrap().unwrap(); + assert_eq!(expected_value, &value); + debug!( + "ephemeral: at block #{}: {} --> {}: {} == {}", + i, block_id, &ephemeral_tip, key, &value + ); + } + + // can read back all disk-backed data represented up to the base_tip + for j in 0..=i { + // all values up to those inserted in the block with this ID are present + let keys_and_values = &block_data[j]; + for (key, expected_value) in keys_and_values.iter() { + let value = marf_ephemeral.get_data(key).unwrap().unwrap(); + assert_eq!(expected_value, &value); + debug!( + "ephemeral: at block #{} {} --> {}: {} == {}", + i, block_id, &ephemeral_tip, key, &value + ); + } + } + + // cannot read data beyond the base tip + for j in i + 1..blocks.len() { + // all values afterwards are not present + let keys_and_values = &block_data[j]; + for (key, _) in keys_and_values.iter() { + assert!(marf_ephemeral.get_data(key).unwrap().is_none()); + debug!( + "ephemeral: at block #{} {} --> {}: {} not mapped", + i, block_id, &ephemeral_tip, key + ); + } + } + + // can read all ephemeral values and all disk-backed values 
up to base_tip in random order + let mut all_keys_and_values: Vec<(String, String)> = block_data[0..=i] + .iter() + .map(|keys_and_values| keys_and_values.clone()) + .flatten() + .collect(); + + all_keys_and_values.append(&mut keys_and_values.clone()); + all_keys_and_values.shuffle(&mut thread_rng()); + for (key, expected_value) in all_keys_and_values.iter() { + let value = marf_ephemeral.get_data(key).unwrap().unwrap(); + assert_eq!(expected_value, &value); + debug!( + "ephemeral: at block #{} {} --> {} (random): {} == {}", + i, block_id, &ephemeral_tip, key, &value + ); + } + + // "commit" the data + marf_ephemeral + .commit_to_processed_block(&final_block_id) + .unwrap(); + + // data is _not_ persisted + let mut marf_ephemeral = marfed_kv + .begin_ephemeral(&block_id, &ephemeral_tip) + .unwrap(); + for (key, _) in keys_and_values.iter() { + assert!(marf_ephemeral.get_data(key).unwrap().is_none()); + debug!( + "ephemeral: at block #{}: {} --> {} post-commit: {} not mapped after commit", + i, block_id, &ephemeral_tip, key + ); + } + + // can still read back all disk-backed data represented up to the base_tip + for j in 0..=i { + // all values up to those inserted in the block with this ID are present + let keys_and_values = &block_data[j]; + for (key, expected_value) in keys_and_values.iter() { + let value = marf_ephemeral.get_data(key).unwrap().unwrap(); + assert_eq!(expected_value, &value); + debug!( + "ephemeral: at block #{} {} --> {} post-commit: {} == {}", + i, block_id, &ephemeral_tip, key, &value + ); + } + } + + // cannot still read data beyond the base tip + for j in i + 1..blocks.len() { + // all values afterwards are not present + let keys_and_values = &block_data[j]; + for (key, _) in keys_and_values.iter() { + assert!(marf_ephemeral.get_data(key).unwrap().is_none()); + debug!( + "ephemeral: at block #{} {} --> {} post-commit: {} not mapped", + i, block_id, &ephemeral_tip, key + ); + } + } + } +} + +fn replay_block( + sortdb: &SortitionDB, + 
chainstate: &mut StacksChainState, + original_block: NakamotoBlock, + observer: &TestEventObserver, +) { + test_debug!( + "Replay block {} (id {}) ephemerally: {:?}", + &original_block.header.block_hash(), + &original_block.header.block_id(), + &original_block + ); + // open sortition view to the current burn view. + // If the block has a TenureChange with an Extend cause, then the burn view is whatever is + // indicated in the TenureChange. + // Otherwise, it's the same as the block's parent's burn view. + let parent_stacks_header = NakamotoChainState::get_block_header( + chainstate.db(), + &original_block.header.parent_block_id, + ) + .expect("FATAL: failed to find parent stacks header") + .expect("FATAL: no parent found"); + + let burn_view_consensus_hash = + NakamotoChainState::get_block_burn_view(sortdb, &original_block, &parent_stacks_header) + .expect("FATAL: could not get burn block view"); + + let sort_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &burn_view_consensus_hash) + .expect("FATAL: could not load block snapshot") + .expect("FATAL: no such snapshot for burn view"); + + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip.sortition_id); + + let tenure_change = original_block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::TenureChange(..))); + let coinbase = original_block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::Coinbase(..))); + let tenure_cause = tenure_change.and_then(|tx| match &tx.payload { + TransactionPayload::TenureChange(tc) => Some(tc.cause), + _ => None, + }); + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &original_block.header.consensus_hash, + original_block.header.burn_spent, + tenure_change, + coinbase, + original_block.header.pox_treatment.len(), + None, + ) + .unwrap(); + + let mut miner_tenure_info = builder + .load_ephemeral_tenure_info(chainstate, &burn_dbconn, tenure_cause) + .unwrap(); + let burn_chain_height = 
miner_tenure_info.burn_tip_height; + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .unwrap(); + + let mut receipts = vec![]; + + for (i, tx) in original_block.txs.iter().enumerate() { + let tx_len = tx.tx_len(); + + let tx_result = builder.try_mine_tx_with_len( + &mut tenure_tx, + tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + None, + ); + let err = match &tx_result { + TransactionResult::Success(_) => Ok(()), + TransactionResult::Skipped(ref s) => Err(format!("tx {i} skipped: {}", &s.error)), + TransactionResult::ProcessingError(e) => { + Err(format!("Error processing tx {i}: {}", &e.error)) + } + TransactionResult::Problematic(ref p) => { + Err(format!("Problematic tx {i}: {}", &p.error)) + } + }; + if let Err(reason) = err { + error!( + "Failed to replay block"; + "reason" => %reason, + "tx" => ?tx, + ); + panic!(); + } + let mut receipt = tx_result.unwrap().1; + receipt.tx_index = i as u32; + receipts.push(receipt); + } + + let _block = builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); + + // NOTE: the block hash (state root hash) will be *different* from what was originally computed. + // This is okay, however, since this API is only meant for extracting block receipts. So, as + // long as all Clarity code in the ephemeral MARF behaves the same, then it's fine that the + // state root hash (which is not visible in Clarity) never matches. + + let observed_blocks = observer.get_blocks(); + let mut found = false; + for block in observed_blocks { + if block.metadata.index_block_hash() == original_block.header.block_id() { + assert_eq!(block.receipts, receipts); + found = true; + } + } + assert!(found); +} + +/// Verify that we can replay nakamoto blocks and get the same block hash (including state root +/// hash). +/// +/// Note that this does not fully test this behavior -- specifically, all of the blocks will +/// contain only STX transfers. 
There are no smart contact or contract-call blocks, and thus no +/// exercizing of Clarity DB functions. +#[test] +fn test_ephemeral_nakamoto_block_replay_simple() { + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full reward cycle + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let rc_len = 10u64; + let mut peer = make_nakamoto_peer_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + ); + + // read out all Nakamoto blocks + let sortdb = peer.sortdb.take().unwrap(); + let mut stacks_node = peer.stacks_node.take().unwrap(); + let naka_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let tip_id = naka_tip.index_block_hash(); + + let sortitions = SortitionDB::get_all_snapshots(&sortdb).unwrap(); + let mut all_nakamoto_blocks = vec![]; + + for sort in sortitions { + let nakamoto_db = stacks_node.chainstate.nakamoto_blocks_db(); + let mut nakamoto_blocks = nakamoto_db + .get_all_blocks_in_tenure(&sort.consensus_hash, &tip_id) + .unwrap(); + all_nakamoto_blocks.append(&mut nakamoto_blocks); + } + + for naka_block in all_nakamoto_blocks { + replay_block(&sortdb, &mut stacks_node.chainstate, naka_block, &observer); + } +} + +/// Test block replay with contract-calls which exercise the clarity DB +#[test] +fn test_ephemeral_nakamoto_block_replay_smart_contract() { + let test_name = function_name!(); + let observer = TestEventObserver::new(); + + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let initial_balances = vec![(addr.to_account_principal(), 1_000_000)]; + + let code_body = r#" +(define-constant 
RECIPIENT 'ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM) +(define-map test-data-map uint uint) +(define-data-var test-var uint u0) +(define-fungible-token stackaroos) +(define-non-fungible-token stacka-nfts uint) +(define-data-var start-height uint stacks-block-height) + +(define-private (test-get-burn-block-info? (height uint)) + (begin + (print (get-burn-block-info? header-hash height)) + (print (get-burn-block-info? pox-addrs height)) + true)) + +(define-private (test-get-stacks-block-info? (height uint)) + (begin + (print (get-stacks-block-info? id-header-hash height)) + (print (get-stacks-block-info? header-hash height)) + (print (get-stacks-block-info? time height)) + true)) + +(define-private (test-get-tenure-info? (height uint)) + (begin + (print (get-tenure-info? burnchain-header-hash height)) + (print (get-tenure-info? miner-address height)) + (print (get-tenure-info? time height)) + (print (get-tenure-info? vrf-seed height)) + (print (get-tenure-info? block-reward height)) + (print (get-tenure-info? miner-spend-total height)) + (print (get-tenure-info? miner-spend-winner height)) + true)) + +(define-read-only (test-readonly-map (height uint)) + (begin + (print (map-get? test-data-map height)) + true)) + +(define-private (test-map (height uint)) + (begin + (test-readonly-map height) + (print (map-set test-data-map (+ u1 height) (+ u1 height))) + true)) + +(define-read-only (test-readonly-data-var (height uint)) + (begin + (print (var-get test-var)) + true)) + +(define-private (test-data-var (height uint)) + (begin + (test-readonly-data-var height) + (print (var-set test-var height)) + true)) + +(define-read-only (test-readonly-ft (height uint) (user principal)) + (begin + (print (ft-get-balance stackaroos user)) + (print (ft-get-supply stackaroos)) + true)) + +(define-private (test-ft (height uint)) + (begin + (test-readonly-ft height tx-sender) + (test-readonly-ft height RECIPIENT) + (print (match (ft-mint? 
stackaroos height tx-sender) success success failure false)) + (test-readonly-ft height tx-sender) + (test-readonly-ft height RECIPIENT) + (print (match (ft-transfer? stackaroos height tx-sender RECIPIENT) success success failure false)) + (test-readonly-ft height tx-sender) + (test-readonly-ft height RECIPIENT) + true)) + +(define-read-only (test-readonly-nft (height uint)) + (begin + (print (nft-get-owner? stacka-nfts height)) + true)) + +(define-private (test-nft (height uint)) + (begin + (print (match (nft-mint? stacka-nfts height tx-sender) success success failure false)) + (test-readonly-nft height) + (print (match (nft-transfer? stacka-nfts height tx-sender RECIPIENT) success success failure false)) + (test-readonly-nft height) + true)) + +(define-read-only (test-readonly-stx-account (user principal)) + (begin + (print (stx-get-balance user)) + (print (stx-account user)) + true)) + +(define-private (test-stx-account (height uint)) + (begin + (test-readonly-stx-account tx-sender) + (test-readonly-stx-account RECIPIENT) + (print (match (stx-transfer? height tx-sender RECIPIENT) success success failure false)) + (print (match (stx-transfer-memo? height tx-sender RECIPIENT 0x01) success success failure false)) + (test-readonly-stx-account tx-sender) + (test-readonly-stx-account RECIPIENT) + true)) + +(define-read-only (test-readonly-clarity-db-funcs) + (let ( + (prev-stacks-block-height (if (> stacks-block-height (var-get start-height)) (- stacks-block-height u1) stacks-block-height)) + (prev-burn-block-height (if (> burn-block-height u0) (- burn-block-height u1) burn-block-height)) + ) + (test-get-burn-block-info? burn-block-height) + (test-get-burn-block-info? prev-burn-block-height) + + (test-get-stacks-block-info? stacks-block-height) + (test-get-stacks-block-info? prev-stacks-block-height) + + (test-get-tenure-info? stacks-block-height) + (test-get-tenure-info? 
prev-stacks-block-height) + + (test-readonly-map stacks-block-height) + (test-readonly-map prev-stacks-block-height) + + (test-readonly-data-var stacks-block-height) + (test-readonly-data-var prev-stacks-block-height) + + (test-readonly-ft stacks-block-height tx-sender) + (test-readonly-ft stacks-block-height RECIPIENT) + (test-readonly-ft prev-stacks-block-height tx-sender) + (test-readonly-ft prev-stacks-block-height RECIPIENT) + + (test-readonly-nft stacks-block-height) + (test-readonly-nft prev-stacks-block-height) + + (test-readonly-stx-account tx-sender) + (test-readonly-stx-account RECIPIENT) + + true)) + +(define-private (test-clarity-db-funcs) + (let ( + (prev-stacks-block-height (if (> stacks-block-height (var-get start-height)) (- stacks-block-height u1) stacks-block-height)) + (prev-burn-block-height (if (> burn-block-height u0) (- burn-block-height u1) burn-block-height)) + ) + (test-map prev-stacks-block-height) + (test-map stacks-block-height) + + (test-data-var prev-stacks-block-height) + (test-data-var stacks-block-height) + + (test-ft stacks-block-height) + (test-nft stacks-block-height) + (test-stx-account stacks-block-height) + + true +)) + +(define-read-only (test-readonly-clarity-db-funcs-at-prev-block) + (let ( + (prev-block-opt (if (> stacks-block-height (var-get start-height)) (get-stacks-block-info? 
id-header-hash (- stacks-block-height u1)) none)) + ) + (match prev-block-opt + prev-block + (at-block prev-block (test-readonly-clarity-db-funcs)) + true))) + +(define-public (test-all) + (begin + (test-readonly-clarity-db-funcs) + (test-clarity-db-funcs) + (test-readonly-clarity-db-funcs-at-prev-block) + (ok true))) +"#; + + let contract_deploy = || { + let smart_contract_payload = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from("test-clarity-db").unwrap(), + code_body: StacksString::from_str(&code_body).expect("FATAL: invalid code body"), + }, + None, + ); + + let auth = TransactionAuth::from_p2pkh(&private_key).unwrap(); + let mut smart_contract = + StacksTransaction::new(TransactionVersion::Testnet, auth, smart_contract_payload); + + smart_contract.chain_id = 0x80000000; + smart_contract.anchor_mode = TransactionAnchorMode::OnChainOnly; + smart_contract.post_condition_mode = TransactionPostConditionMode::Allow; + smart_contract.set_tx_fee(code_body.len() as u64); + smart_contract.auth.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&smart_contract); + tx_signer.sign_origin(&private_key).unwrap(); + let smart_contract_signed = tx_signer.get_tx().unwrap(); + + smart_contract_signed + }; + + let mut sender_nonce = 1; + let mut next_contract_call = || { + let cc_payload = TransactionPayload::ContractCall(TransactionContractCall { + address: addr.clone(), + contract_name: ContractName::try_from("test-clarity-db").unwrap(), + function_name: ClarityName::try_from("test-all").unwrap(), + function_args: vec![], + }); + + let auth = TransactionAuth::from_p2pkh(&private_key).unwrap(); + let mut cc = StacksTransaction::new(TransactionVersion::Testnet, auth, cc_payload); + + cc.chain_id = 0x80000000; + cc.anchor_mode = TransactionAnchorMode::OnChainOnly; + cc.post_condition_mode = TransactionPostConditionMode::Allow; + cc.set_tx_fee(1); + cc.auth.set_origin_nonce(sender_nonce); + sender_nonce += 1; + 
+ let mut tx_signer = StacksTransactionSigner::new(&cc); + tx_signer.sign_origin(&private_key).unwrap(); + let cc_signed = tx_signer.get_tx().unwrap(); + + cc_signed + }; + + let mut boot_tenures = vec![]; + + // deploy + boot_tenures.push(NakamotoBootTenure::Sortition(vec![ + NakamotoBootStep::Block(vec![contract_deploy(), next_contract_call()]), + NakamotoBootStep::Block(vec![next_contract_call()]), + NakamotoBootStep::Block(vec![next_contract_call(), next_contract_call()]), + ])); + + for i in 1..10 { + if i % 2 == 1 { + boot_tenures.push(NakamotoBootTenure::NoSortition(vec![ + NakamotoBootStep::Block(vec![next_contract_call()]), + NakamotoBootStep::Block(vec![next_contract_call(), next_contract_call()]), + ])); + } else { + boot_tenures.push(NakamotoBootTenure::Sortition(vec![ + NakamotoBootStep::Block(vec![next_contract_call()]), + NakamotoBootStep::Block(vec![next_contract_call(), next_contract_call()]), + ])); + } + } + + // make malleablized blocks + let (test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); + + let plan = NakamotoBootPlan::new(test_name) + .with_private_key(private_key) + .with_test_signers(test_signers) + .with_test_stackers(test_stackers) + .with_pox_constants(10, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances); + + let (mut peer, _other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); + + // read out all Nakamoto blocks + let sortdb = peer.sortdb.take().unwrap(); + let mut stacks_node = peer.stacks_node.take().unwrap(); + let naka_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let tip_id = naka_tip.index_block_hash(); + + let sortitions = SortitionDB::get_all_snapshots(&sortdb).unwrap(); + let mut all_nakamoto_blocks = vec![]; + + for sort in sortitions { + let nakamoto_db = stacks_node.chainstate.nakamoto_blocks_db(); + let mut 
nakamoto_blocks = nakamoto_db + .get_all_blocks_in_tenure(&sort.consensus_hash, &tip_id) + .unwrap(); + all_nakamoto_blocks.append(&mut nakamoto_blocks); + } + + all_nakamoto_blocks + .sort_by(|blk1, blk2| blk1.header.chain_length.cmp(&blk2.header.chain_length)); + + for naka_block in all_nakamoto_blocks { + replay_block(&sortdb, &mut stacks_node.chainstate, naka_block, &observer); + } +} + +#[test] +fn prop_ephemeral_tip_height_matches_current() { + proptest!(|(n in 1usize..=12)| { + let path = format!("/tmp/{}.marf", function_name!()); + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); + } + + let mut marfed_kv = MarfedKV::open( + &path, + None, + Some(MARFOpenOpts::new( + TrieHashCalculationMode::Deferred, + "noop", + false, + )), + ) + .unwrap(); + + let target_block_id = StacksBlockId([0xf0; 32]); + let mut tip = StacksBlockId::sentinel(); + for blk in 0..n { + let final_block_id = StacksBlockId([(blk as u8) + 1; 32]); + let mut marf = marfed_kv.begin(&tip, &target_block_id); + let keys_and_values = vec![( + format!("key-{}", blk), + format!("value-{}", blk) + )]; + marf.put_all_data(keys_and_values).unwrap(); + marf.commit_to_processed_block(&final_block_id).unwrap(); + tip = final_block_id; + } + + let ephemeral_tip = StacksBlockId([0xee; 32]); + let mut marf_ephemeral = + marfed_kv.begin_ephemeral(&tip, &ephemeral_tip).unwrap(); + + // Invariant: ephemeral tip height equals current height. 
+ let height = marf_ephemeral.get_current_block_height(); + let open_height = marf_ephemeral.get_open_chain_tip_height(); + prop_assert_eq!(height, open_height); + }); +} + +// Test TODO: +// * stacks 2.x test diff --git a/stackslib/src/clarity_vm/tests/events.rs b/stackslib/src/clarity_vm/tests/events.rs index 0dfc16b5aa..56f2e70106 100644 --- a/stackslib/src/clarity_vm/tests/events.rs +++ b/stackslib/src/clarity_vm/tests/events.rs @@ -28,7 +28,7 @@ use stacks_common::types::StacksEpochId; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::chainstate::stacks::StacksBlockHeader; use crate::clarity_vm::clarity::ClarityInstance; -use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::marf::{ClarityMarfStore, MarfedKV}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; fn helper_execute(contract: &str, method: &str) -> (Value, Vec) { diff --git a/stackslib/src/clarity_vm/tests/forking.rs b/stackslib/src/clarity_vm/tests/forking.rs index 6dc3305562..0c6da5c6b9 100644 --- a/stackslib/src/clarity_vm/tests/forking.rs +++ b/stackslib/src/clarity_vm/tests/forking.rs @@ -29,7 +29,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::StacksEpochId; use crate::chainstate::stacks::index::ClarityMarfTrieId; -use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::marf::{ClarityMarfStore, ClarityMarfStoreTransaction, MarfedKV}; const p1_str: &str = "'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"; diff --git a/stackslib/src/clarity_vm/tests/mod.rs b/stackslib/src/clarity_vm/tests/mod.rs index 5855d61f31..e427922457 100644 --- a/stackslib/src/clarity_vm/tests/mod.rs +++ b/stackslib/src/clarity_vm/tests/mod.rs @@ -18,6 +18,7 @@ pub mod analysis_costs; pub mod ast; pub mod contracts; pub mod costs; +pub mod ephemeral; pub mod epoch_switch; pub mod events; pub mod forking; diff --git a/stackslib/src/clarity_vm/tests/simple_tests.rs 
b/stackslib/src/clarity_vm/tests/simple_tests.rs index 0fb38cdf9e..5a7c03c380 100644 --- a/stackslib/src/clarity_vm/tests/simple_tests.rs +++ b/stackslib/src/clarity_vm/tests/simple_tests.rs @@ -7,7 +7,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::StacksEpochId; use crate::chainstate::stacks::index::ClarityMarfTrieId; -use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::marf::{ClarityMarfStore, ClarityMarfStoreTransaction, MarfedKV}; pub fn with_marfed_environment(f: F, top_level: bool) where diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 20437817f2..1b99a9f536 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -966,7 +966,11 @@ impl NakamotoBootPlan { // transactions processed in the same order assert_eq!(receipt.transaction.txid(), tx.txid()); // no CheckErrors - assert!(receipt.vm_error.is_none()); + assert!( + receipt.vm_error.is_none(), + "Receipt had a CheckErrors: {:?}", + &receipt + ); // transaction was not aborted post-hoc assert!(!receipt.post_condition_aborted); }