diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index ba73bc7f83..940910df66 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -3135,7 +3135,7 @@ fn block_proposal_api_endpoint() { .unwrap(); let burn_chain_height = miner_tenure_info.burn_tip_height; let mut tenure_tx = builder - .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .tenure_begin(&burn_dbconn, &mut miner_tenure_info, false) .unwrap(); let tx = make_stacks_transfer_serialized( diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index e679a61c98..979e67c1b8 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -384,6 +384,7 @@ impl NakamotoBlockBuilder { &mut self, burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerTenureInfo<'a>, + simulated: bool, ) -> Result, Error> { let Some(block_commit) = info.tenure_block_commit_opt.as_ref() else { return Err(Error::InvalidStacksBlock( @@ -412,6 +413,7 @@ impl NakamotoBlockBuilder { &self.header.pox_treatment, block_commit, &info.active_reward_set, + simulated, )?; self.matured_miner_rewards_opt = matured_miner_rewards_opt; Ok(clarity_tx) @@ -541,7 +543,7 @@ impl NakamotoBlockBuilder { let mut miner_tenure_info = builder.load_tenure_info(&mut chainstate, burn_dbconn, tenure_info.cause())?; let burn_chain_height = miner_tenure_info.burn_tip_height; - let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; + let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info, false)?; let tenure_budget = tenure_tx .block_limit() diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0fd647e646..b890d7131f 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -65,6 +65,7 @@ use super::stacks::{ Error as ChainstateError, StacksBlock, StacksTransaction, TenureChangePayload, TokenTransferMemo, TransactionPayload, TransactionVersion, }; +use crate::burnchains::db::BurnchainHeaderReader; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::operations::LeaderBlockCommitOp; @@ -3109,6 +3110,149 @@ impl NakamotoChainState { Ok(Vec::from_iter(epoch2_x)) } + pub fn get_block_headers_in_tenure_at_burnview( + db: &Connection, + tenure_id: &ConsensusHash, + burn_view: &ConsensusHash, + ) -> Result, ChainstateError> { + // see if we have a nakamoto block in this tenure + let qry = " + SELECT * + FROM nakamoto_block_headers + WHERE consensus_hash = ?1 + AND burn_view = ?2 + ORDER BY block_height + "; + let args = params![tenure_id, burn_view]; + let out = query_rows(db, qry, args)?; + if !out.is_empty() { + return Ok(out); + } + Err(ChainstateError::NoSuchBlockError) + } + + /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same + /// tenure. 
+    ///
+    /// Get the highest known block in the tenure whose sortition occurred at the given burn chain
+    /// height, with a canonical burn_view (i.e., a burn_view on the canonical sortition fork)
+    pub fn find_highest_known_block_header_in_tenure_by_height(
+        chainstate: &StacksChainState,
+        sort_db: &SortitionDB,
+        tenure_height: u64,
+    ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
+        let chainstate_db_conn = chainstate.db();
+
+        let candidates = Self::get_highest_known_block_header_in_tenure_by_height_at_each_burnview(
+            chainstate_db_conn,
+            tenure_height,
+        )?;
+
+        let canonical_sortition_handle = sort_db.index_handle_at_tip();
+        for candidate in candidates.into_iter() {
+            let Some(ref candidate_ch) = candidate.burn_view else {
+                // this is an epoch 2.x header, no burn view to check
+                return Ok(Some(candidate));
+            };
+            let in_canonical_fork = canonical_sortition_handle.processed_block(&candidate_ch)?;
+            if in_canonical_fork {
+                return Ok(Some(candidate));
+            }
+        }
+
+        // did not find any blocks in the tenure
+        Ok(None)
+    }
+
+    /// Like `find_highest_known_block_header_in_tenure_by_height`, but the tenure is identified
+    /// by the burn block hash of its sortition.
+    pub fn find_highest_known_block_header_in_tenure_by_block_hash(
+        chainstate: &StacksChainState,
+        sort_db: &SortitionDB,
+        tenure_block_hash: &BurnchainHeaderHash,
+    ) -> Result<Option<StacksHeaderInfo>, ChainstateError> {
+        let chainstate_db_conn = chainstate.db();
+
+        let candidates = Self::get_highest_known_block_header_in_tenure_by_block_hash_at_each_burnview(
+            chainstate_db_conn,
+            tenure_block_hash,
+        )?;
+
+        let canonical_sortition_handle = sort_db.index_handle_at_tip();
+        for candidate in candidates.into_iter() {
+            let Some(ref candidate_ch) = candidate.burn_view else {
+                // this is an epoch 2.x header, no burn view to check
+                return Ok(Some(candidate));
+            };
+            let in_canonical_fork = canonical_sortition_handle.processed_block(&candidate_ch)?;
+            if in_canonical_fork {
+                return Ok(Some(candidate));
+            }
+        }
+
+        // did not find any blocks in the tenure
+        Ok(None)
+    }
+
+    /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same
+    /// tenure.
+    ///
+    /// Get the highest known blocks in the tenure whose sortition occurred at the given burn
+    /// chain height, at each burn view
+    /// active in that tenure.
If there are ties at a given burn view, they will both be returned + fn get_highest_known_block_header_in_tenure_by_height_at_each_burnview( + db: &Connection, + tenure_height: u64, + ) -> Result, ChainstateError> { + // see if we have a nakamoto block in this tenure + let qry = " + SELECT h.* + FROM nakamoto_block_headers h + JOIN ( + SELECT burn_view, MAX(block_height) AS max_height + FROM nakamoto_block_headers + WHERE burn_header_height = ?1 + GROUP BY burn_view + ) maxed + ON h.burn_view = maxed.burn_view + AND h.block_height = maxed.max_height + WHERE h.burn_header_height = ?1 + ORDER BY h.block_height DESC, h.timestamp + "; + let args = params![tenure_height]; + let out = query_rows(db, qry, args)?; + if !out.is_empty() { + return Ok(out); + } + + Err(ChainstateError::NoSuchBlockError) + } + + fn get_highest_known_block_header_in_tenure_by_block_hash_at_each_burnview( + db: &Connection, + tenure_block_hash: &BurnchainHeaderHash, + ) -> Result, ChainstateError> { + // see if we have a nakamoto block in this tenure + let qry = " + SELECT h.* + FROM nakamoto_block_headers h + JOIN ( + SELECT burn_view, MAX(block_height) AS max_height + FROM nakamoto_block_headers + WHERE burn_header_hash = ?1 + GROUP BY burn_view + ) maxed + ON h.burn_view = maxed.burn_view + AND h.block_height = maxed.max_height + WHERE h.burn_header_hash = ?1 + ORDER BY h.block_height DESC, h.timestamp + "; + let args = params![tenure_block_hash]; + let out = query_rows(db, qry, args)?; + if !out.is_empty() { + return Ok(out); + } + + Err(ChainstateError::NoSuchBlockError) + } + /// Get the VRF proof for a Stacks block. /// For Nakamoto blocks, this is the VRF proof contained in the coinbase of the tenure-start /// block of the given tenure identified by the consensus hash. @@ -3978,6 +4122,7 @@ impl NakamotoChainState { block_bitvec: &BitVec<4000>, tenure_block_commit: &LeaderBlockCommitOp, active_reward_set: &RewardSet, + simulated: bool, ) -> Result, ChainstateError> { // this block's bitvec header must match the miner's block commit punishments Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?; @@ -3995,6 +4140,7 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, + simulated, ) } @@ -4093,6 +4239,7 @@ impl NakamotoChainState { block_bitvec, &tenure_block_commit, active_reward_set, + false, ) } @@ -4139,6 +4286,7 @@ impl NakamotoChainState { new_tenure: bool, coinbase_height: u64, tenure_extend: bool, + simulated: bool, ) -> Result, ChainstateError> { let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); let parent_sortition_id = sortition_dbconn @@ -4196,6 +4344,7 @@ impl NakamotoChainState { &parent_header_hash, &MINER_BLOCK_CONSENSUS_HASH, &MINER_BLOCK_HEADER_HASH, + simulated, ); // now that we have access to the ClarityVM, we can account for reward deductions from diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 59bcd2854a..aa2863439c 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -414,6 +414,7 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, + false, ) } } diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 16cacd3530..ee41fee6ba 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1007,7 +1007,7 @@ impl TestStacksNode { let mut miner_tenure_info = 
builder.load_tenure_info(&mut chainstate, burn_dbconn, tenure_cause)?; let burn_chain_height = miner_tenure_info.burn_tip_height; - let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; + let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info, false)?; for tx in txs.into_iter() { let tx_len = tx.tx_len(); match builder.try_mine_tx_with_len( diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 7fe642ee71..9997d1e68f 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5128,6 +5128,7 @@ impl StacksChainState { &parent_header_hash, &MINER_BLOCK_CONSENSUS_HASH, &MINER_BLOCK_HEADER_HASH, + false, ); clarity_tx.reset_cost(parent_block_cost.clone()); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 51b38fd4b8..93c0a24d30 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -304,7 +304,7 @@ impl StacksBlockHeaderTypes { StacksBlockHeaderTypes::Nakamoto(x) => x.block_hash(), } } - + pub fn is_first_mined(&self) -> bool { match self { StacksBlockHeaderTypes::Epoch2(x) => x.is_first_mined(), @@ -2023,6 +2023,7 @@ impl StacksChainState { parent_block: &BlockHeaderHash, new_consensus_hash: &ConsensusHash, new_block: &BlockHeaderHash, + simulated: bool, ) -> ClarityTx<'a, 'b> { let conf = chainstate_tx.config.clone(); StacksChainState::inner_clarity_tx_begin( @@ -2034,6 +2035,7 @@ impl StacksChainState { parent_block, new_consensus_hash, new_block, + simulated, ) } @@ -2057,6 +2059,7 @@ impl StacksChainState { parent_block, new_consensus_hash, new_block, + false, ) } @@ -2122,6 +2125,45 @@ impl StacksChainState { self.clarity_state.with_marf(f) } + pub fn with_simulated_clarity_tx( + &mut self, + burn_dbconn: &dyn BurnStateDB, + parent_block_id: &StacksBlockId, + new_block_id: &StacksBlockId, + to_do: F, + ) -> Option + where + F: FnOnce(&mut ClarityTx) -> R, + { + match NakamotoChainState::get_block_header(self.db(), parent_block_id) { + Ok(Some(_)) => {} + Ok(None) => { + return None; + } + Err(e) => { + warn!("Failed to query for {}: {:?}", parent_block_id, &e); + return None; + } + } + + let dbconfig = self.config(); + + let conn = self.clarity_state.begin_simulated_block( + parent_block_id, + new_block_id, + &self.state_index, + burn_dbconn, + ); + + let mut clarity_tx = ClarityTx { + block: conn, + config: dbconfig, + }; + let result = to_do(&mut clarity_tx); + clarity_tx.rollback_block(); + Some(result) + } + /// Run to_do on the state of the Clarity VM at the given chain tip. /// Returns Some(x: R) if the given parent_tip exists. 
/// Returns None if not @@ -2287,6 +2329,7 @@ impl StacksChainState { parent_block: &BlockHeaderHash, new_consensus_hash: &ConsensusHash, new_block: &BlockHeaderHash, + simulated: bool, ) -> ClarityTx<'a, 'b> { // mix consensus hash and stacks block header hash together, since the stacks block hash // it not guaranteed to be globally unique (but the pair is) @@ -2314,12 +2357,21 @@ impl StacksChainState { parent_block ); - let inner_clarity_tx = clarity_instance.begin_block( - &parent_index_block, - &new_index_block, - headers_db, - burn_dbconn, - ); + let inner_clarity_tx = if simulated { + clarity_instance.begin_simulated_block( + &parent_index_block, + &new_index_block, + headers_db, + burn_dbconn, + ) + } else { + clarity_instance.begin_block( + &parent_index_block, + &new_index_block, + headers_db, + burn_dbconn, + ) + }; test_debug!("Got clarity TX!"); ClarityTx { diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 54a183abab..f31af60b6e 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -396,6 +396,26 @@ impl<'a, T: MarfTrieId> MarfTransaction<'a, T> { self.inner_setup_extension(chain_tip, next_chain_tip, block_height, true) } + pub fn begin_simulated(&mut self, chain_tip: &T, next_chain_tip: &T) -> Result<(), Error> { + if self.storage.readonly() { + return Err(Error::ReadOnlyError); + } + if self.open_chain_tip.is_some() { + return Err(Error::InProgressError); + } + + self.storage.hide_block(next_chain_tip)?; + + if self.storage.has_block(next_chain_tip)? { + error!("Block data already exists: {}", next_chain_tip); + return Err(Error::ExistsError); + } + + let block_height = self.inner_get_extension_height(chain_tip, next_chain_tip)?; + MARF::extend_trie(&mut self.storage, next_chain_tip)?; + self.inner_setup_extension(chain_tip, next_chain_tip, block_height, true) + } + /// Set up the trie extension we're making. /// Sets storage pointer to chain_tip. /// Returns the height next_chain_tip would be at. @@ -1384,6 +1404,14 @@ impl MARF { }) } + pub fn begin_simulated_tx(&mut self) -> Result, Error> { + let storage = self.storage.transaction()?; + Ok(MarfTransaction { + storage, + open_chain_tip: &mut self.open_chain_tip, + }) + } + /// Target the MARF's storage at a given block. pub fn open_block(&mut self, block_hash: &T) -> Result<(), Error> { self.storage.connection().open_block(block_hash) diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 91f3342482..ec5981bced 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -2169,6 +2169,13 @@ impl TrieStorageConnection<'_, T> { } } + pub fn hide_block(&self, bhh: &T) -> Result { + match trie_sql::hide_block(&self.db, bhh) { + Ok(_) => Ok(true), + Err(e) => Err(e), + } + } + /// Is the given block in the marf_data DB table, and is it unconfirmed? 
pub fn has_unconfirmed_block(&self, bhh: &T) -> Result { match trie_sql::get_unconfirmed_block_identifier(&self.db, bhh) { diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 009f2b6888..5b470d5445 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -215,7 +215,16 @@ pub fn get_block_hash(conn: &Connection, local_id: u32) -> Result }) } -/// Write a serialized trie to sqlite +/// temporary delete references to the currently simulated block +pub fn hide_block(conn: &Connection, block_hash: &T) -> Result<(), Error> { + let args = params![block_hash]; + let mut s = conn.prepare("DELETE FROM marf_data WHERE block_hash = ?")?; + s.execute(args)?; + let mut s = conn.prepare("DELETE FROM metadata_table WHERE blockhash = ?")?; + s.execute(args)?; + Ok(()) +} + pub fn write_trie_blob( conn: &Connection, block_hash: &T, diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 6ef509bdc3..31d1276a77 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -338,6 +338,41 @@ impl ClarityInstance { } } + pub fn begin_simulated_block<'a, 'b>( + &'a mut self, + current: &StacksBlockId, + next: &StacksBlockId, + header_db: &'b dyn HeadersDB, + burn_state_db: &'b dyn BurnStateDB, + ) -> ClarityBlockConnection<'a, 'b> { + let mut datastore = self.datastore.begin_simulated(current, next); + + let epoch = Self::get_epoch_of(current, header_db, burn_state_db); + let cost_track = { + let mut clarity_db = datastore.as_clarity_db(&NULL_HEADER_DB, &NULL_BURN_STATE_DB); + Some( + LimitedCostTracker::new( + self.mainnet, + self.chain_id, + epoch.block_limit.clone(), + &mut clarity_db, + epoch.epoch_id, + ) + .expect("FAIL: problem instantiating cost tracking"), + ) + }; + + ClarityBlockConnection { + datastore, + header_db, + burn_state_db, + cost_track, + mainnet: self.mainnet, + chain_id: self.chain_id, + epoch: epoch.epoch_id, + } + } + pub fn begin_genesis_block<'a, 'b>( &'a mut self, current: &StacksBlockId, diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 80a8534af8..1eb2370465 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -219,6 +219,30 @@ impl MarfedKV { } } + pub fn begin_simulated<'a>( + &'a mut self, + current: &StacksBlockId, + next: &StacksBlockId, + ) -> WritableMarfStore<'a> { + let mut tx = self.marf.begin_tx().unwrap_or_else(|_| { + panic!( + "ERROR: Failed to begin new MARF block {} - {})", + current, next + ) + }); + tx.begin_simulated(current, next).unwrap_or_else(|e| { + panic!( + "ERROR: Failed to begin new MARF block {} - {} {}", + current, next, e + ) + }); + + WritableMarfStore { + chain_tip: next.clone(), + marf: tx, + } + } + pub fn begin_unconfirmed<'a>(&'a mut self, current: &StacksBlockId) -> WritableMarfStore<'a> { let mut tx = self.marf.begin_tx().unwrap_or_else(|_| { panic!( diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index d8443772bf..f0f56dd6f7 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -110,6 +110,7 @@ pub fn make_block( &parent.1, &block_consensus, &block_hash, + false, ); let new_tip_info = StacksHeaderInfo { diff --git a/stackslib/src/net/api/blocksimulate.rs b/stackslib/src/net/api/blocksimulate.rs new file mode 100644 index 0000000000..b34667df6b --- /dev/null +++ 
b/stackslib/src/net/api/blocksimulate.rs @@ -0,0 +1,340 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::Value; +use regex::{Captures, Regex}; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId, TrieHash}; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; + +use crate::burnchains::Txid; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::events::TransactionOrigin; +use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; +use crate::chainstate::stacks::{Error as ChainError, StacksTransaction, TransactionPayload}; +use crate::net::http::{ + parse_bytes, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttpResponse}; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Clone)] +pub struct RPCNakamotoBlockSimulateRequestHandler { + pub block_id: Option, +} + +impl RPCNakamotoBlockSimulateRequestHandler { + pub fn new() -> Self { + Self { block_id: None } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct RPCSimulatedBlockTransaction { + pub txid: Txid, + pub tx_index: u32, + pub data: Option, + pub hex: String, + pub result: Value, + pub stx_burned: u128, + pub execution_cost: ExecutionCost, + pub events: Vec, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct RPCSimulatedBlock { + pub block_id: StacksBlockId, + pub block_hash: BlockHeaderHash, + pub parent_block_id: StacksBlockId, + pub consensus_hash: ConsensusHash, + pub fees: u128, + pub tx_merkle_root: Sha512Trunc256Sum, + pub state_index_root: TrieHash, + pub timestamp: u64, + pub miner_signature: MessageSignature, + pub signer_signature: Vec, + pub transactions: Vec, + pub valid: bool, +} + +/// Decode the HTTP request +impl HttpRequest for RPCNakamotoBlockSimulateRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v3/blocks/simulate/(?P[0-9a-f]{64})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/blocks/simulate/:block_id" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_id_str = captures + .name("block_id") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to block ID group".to_string()) + })? + .as_str(); + + let block_id = StacksBlockId::from_hex(block_id_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + self.block_id = Some(block_id); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNakamotoBlockSimulateRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_id = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_id = self + .block_id + .take() + .ok_or(NetError::SendError("Missing `block_id`".into()))?; + + let simulated_block_res = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let Some((tenure_id, parent_block_id)) = chainstate + .nakamoto_blocks_db() + .get_tenure_and_parent_block_id(&block_id)? + else { + return Err(ChainError::NoSuchBlockError); + }; + + let staging_db_path = chainstate.get_nakamoto_staging_blocks_path()?; + let db_conn = + StacksChainState::open_nakamoto_staging_blocks(&staging_db_path, false)?; + let rowid = db_conn + .conn() + .get_nakamoto_block_rowid(&block_id)? + .ok_or(ChainError::NoSuchBlockError)?; + + let mut blob_fd = db_conn + .open_nakamoto_block(rowid, false) + .map_err(|e| { + let msg = format!("Failed to open Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) + .unwrap(); + + let block = NakamotoBlock::consensus_deserialize(&mut blob_fd) + .map_err(|e| { + let msg = format!("Failed to read Nakamoto block {}: {:?}", &block_id, &e); + warn!("{}", &msg); + msg + }) + .unwrap(); + + let burn_dbconn = match sortdb.index_handle_at_block(chainstate, &parent_block_id) { + Ok(burn_dbconn) => burn_dbconn, + Err(_) => return Err(ChainError::NoSuchBlockError), + }; + + let tenure_change = block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::TenureChange(..))); + let coinbase = block + .txs + .iter() + .find(|tx| matches!(tx.payload, TransactionPayload::Coinbase(..))); + let tenure_cause = tenure_change.and_then(|tx| match &tx.payload { + TransactionPayload::TenureChange(tc) => Some(tc.cause), + _ => None, + }); + + // let (block_fees, txs_receipts) = chainstate + // .with_simulated_clarity_tx(&burn_dbconn, &parent_block_id, &block_id, |_| { + let parent_stacks_header = + NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id) + .unwrap() + .unwrap(); + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &block.header.consensus_hash, + block.header.burn_spent, + tenure_change, + coinbase, + block.header.pox_treatment.len(), + None, + ) + .unwrap(); + + let mut miner_tenure_info = builder + .load_tenure_info(chainstate, &burn_dbconn, tenure_cause) + .unwrap(); + let burn_chain_height = miner_tenure_info.burn_tip_height; + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info, true) + .unwrap(); + + let mut block_fees: u128 = 0; + let mut 
txs_receipts = vec![]; + + for (i, tx) in block.txs.iter().enumerate() { + let tx_len = tx.tx_len(); + + let tx_result = builder.try_mine_tx_with_len( + &mut tenure_tx, + tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + None, + ); + let err = match tx_result { + TransactionResult::Success(tx_result) => { + txs_receipts.push(tx_result.receipt); + Ok(()) + } + _ => Err(format!("Problematic tx {i}")), + }; + if let Err(reason) = err { + panic!("Rejected block tx"); + } + + block_fees += tx.get_tx_fee() as u128; + } + + let simulated_block = + builder.mine_nakamoto_block(&mut tenure_tx, burn_chain_height); + + tenure_tx.rollback_block(); + + let block_hash = block.header.block_hash(); + + let mut simulated_block = RPCSimulatedBlock { + block_id, + block_hash, + parent_block_id, + consensus_hash: tenure_id, + fees: block_fees, + tx_merkle_root: block.header.tx_merkle_root, + state_index_root: block.header.state_index_root, + timestamp: block.header.timestamp, + miner_signature: block.header.miner_signature, + signer_signature: block.header.signer_signature, + transactions: vec![], + valid: block.header.state_index_root == simulated_block.header.state_index_root + && block.header.tx_merkle_root == simulated_block.header.tx_merkle_root, + }; + for receipt in txs_receipts { + let events = receipt + .events + .iter() + .enumerate() + .map(|(event_index, event)| { + event + .json_serialize(event_index, &receipt.transaction.txid(), true) + .unwrap() + }) + .collect(); + let transaction_data = match &receipt.transaction { + TransactionOrigin::Stacks(stacks) => Some(stacks.clone()), + TransactionOrigin::Burn(_) => None, + }; + let txid = receipt.transaction.txid(); + let transaction = RPCSimulatedBlockTransaction { + txid, + tx_index: receipt.tx_index, + data: transaction_data, + hex: receipt.transaction.serialize_to_dbstring(), + result: receipt.result, + stx_burned: receipt.stx_burned, + execution_cost: receipt.execution_cost, + events, + }; + simulated_block.transactions.push(transaction); + } + + Ok(simulated_block) + }); + + // start loading up the block + let simulated_block = match simulated_block_res { + Ok(simulated_block) => simulated_block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block {}: {:?}\n", &block_id, &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let body = HttpResponseContents::try_from_json(&simulated_block)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNakamotoBlockSimulateRequestHandler { + /// Decode this response from a byte stream. 
This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} diff --git a/stackslib/src/net/api/gettenureblocks.rs b/stackslib/src/net/api/gettenureblocks.rs new file mode 100644 index 0000000000..e120b6cf67 --- /dev/null +++ b/stackslib/src/net/api/gettenureblocks.rs @@ -0,0 +1,240 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::types::chainstate::StacksBlockId; +use regex::{Captures, Regex}; +use serde_json; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{request, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Clone)] +pub struct RPCNakamotoTenureBlocksRequestHandler { + pub(crate) consensus_hash: Option, +} + +impl RPCNakamotoTenureBlocksRequestHandler { + pub fn new() -> Self { + Self { + consensus_hash: None, + } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct RPCTenureBlock { + pub block_id: StacksBlockId, + pub block_hash: BlockHeaderHash, + pub parent_block_id: String, + pub height: u64, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct RPCTenure { + pub consensus_hash: ConsensusHash, + pub burn_block_height: u64, + pub burn_block_hash: String, + pub stacks_blocks: Vec, +} + +/// Decode the HTTP request +impl HttpRequest for RPCNakamotoTenureBlocksRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v3/tenures/blocks/(?P[0-9a-f]{40})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/tenures/blocks/:consensus_hash" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. 
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + let consensus_hash = request::get_consensus_hash(captures, "consensus_hash")?; + self.consensus_hash = Some(consensus_hash); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNakamotoTenureBlocksRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.consensus_hash = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let consensus_hash = self + .consensus_hash + .take() + .ok_or(NetError::SendError("`consensus_hash` not set".into()))?; + + let tenure_blocks_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let header_info = + match NakamotoChainState::find_highest_known_block_header_in_tenure( + &chainstate, + sortdb, + &consensus_hash, + ) { + Ok(Some(header)) => header, + Ok(None) => { + let msg = format!("No blocks in tenure {}", &consensus_hash); + debug!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )); + } + Err(e) => { + let msg = format!( + "Failed to query tenure blocks by consensus '{}': {:?}", + consensus_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + }; + + let blocks = match NakamotoChainState::get_block_headers_in_tenure_at_burnview( + chainstate.db(), + &header_info.consensus_hash, + &header_info.burn_view.unwrap(), + ) { + Ok(blocks) => blocks, + Err(e) => { + let msg = format!( + "Failed to query tenure blocks by consensus '{}': {:?}", + consensus_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + }; + + Ok((blocks, header_info)) + }); + + let (tenure_blocks, header_info): (Vec, StacksHeaderInfo) = + match tenure_blocks_resp { + Ok((tenure_blocks, header_info)) => ( + tenure_blocks + .into_iter() + .map(|header| RPCTenureBlock { + block_id: header.index_block_hash(), + block_hash: header.anchored_header.block_hash(), + parent_block_id: match header.anchored_header { + StacksBlockHeaderTypes::Nakamoto(nakamoto) => { + nakamoto.parent_block_id.to_hex() + } + StacksBlockHeaderTypes::Epoch2(epoch2) => { + epoch2.parent_block.to_hex() + } + }, + + height: header.stacks_block_height, + }) + .collect(), + header_info, + ), + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let tenure = RPCTenure { + consensus_hash: header_info.consensus_hash, + burn_block_height: header_info.burn_header_height.into(), + burn_block_hash: header_info.burn_header_hash.to_hex(), + stacks_blocks: tenure_blocks, + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let body = HttpResponseContents::try_from_json(&tenure)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNakamotoTenureBlocksRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let tenure: RPCTenure = parse_json(preamble, body)?; + 
Ok(HttpResponsePayload::try_from_json(tenure)?)
+    }
+}
+
+impl StacksHttpRequest {
+    /// Make a new request to this endpoint for the blocks in a tenure
+    pub fn new_get_tenure_blocks(
+        host: PeerHost,
+        consensus_hash: &ConsensusHash,
+    ) -> StacksHttpRequest {
+        StacksHttpRequest::new_for_peer(
+            host,
+            "GET".into(),
+            format!("/v3/tenures/blocks/{}", consensus_hash),
+            HttpRequestContents::new(),
+        )
+        .expect("FATAL: failed to construct request from infallible data")
+    }
+}
+
+impl StacksHttpResponse {
+    pub fn decode_tenure_blocks(self) -> Result<RPCTenure, NetError> {
+        let contents = self.get_http_payload_ok()?;
+        let response_json: serde_json::Value = contents.try_into()?;
+        let tenure: RPCTenure = serde_json::from_value(response_json)
+            .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?;
+        Ok(tenure)
+    }
+}
diff --git a/stackslib/src/net/api/gettenureblocksbyhash.rs b/stackslib/src/net/api/gettenureblocksbyhash.rs
new file mode 100644
index 0000000000..8698c397aa
--- /dev/null
+++ b/stackslib/src/net/api/gettenureblocksbyhash.rs
@@ -0,0 +1,216 @@
+// Copyright (C) 2025 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use clarity::types::chainstate::StacksBlockId;
+use regex::{Captures, Regex};
+use serde_json;
+use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash};
+use stacks_common::types::net::PeerHost;
+
+use crate::chainstate::nakamoto::NakamotoChainState;
+use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo};
+use crate::net::api::gettenureblocks::{RPCTenure, RPCTenureBlock};
+use crate::net::http::{
+    parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble,
+    HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError,
+};
+use crate::net::httpcore::{request, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse};
+use crate::net::{Error as NetError, StacksNodeState};
+
+#[derive(Clone)]
+pub struct RPCNakamotoTenureBlocksByHashRequestHandler {
+    pub(crate) burn_block_hash: Option<BurnchainHeaderHash>,
+}
+
+impl RPCNakamotoTenureBlocksByHashRequestHandler {
+    pub fn new() -> Self {
+        Self {
+            burn_block_hash: None,
+        }
+    }
+}
+
+/// Decode the HTTP request
+impl HttpRequest for RPCNakamotoTenureBlocksByHashRequestHandler {
+    fn verb(&self) -> &'static str {
+        "GET"
+    }
+
+    fn path_regex(&self) -> Regex {
+        Regex::new(r#"^/v3/tenures/blocks/hash/(?P<burn_block_hash>[0-9a-f]{64})$"#).unwrap()
+    }
+
+    fn metrics_identifier(&self) -> &str {
+        "/v3/tenures/blocks/hash/:burn_block_hash"
+    }
+
+    /// Try to decode this request.
+    /// There's nothing to load here, so just make sure the request is well-formed.
+ fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + let burn_block_hash = request::get_burn_block_hash(captures, "burn_block_hash")?; + self.burn_block_hash = Some(burn_block_hash); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNakamotoTenureBlocksByHashRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.burn_block_hash = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let burn_block_hash = self + .burn_block_hash + .take() + .ok_or(NetError::SendError("`burn_block_hash` not set".into()))?; + + let tenure_blocks_resp = + node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let header_info = + match NakamotoChainState::find_highest_known_block_header_in_tenure_by_block_hash( + &chainstate, + sortdb, + &burn_block_hash, + ) { + Ok(Some(header)) => header, + Ok(None) => { + let msg = + format!("No blocks at burn block hash {}", burn_block_hash); + debug!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )); + } + Err(e) => { + let msg = format!( + "Failed to query tenure blocks by burn block hash '{}': {:?}", + burn_block_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + }; + + let blocks = match NakamotoChainState::get_block_headers_in_tenure_at_burnview( + chainstate.db(), + &header_info.consensus_hash, + &header_info.burn_view.unwrap(), + ) { + Ok(blocks) => blocks, + Err(e) => { + let msg = format!( + "Failed to query tenure blocks by consensus '{}': {:?}", + header_info.consensus_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + }; + + Ok((blocks, header_info)) + }); + + let (tenure_blocks, header_info): (Vec, StacksHeaderInfo) = + match tenure_blocks_resp { + Ok((tenure_blocks, header_info)) => ( + tenure_blocks + .into_iter() + .map(|header| RPCTenureBlock { + block_id: header.index_block_hash(), + block_hash: header.anchored_header.block_hash(), + parent_block_id: match header.anchored_header { + StacksBlockHeaderTypes::Nakamoto(nakamoto) => { + nakamoto.parent_block_id.to_hex() + } + StacksBlockHeaderTypes::Epoch2(epoch2) => { + epoch2.parent_block.to_hex() + } + }, + + height: header.stacks_block_height, + }) + .collect(), + header_info, + ), + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let tenure = RPCTenure { + consensus_hash: header_info.consensus_hash, + burn_block_height: header_info.burn_header_height.into(), + burn_block_hash: header_info.burn_header_hash.to_hex(), + stacks_blocks: tenure_blocks, + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let body = HttpResponseContents::try_from_json(&tenure)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNakamotoTenureBlocksByHashRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let blocks: RPCTenure = 
parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(blocks)?) + } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_get_tenure_blocks_by_hash( + host: PeerHost, + burn_block_hash: BurnchainHeaderHash, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/tenures/blocks/hash/{}", burn_block_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/gettenureblocksbyheight.rs b/stackslib/src/net/api/gettenureblocksbyheight.rs new file mode 100644 index 0000000000..b909008365 --- /dev/null +++ b/stackslib/src/net/api/gettenureblocksbyheight.rs @@ -0,0 +1,226 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::types::chainstate::StacksBlockId; +use regex::{Captures, Regex}; +use serde_json; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; +use crate::net::api::gettenureblocks::{RPCTenure, RPCTenureBlock}; +use crate::net::http::{ + parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{request, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse}; +use crate::net::{Error as NetError, StacksNodeState}; + +#[derive(Clone)] +pub struct RPCNakamotoTenureBlocksByHeightRequestHandler { + pub(crate) burn_block_height: Option, +} + +impl RPCNakamotoTenureBlocksByHeightRequestHandler { + pub fn new() -> Self { + Self { + burn_block_height: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCNakamotoTenureBlocksByHeightRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v3/tenures/blocks/height/(?P[0-9]{1,20})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/tenures/blocks/height/:burn_block_height" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + let burn_block_height_str = captures + .name("burn_block_height") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to burn block height group".to_string()) + })? 
+            .as_str();
+
+        let burn_block_height = burn_block_height_str.parse::<u64>().map_err(|_| {
+            Error::DecodeError("Invalid path: unparseable burn block height".to_string())
+        })?;
+        self.burn_block_height = Some(burn_block_height);
+
+        Ok(HttpRequestContents::new().query_string(query))
+    }
+}
+
+impl RPCRequestHandler for RPCNakamotoTenureBlocksByHeightRequestHandler {
+    /// Reset internal state
+    fn restart(&mut self) {
+        self.burn_block_height = None;
+    }
+
+    /// Make the response
+    fn try_handle_request(
+        &mut self,
+        preamble: HttpRequestPreamble,
+        _contents: HttpRequestContents,
+        node: &mut StacksNodeState,
+    ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> {
+        let burn_block_height = self
+            .burn_block_height
+            .take()
+            .ok_or(NetError::SendError("`burn_block_height` not set".into()))?;
+
+        let tenure_blocks_resp =
+            node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| {
+                let header_info =
+                    match NakamotoChainState::find_highest_known_block_header_in_tenure_by_height(
+                        &chainstate,
+                        sortdb,
+                        burn_block_height,
+                    ) {
+                        Ok(Some(header)) => header,
+                        Ok(None) => {
+                            let msg =
+                                format!("No blocks at burn block height {}", burn_block_height);
+                            debug!("{}", &msg);
+                            return Err(StacksHttpResponse::new_error(
+                                &preamble,
+                                &HttpNotFound::new(msg),
+                            ));
+                        }
+                        Err(e) => {
+                            let msg = format!(
+                                "Failed to query tenure blocks by burn block height '{}': {:?}",
+                                burn_block_height, &e
+                            );
+                            error!("{}", &msg);
+                            return Err(StacksHttpResponse::new_error(
+                                &preamble,
+                                &HttpServerError::new(msg),
+                            ));
+                        }
+                    };
+
+                let blocks = match NakamotoChainState::get_block_headers_in_tenure_at_burnview(
+                    chainstate.db(),
+                    &header_info.consensus_hash,
+                    &header_info.burn_view.unwrap(),
+                ) {
+                    Ok(blocks) => blocks,
+                    Err(e) => {
+                        let msg = format!(
+                            "Failed to query tenure blocks by consensus '{}': {:?}",
+                            header_info.consensus_hash, &e
+                        );
+                        error!("{}", &msg);
+                        return Err(StacksHttpResponse::new_error(
+                            &preamble,
+                            &HttpServerError::new(msg),
+                        ));
+                    }
+                };
+
+                Ok((blocks, header_info))
+            });
+
+        let (tenure_blocks, header_info): (Vec<RPCTenureBlock>, StacksHeaderInfo) =
+            match tenure_blocks_resp {
+                Ok((tenure_blocks, header_info)) => (
+                    tenure_blocks
+                        .into_iter()
+                        .map(|header| RPCTenureBlock {
+                            block_id: header.index_block_hash(),
+                            block_hash: header.anchored_header.block_hash(),
+                            parent_block_id: match header.anchored_header {
+                                StacksBlockHeaderTypes::Nakamoto(nakamoto) => {
+                                    nakamoto.parent_block_id.to_hex()
+                                }
+                                StacksBlockHeaderTypes::Epoch2(epoch2) => {
+                                    epoch2.parent_block.to_hex()
+                                }
+                            },
+
+                            height: header.stacks_block_height,
+                        })
+                        .collect(),
+                    header_info,
+                ),
+                Err(response) => {
+                    return response.try_into_contents().map_err(NetError::from);
+                }
+            };
+
+        let tenure = RPCTenure {
+            consensus_hash: header_info.consensus_hash,
+            burn_block_height: header_info.burn_header_height.into(),
+            burn_block_hash: header_info.burn_header_hash.to_hex(),
+            stacks_blocks: tenure_blocks,
+        };
+
+        let preamble = HttpResponsePreamble::ok_json(&preamble);
+        let body = HttpResponseContents::try_from_json(&tenure)?;
+        Ok((preamble, body))
+    }
+}
+
+/// Decode the HTTP response
+impl HttpResponse for RPCNakamotoTenureBlocksByHeightRequestHandler {
+    fn try_parse_response(
+        &self,
+        preamble: &HttpResponsePreamble,
+        body: &[u8],
+    ) -> Result<HttpResponsePayload, Error> {
+        let blocks: RPCTenure = parse_json(preamble, body)?;
+        Ok(HttpResponsePayload::try_from_json(blocks)?)
+ } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_get_tenure_blocks_by_height( + host: PeerHost, + burn_block_height: u64, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/tenures/blocks/height/{}", burn_block_height), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 2f2f052eb6..6c44bf75c1 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -17,6 +17,7 @@ use crate::net::http::Error; use crate::net::httpcore::StacksHttp; use crate::net::Error as NetError; +pub mod blocksimulate; pub mod callreadonly; pub mod fastcallreadonly; pub mod get_tenures_fork_info; @@ -49,6 +50,9 @@ pub mod getstackerdbmetadata; pub mod getstackers; pub mod getstxtransfercost; pub mod gettenure; +pub mod gettenureblocks; +pub mod gettenureblocksbyhash; +pub mod gettenureblocksbyheight; pub mod gettenureinfo; pub mod gettenuretip; pub mod gettransaction; @@ -119,6 +123,13 @@ impl StacksHttp { self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new()); self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new()); self.register_rpc_endpoint(gettenuretip::RPCNakamotoTenureTipRequestHandler::new()); + self.register_rpc_endpoint(gettenureblocks::RPCNakamotoTenureBlocksRequestHandler::new()); + self.register_rpc_endpoint( + gettenureblocksbyhash::RPCNakamotoTenureBlocksByHashRequestHandler::new(), + ); + self.register_rpc_endpoint( + gettenureblocksbyheight::RPCNakamotoTenureBlocksByHeightRequestHandler::new(), + ); self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), @@ -141,6 +152,7 @@ impl StacksHttp { self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); + self.register_rpc_endpoint(blocksimulate::RPCNakamotoBlockSimulateRequestHandler::new()); } } diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index d7b5abfcf3..992b780676 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -562,7 +562,7 @@ impl NakamotoBlockProposal { let mut miner_tenure_info = builder.load_tenure_info(chainstate, &burn_dbconn, tenure_cause)?; let burn_chain_height = miner_tenure_info.burn_tip_height; - let mut tenure_tx = builder.tenure_begin(&burn_dbconn, &mut miner_tenure_info)?; + let mut tenure_tx = builder.tenure_begin(&burn_dbconn, &mut miner_tenure_info, false)?; for (i, tx) in self.block.txs.iter().enumerate() { let tx_len = tx.tx_len(); @@ -709,7 +709,7 @@ impl NakamotoBlockProposal { let mut replay_miner_tenure_info = replay_builder.load_tenure_info(&mut replay_chainstate, &burn_dbconn, tenure_cause)?; let mut replay_tenure_tx = - replay_builder.tenure_begin(&burn_dbconn, &mut replay_miner_tenure_info)?; + replay_builder.tenure_begin(&burn_dbconn, &mut replay_miner_tenure_info, false)?; for (i, tx) in self.block.txs.iter().enumerate() { let tx_len = tx.tx_len(); diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 
81dc454a0b..323fe95e72 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -287,7 +287,7 @@ fn test_try_make_response() { .unwrap(); let burn_chain_height = miner_tenure_info.burn_tip_height; let mut tenure_tx = builder - .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .tenure_begin(&burn_dbconn, &mut miner_tenure_info, false) .unwrap(); builder.try_mine_tx_with_len( &mut tenure_tx, @@ -538,7 +538,7 @@ fn replay_validation_test( .unwrap(); let burn_chain_height = miner_tenure_info.burn_tip_height; let mut tenure_tx = builder - .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .tenure_begin(&burn_dbconn, &mut miner_tenure_info, false) .unwrap(); for tx in block_txs { builder.try_mine_tx_with_len( diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 28e1673e1a..37ea3ce028 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -28,7 +28,7 @@ use percent_encoding::percent_decode_str; use regex::{Captures, Regex}; use stacks_common::codec::{read_next, Error as CodecError, StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, + BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, }; use stacks_common::types::net::PeerHost; use stacks_common::types::Address; @@ -280,6 +280,23 @@ pub mod request { Ok(ch) } + pub fn get_burn_block_hash( + captures: &Captures, + key: &str, + ) -> Result { + let bbh = if let Some(bbh_str) = captures.name(key) { + match BurnchainHeaderHash::from_hex(bbh_str.as_str()) { + Ok(bbh) => bbh, + Err(_e) => { + return Err(HttpError::Http(400, format!("Failed to decode `{}`", key))); + } + } + } else { + return Err(HttpError::Http(404, format!("Missing `{}`", key))); + }; + Ok(bbh) + } + /// Get and parse a u32 from a path's captures, given the name of the regex field. pub fn get_u32(captures: &Captures, key: &str) -> Result { let u = if let Some(u32_str) = captures.name(key) {
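
The new GET /v3/blocks/simulate/:block_id endpoint above returns an RPCSimulatedBlock JSON body. Below is a rough client-side sketch of how such a response might be consumed: the field names follow the structs in blocksimulate.rs, but the trimmed mirror structs, the narrower integer types (u64 instead of u128), and the sample JSON values are illustrative assumptions, not part of this change.

// Illustrative only, not part of this diff. Requires the `serde` (with derive) and
// `serde_json` crates. A real client would fetch the body over HTTP instead of using
// a string literal, and the id fields would be full 32-byte hex strings.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct SimulatedTx {
    txid: String,
    tx_index: u32,
    stx_burned: u64,
}

#[derive(Deserialize, Debug)]
struct SimulatedBlock {
    block_id: String,
    parent_block_id: String,
    fees: u64,
    valid: bool,
    transactions: Vec<SimulatedTx>,
}

fn main() -> Result<(), serde_json::Error> {
    // Stand-in for a response body from GET /v3/blocks/simulate/:block_id.
    let body = r#"{
        "block_id": "aa11",
        "parent_block_id": "bb22",
        "fees": 360,
        "valid": true,
        "transactions": [
            { "txid": "cc33", "tx_index": 0, "stx_burned": 0 },
            { "txid": "dd44", "tx_index": 1, "stx_burned": 100 }
        ]
    }"#;

    let block: SimulatedBlock = serde_json::from_str(body)?;
    let burned: u64 = block.transactions.iter().map(|tx| tx.stx_burned).sum();
    // `valid` reports whether the re-executed block's tx merkle root and state index root
    // match the observed block, per the handler above.
    println!(
        "simulated {} ({} txs): fees={} uSTX, burned={} uSTX, matches observed block: {}",
        block.block_id,
        block.transactions.len(),
        block.fees,
        burned,
        block.valid
    );
    Ok(())
}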