diff --git a/stacks-node/src/nakamoto_node/miner.rs b/stacks-node/src/nakamoto_node/miner.rs index 95589728877..4083c3ce2d7 100644 --- a/stacks-node/src/nakamoto_node/miner.rs +++ b/stacks-node/src/nakamoto_node/miner.rs @@ -17,8 +17,8 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; #[cfg(test)] use std::sync::LazyLock; -use std::thread; use std::time::{Duration, Instant}; +use std::{cmp, thread}; use clarity::boot_util::boot_code_id; use clarity::vm::costs::ExecutionCost; @@ -1661,6 +1661,9 @@ impl BlockMinerThread { } } }; + + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + // Check if we can and should include a time-based tenure extend. if self.last_block_mined.is_some() { if self.config.miner.replay_transactions @@ -1673,12 +1676,35 @@ impl BlockMinerThread { info!("Tenure extend: In replay, always extending tenure"); self.tenure_extend_reset(); } else { - // Do not extend if we have spent < 50% of the budget, since it is + // Do not extend if we have spent < 50% of the budget and < 50% of the tenure size limit, since it is // not necessary. + + let mut tenure_size_usage = 0; + + if let Some(total_tenure_size) = + match NakamotoChainState::get_block_header_nakamoto_total_tenure_size( + chainstate.db(), + &parent_block_id, + ) { + Ok(total_tenure_size) => total_tenure_size, + Err(e) => match e { + ChainstateError::DBError(e) => { + return Err(NakamotoNodeError::DBError(e)) + } + _ => return Err(NakamotoNodeError::UnexpectedChainState), + }, + } + { + tenure_size_usage = + total_tenure_size / cmp::max(1, self.config.miner.max_tenure_bytes / 100); + } + let usage = self .tenure_budget .proportion_largest_dimension(&self.tenure_cost); - if usage < self.config.miner.tenure_extend_cost_threshold { + if usage < self.config.miner.tenure_extend_cost_threshold + && tenure_size_usage < self.config.miner.tenure_extend_cost_threshold + { return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, @@ -1705,7 +1731,6 @@ impl BlockMinerThread { } } - let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash.clone(), diff --git a/stacks-node/src/tests/nakamoto_integrations.rs b/stacks-node/src/tests/nakamoto_integrations.rs index 9fe72b88870..04c2ef61576 100644 --- a/stacks-node/src/tests/nakamoto_integrations.rs +++ b/stacks-node/src/tests/nakamoto_integrations.rs @@ -142,6 +142,7 @@ pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; use clarity::vm::database::STXBalance; use stacks::chainstate::stacks::boot::SIP_031_NAME; use stacks::clarity_vm::clarity::SIP_031_INITIAL_MINT; +use stacks::config::DEFAULT_MAX_TENURE_BYTES; use crate::clarity::vm::clarity::ClarityConnection; @@ -3253,6 +3254,7 @@ fn block_proposal_api_endpoint() { 1, None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), ) .expect("Failed to build Nakamoto block"); @@ -15324,3 +15326,588 @@ fn check_block_time_keyword() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// Tests that the tenure size limit is correctly accounted. 
+/// Deploys 10 (big) contracts (each 512K) +/// The block limit is 2MB, the tenure limit is 3MB +/// One block will contain 3 of the deployed contracts (the block size will be reached at it) +/// The following one will contain 2 of the deployed contract (tenure size limit will be reached) +fn smaller_tenure_size_for_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let mut senders: Vec<(Secp256k1PrivateKey, StacksAddress)> = vec![]; + + // number of deploys to submit in the test + let num_deploys = 10; + + for _ in 0..num_deploys { + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 10000000000000, + ); + + senders.push((sender_sk, sender_addr)); + } + + let signer_sk = Secp256k1PrivateKey::random(); + let signer_addr = tests::to_addr(&signer_sk); + + naka_conf.miner.max_tenure_bytes = 3 * 1024 * 1024; // 3MB + naka_conf.miner.log_skipped_transactions = true; + + naka_conf.add_initial_balance( + PrincipalData::from(signer_addr.clone()).to_string(), + 10000000000000, + ); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register( + &mut naka_conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); + + let mut btcd_controller = BitcoinCoreController::from_stx_config(&naka_conf); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, .. 
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk.clone()], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, &counters); + + let mut long_comment = String::from(";; "); + long_comment.extend(std::iter::repeat('x').take(524_288 - long_comment.len())); + let contract = format!( + r#" + {long_comment} + (define-public (test-fn) + (ok "Hello, world!") + ) + "# + ); + + let deploy_fee = 524504; + + test_observer::clear(); + + for deploy in 0..num_deploys { + info!("Submitting deploy {deploy}"); + let contract_name = format!("test-{deploy}"); + + let contract_tx = make_contract_publish( + &senders[deploy].0, + 0, + deploy_fee, + naka_conf.burnchain.chain_id, + &contract_name, + &contract, + ); + + submit_tx(&http_origin, &contract_tx); + } + + next_block_and(&mut btc_regtest_controller, 60, || { + let nakamoto_block_events = test_observer::get_mined_nakamoto_blocks(); + if !nakamoto_block_events.is_empty() { + let nakamoto_block_event = nakamoto_block_events.last().unwrap(); + let mut skipped_transactions = 0; + for tx_event in &nakamoto_block_event.tx_events { + match tx_event { + TransactionEvent::Skipped(reason) => { + if reason.error == "Too much data in tenure" { + skipped_transactions += 1; + } + } + _ => (), + } + } + // assume 2 blocks, the first one with 3 transactions the second with 2 + // that means we will have 5 skipped transactions at the end + if skipped_transactions == 5 { + return Ok(true); + } + } + Ok(false) + }) + .unwrap(); + + // wait for signers + wait_for(30, || Ok(test_observer::get_blocks().len() >= 3)) + .expect("Timed out waiting for signers"); + + let blocks = test_observer::get_blocks(); + + assert_eq!( + blocks.len(), + 3, + "Should have successfully mined three blocks, but got {}", + blocks.len() + ); + + let mut deployed_contracts = 0; + for deploy in 0..num_deploys { + if get_account(&http_origin, &senders[deploy].1).nonce == 1 { + deployed_contracts += 1; + } + } + + assert_eq!( + deployed_contracts, 5, + "Should have successfully deployed 5 contracts, but got {}", + deployed_contracts + ); + + // ensure no tenure extend + for block in &blocks { + let txs = test_observer::parse_transactions(block); + let has_tenure_extend = txs.iter().any(|tx| match &tx.payload { + TransactionPayload::TenureChange(tenure_change) => { + tenure_change.cause == TenureChangeCause::Extended + } + _ => false, + }); + + assert!(!has_tenure_extend, "Unexpected tenure extend transaction"); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// Tests that the tenure size limit is correctly accounted. +/// Deploys 10 (big) contracts (each 512K) +/// The block limit is 2MB, the tenure limit is 3MB +/// One block will contain 3 of the deployed contracts (the block size will be reached at it) +/// The following one will contain 2 of the deployed contract (tenure size limit will be reached) +/// Start a new tenure to process the remaining transactions. 
+fn smaller_tenure_size_for_miner_on_two_tenures() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let mut senders: Vec<(Secp256k1PrivateKey, StacksAddress)> = vec![]; + + // number of deploys to submit in the test + let num_deploys = 10; + + for _ in 0..num_deploys { + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 10000000000000, + ); + + senders.push((sender_sk, sender_addr)); + } + + let signer_sk = Secp256k1PrivateKey::random(); + let signer_addr = tests::to_addr(&signer_sk); + + naka_conf.miner.max_tenure_bytes = 3 * 1024 * 1024; // 3MB + naka_conf.miner.log_skipped_transactions = true; + + naka_conf.add_initial_balance( + PrincipalData::from(signer_addr.clone()).to_string(), + 10000000000000, + ); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register( + &mut naka_conf, + &[ + EventKeyType::AnyEvent, + EventKeyType::MinedBlocks, + EventKeyType::MemPoolTransactions, + ], + ); + + let mut btcd_controller = BitcoinCoreController::from_stx_config(&naka_conf); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, .. 
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk.clone()], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, &counters); + + let mut long_comment = String::from(";; "); + long_comment.extend(std::iter::repeat('x').take(524_288 - long_comment.len())); + let contract = format!( + r#" + {long_comment} + (define-public (test-fn) + (ok "Hello, world!") + ) + "# + ); + + let deploy_fee = 524504; + + test_observer::clear(); + + for deploy in 0..num_deploys { + info!("Submitting deploy {deploy}"); + let contract_name = format!("test-{deploy}"); + + let contract_tx = make_contract_publish( + &senders[deploy].0, + 0, + deploy_fee, + naka_conf.burnchain.chain_id, + &contract_name, + &contract, + ); + + submit_tx(&http_origin, &contract_tx); + } + + next_block_and(&mut btc_regtest_controller, 60, || { + let nakamoto_block_events = test_observer::get_mined_nakamoto_blocks(); + if !nakamoto_block_events.is_empty() { + let nakamoto_block_event = nakamoto_block_events.last().unwrap(); + let mut skipped_transactions = 0; + for tx_event in &nakamoto_block_event.tx_events { + match tx_event { + TransactionEvent::Skipped(reason) => { + if reason.error == "Too much data in tenure" { + skipped_transactions += 1; + } + } + _ => (), + } + } + // assume 2 blocks, the first one with 3 transactions the second with 2 + // that means we will have 5 skipped transactions at the end + if skipped_transactions == 5 { + return Ok(true); + } + } + Ok(false) + }) + .unwrap(); + + // wait for signers + wait_for(30, || Ok(test_observer::get_blocks().len() >= 3)) + .expect("Timed out waiting for signers"); + + // start the second tenure and wait until no more transactions are skipped + next_block_and(&mut btc_regtest_controller, 60, || { + let nakamoto_block_events = test_observer::get_mined_nakamoto_blocks(); + if !nakamoto_block_events.is_empty() { + let nakamoto_block_event = nakamoto_block_events.last().unwrap(); + let mut skipped_transactions = 0; + for tx_event in &nakamoto_block_event.tx_events { + match tx_event { + TransactionEvent::Skipped(reason) => { + if reason.error == "Too much data in tenure" { + skipped_transactions += 1; + } + } + _ => (), + } + } + if skipped_transactions == 0 { + return Ok(true); + } + } + Ok(false) + }) + .unwrap(); + + // wait for signers + wait_for(30, || Ok(test_observer::get_blocks().len() >= 6)) + .expect("Timed out waiting for signers"); + + let blocks = test_observer::get_blocks(); + + assert_eq!( + blocks.len(), + 6, + "Should have successfully mined six blocks, but got {}", + blocks.len() + ); + + let mut deployed_contracts = 0; + for deploy in 0..num_deploys { + if get_account(&http_origin, &senders[deploy].1).nonce == 1 { + deployed_contracts += 1; + } + } + + assert_eq!( + deployed_contracts, 10, + "Should have successfully deployed 10 contracts, but got {}", + deployed_contracts + ); + + // ensure no tenure extend + for block in &blocks { + let txs = test_observer::parse_transactions(block); + let has_tenure_extend = txs.iter().any(|tx| match &tx.payload { +
TransactionPayload::TenureChange(tenure_change) => { + tenure_change.cause == TenureChangeCause::Extended + } + _ => false, + }); + + assert!(!has_tenure_extend, "Unexpected tenure extend transaction"); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// Tests that the tenure size limit is correctly reset on tenure extend. +/// Deploys 10 (big) contracts (each 512K) +/// The block limit is 2MB, the tenure limit is 3MB +/// One block will contain 3 of the deployed contracts (the block size will be reached at it) +/// The following ones will contain the others as tenure extend is constantly triggered +fn smaller_tenure_size_for_miner_with_tenure_extend() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let mut senders: Vec<(Secp256k1PrivateKey, StacksAddress)> = vec![]; + + // number of deploys to submit in the test + let num_deploys = 10; + + for _ in 0..num_deploys { + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 10000000000000, + ); + + senders.push((sender_sk, sender_addr)); + } + + let signer_sk = Secp256k1PrivateKey::random(); + let signer_addr = tests::to_addr(&signer_sk); + + naka_conf.miner.max_tenure_bytes = 3 * 1024 * 1024; // 3MB + naka_conf.miner.log_skipped_transactions = true; + // quickly tenure extend + naka_conf.miner.tenure_timeout = Duration::from_secs(1); + naka_conf.miner.tenure_extend_cost_threshold = 2; + + naka_conf.add_initial_balance( + PrincipalData::from(signer_addr.clone()).to_string(), + 10000000000000, + ); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register( + &mut naka_conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); + + let mut btcd_controller = BitcoinCoreController::from_stx_config(&naka_conf); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, .. 
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk.clone()], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, &counters); + + let mut long_comment = String::from(";; "); + long_comment.extend(std::iter::repeat('x').take(524_288 - long_comment.len())); + let contract = format!( + r#" + {long_comment} + (define-public (test-fn) + (ok "Hello, world!") + ) + "# + ); + + let deploy_fee = 524504; + + test_observer::clear(); + + for deploy in 0..num_deploys { + info!("Submitting deploy {deploy}"); + let contract_name = format!("test-{deploy}"); + + let contract_tx = make_contract_publish( + &senders[deploy].0, + 0, + deploy_fee, + naka_conf.burnchain.chain_id, + &contract_name, + &contract, + ); + + submit_tx(&http_origin, &contract_tx); + } + + next_block_and(&mut btc_regtest_controller, 60, || { + let mut deployed_contracts = 0; + for deploy in 0..num_deploys { + if get_account(&http_origin, &senders[deploy].1).nonce == 1 { + deployed_contracts += 1; + } + } + Ok(deployed_contracts == 10) + }) + .unwrap(); + + let blocks = test_observer::get_blocks(); + + assert_eq!( + blocks.len(), + 5, + "Should have successfully mined five blocks, but got {}", + blocks.len() + ); + + // ensure tenure extend is present in the last 3 blocks + for (block_index, block) in blocks.iter().enumerate() { + let txs = test_observer::parse_transactions(block); + let has_tenure_extend = txs.iter().any(|tx| match &tx.payload { + TransactionPayload::TenureChange(tenure_change) => { + tenure_change.cause == TenureChangeCause::Extended + } + _ => false, + }); + + if block_index > 1 { + assert!(has_tenure_extend, "Expected tenure extend transaction"); + } else { + assert!(!has_tenure_extend, "Unexpected tenure extend transaction"); + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/stacks-node/src/tests/signer/v0.rs b/stacks-node/src/tests/signer/v0.rs index c2e6e7a4cb3..6c359d6c739 100644 --- a/stacks-node/src/tests/signer/v0.rs +++ b/stacks-node/src/tests/signer/v0.rs @@ -2842,6 +2842,7 @@ fn forked_tenure_testing( burn_header_timestamp: tip_sn.burn_header_timestamp, anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, burn_view: Some(tip_b_block.header.consensus_hash), + total_tenure_size: 0, }; let blocks = test_observer::get_mined_nakamoto_blocks(); @@ -17587,6 +17588,7 @@ fn reorging_signers_capitulate_to_nonreorging_signers_during_tenure_fork() { burn_header_timestamp: tip_sn.burn_header_timestamp, anchored_block_size: tenure_b_block.serialize_to_vec().len() as u64, burn_view: Some(tenure_b_block.header.consensus_hash), + total_tenure_size: 0, }; // Block B was built atop block A diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 7a32a9dd782..87929b2c495 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -37,7 +37,7 @@ use crate::chainstate::stacks::miner::{ }; use 
crate::chainstate::stacks::{Error, StacksBlockHeader, *}; use crate::clarity_vm::clarity::ClarityInstance; -use crate::config::DEFAULT_CONTRACT_COST_LIMIT_PERCENTAGE; +use crate::config::{DEFAULT_CONTRACT_COST_LIMIT_PERCENTAGE, DEFAULT_MAX_TENURE_BYTES}; use crate::core::mempool::*; use crate::core::*; use crate::monitoring::{ @@ -93,6 +93,8 @@ pub struct NakamotoBlockBuilder { /// Percentage of a block's budget that may be consumed by /// contract calls before reverting to stx transfers/boot contract calls only contract_limit_percentage: Option, + /// Maximum size of the whole tenure + pub max_tenure_bytes: u64, } pub struct MinerTenureInfo<'a> { @@ -146,6 +148,7 @@ impl NakamotoBlockBuilder { header: NakamotoBlockHeader::genesis(), soft_limit: None, contract_limit_percentage: None, + max_tenure_bytes: u64::from(DEFAULT_MAX_TENURE_BYTES), } } @@ -176,6 +179,7 @@ impl NakamotoBlockBuilder { bitvec_len: u16, soft_limit: Option, contract_limit_percentage: Option, + max_tenure_bytes: u64, ) -> Result { let next_height = parent_stacks_header .anchored_header @@ -217,6 +221,7 @@ impl NakamotoBlockBuilder { ), soft_limit, contract_limit_percentage, + max_tenure_bytes, }) } @@ -581,6 +586,7 @@ impl NakamotoBlockBuilder { signer_bitvec_len, None, settings.mempool_settings.contract_cost_limit_percentage, + settings.max_tenure_bytes, )?; let ts_start = get_epoch_time_ms(); @@ -716,6 +722,20 @@ impl BlockBuilder for NakamotoBlockBuilder { return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); } + if let Some(parent_header) = &self.parent_header { + let mut total_tenure_size = self.bytes_so_far + tx_len; + + // if we are in the same tenure of the parent, accumulate the parent total_tenure_size + // note that total_tenure_size is reset whenever a new tenure extend happens + if parent_header.consensus_hash == self.header.consensus_hash { + total_tenure_size += parent_header.total_tenure_size; + } + + if total_tenure_size >= self.max_tenure_bytes { + return TransactionResult::skipped_due_to_error(tx, Error::TenureTooBigError); + } + } + let non_boot_code_contract_call = match &tx.payload { TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), TransactionPayload::SmartContract(..) => true, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 52bc37d78a5..71c7ef12560 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -287,6 +287,24 @@ pub static NAKAMOTO_CHAINSTATE_SCHEMA_6: &[&str] = &[ "CREATE INDEX IF NOT EXISTS nakamoto_block_headers_by_ch_bv ON nakamoto_block_headers(consensus_hash, burn_view);" ]; +pub static NAKAMOTO_CHAINSTATE_SCHEMA_7: &[&str] = &[ + r#" + UPDATE db_config SET version = "12"; + "#, + // Add a `total_tenure_size` field to the block header row, so we can keep track + // of the whole tenure size (and eventually limit it) + // + // + // + // Default to 0. 
+ r#" + -- total_tenure_size cannot be consensus critical as existing nodes which migrate will report a 0 size while + -- nodes booting from genesis sync will get the true tenure size + ALTER TABLE nakamoto_block_headers + ADD COLUMN total_tenure_size NOT NULL DEFAULT 0; + "#, +]; + #[cfg(test)] mod fault_injection { static PROCESS_BLOCK_STALL: std::sync::Mutex = std::sync::Mutex::new(false); @@ -2717,6 +2735,19 @@ impl NakamotoChainState { Ok(result) } + /// Load the total_tenure_size for a Nakamoto header + pub fn get_block_header_nakamoto_total_tenure_size( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let sql = + "SELECT total_tenure_size FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + "FATAL: multiple rows for the same block hash".to_string() + })?; + Ok(result) + } + /// Load an epoch2 header pub fn get_block_header_epoch2( chainstate_conn: &Connection, @@ -3313,6 +3344,7 @@ impl NakamotoChainState { stacks_block_height, burn_header_height, burn_header_timestamp, + total_tenure_size, .. } = tip_info; @@ -3369,6 +3401,7 @@ impl NakamotoChainState { "Nakamoto block StacksHeaderInfo did not set burnchain view".into(), )) })?, + total_tenure_size ]; chainstate_tx.execute( @@ -3401,8 +3434,9 @@ impl NakamotoChainState { vrf_proof, signer_bitvec, height_in_tenure, - burn_view) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26, ?27)", + burn_view, + total_tenure_size) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26, ?27, ?28)", args )?; @@ -3468,6 +3502,9 @@ impl NakamotoChainState { let mut marf_keys = vec![]; let mut marf_values = vec![]; + // assume a new tenure (we will eventually add the parent accumulated size later) + let mut total_tenure_size = block_size; + if new_tenure { // make the coinbase height point to this tenure-start block marf_keys.push(nakamoto_keys::ongoing_tenure_coinbase_height( @@ -3531,6 +3568,22 @@ impl NakamotoChainState { marf_keys.push(nakamoto_keys::ongoing_tenure_id().to_string()); marf_values.push(nakamoto_keys::make_tenure_id_value(&tenure_id)); + } else { + // if we are here (no new tenure or tenure_extend) we need to accumulate the parent total tenure size + if let Some(current_total_tenure_size) = + NakamotoChainState::get_block_header_nakamoto_total_tenure_size( + &headers_tx, + &new_tip.parent_block_id, + )? 
+ { + total_tenure_size = match total_tenure_size.checked_add(current_total_tenure_size) { + Some(total_tenure_size) => total_tenure_size, + // in the extremely improbable case of overflow, just throw the tenure too big error + None => { + return Err(ChainstateError::TenureTooBigError); + } + }; + } } // record the highest block in this tenure @@ -3562,6 +3615,7 @@ impl NakamotoChainState { burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: block_size, burn_view: Some(burn_view.clone()), + total_tenure_size, }; let tenure_fees = block_fees diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 52973fb113d..446020e5be3 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -69,6 +69,7 @@ use crate::chainstate::stacks::{ use crate::clarity::vm::types::StacksAddressExtensions; use crate::clarity_vm::clarity::ClarityInstance; use crate::clarity_vm::database::SortitionDBRef; +use crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::net::Error as NetError; use crate::util_lib::db::{query_row, u64_to_sql, Error as DBError}; @@ -727,6 +728,7 @@ impl NakamotoBlockBuilder { 1, None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), )?; let mut block_txs = vec![tenure_change_tx, coinbase_tx]; diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 4c285874d1b..6cbe80f01f3 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -590,6 +590,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_timestamp: 1000, anchored_block_size: 12345, burn_view: None, + total_tenure_size: 0, }; let epoch2_execution_cost = ExecutionCost { @@ -723,6 +724,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_timestamp: 1001, anchored_block_size: 123, burn_view: Some(nakamoto_header.consensus_hash.clone()), + total_tenure_size: 0, }; let epoch2_block = StacksBlock { @@ -769,6 +771,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_timestamp: 1001, anchored_block_size: 123, burn_view: Some(nakamoto_header_2.consensus_hash.clone()), + total_tenure_size: 0, }; let nakamoto_block_2 = NakamotoBlock { @@ -810,6 +813,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_timestamp: 1001, anchored_block_size: 123, burn_view: Some(nakamoto_header_3.consensus_hash.clone()), + total_tenure_size: 0, }; let nakamoto_block_3 = NakamotoBlock { @@ -843,6 +847,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_timestamp: 1001, anchored_block_size: 123, burn_view: Some(nakamoto_header_3.consensus_hash.clone()), + total_tenure_size: 0, }; let nakamoto_block_3_weight_2 = NakamotoBlock { @@ -876,6 +881,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_timestamp: 1001, anchored_block_size: 123, burn_view: Some(nakamoto_header_4.consensus_hash.clone()), + total_tenure_size: 0, }; let nakamoto_block_4 = NakamotoBlock { diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 1c9a64b9596..ecbe903769f 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -51,6 +51,7 @@ use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::tests::TestStacksNode; use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, *}; +use 
crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::core::{BOOT_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER}; use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::test::{TestPeer, *}; @@ -798,6 +799,7 @@ impl TestStacksNode { 1, None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), )? } else { NakamotoBlockBuilder::new_first_block( diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index a72f3f9edc9..83310c0d4e9 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -50,6 +50,7 @@ use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, NAKAMOTO_CHAINSTATE_SCHEMA_5, NAKAMOTO_CHAINSTATE_SCHEMA_6, + NAKAMOTO_CHAINSTATE_SCHEMA_7, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -184,6 +185,9 @@ pub struct StacksHeaderInfo { /// The burnchain tip that is passed to Clarity while processing this block. /// This should always be `Some()` for Nakamoto blocks and `None` for 2.x blocks pub burn_view: Option, + /// Total tenure size (reset at every tenure extend) in bytes + /// Not consensus-critical (may differ between nodes) + pub total_tenure_size: u64, } #[derive(Debug, Clone, PartialEq)] @@ -282,8 +286,8 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => (1..=11).contains(&version_u32), - StacksEpochId::Epoch2_05 => (2..=11).contains(&version_u32), + StacksEpochId::Epoch20 => (1..=CHAINSTATE_VERSION_NUMBER).contains(&version_u32), + StacksEpochId::Epoch2_05 => (2..=CHAINSTATE_VERSION_NUMBER).contains(&version_u32), StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 @@ -292,7 +296,7 @@ impl DBConfig { | StacksEpochId::Epoch30 | StacksEpochId::Epoch31 | StacksEpochId::Epoch32 - | StacksEpochId::Epoch33 => (3..=11).contains(&version_u32), + | StacksEpochId::Epoch33 => (3..=CHAINSTATE_VERSION_NUMBER).contains(&version_u32), } } } @@ -361,6 +365,7 @@ impl StacksHeaderInfo { burn_header_timestamp: 0, anchored_block_size: 0, burn_view: None, + total_tenure_size: 0, } } @@ -381,6 +386,7 @@ impl StacksHeaderInfo { burn_header_timestamp: first_burnchain_block_timestamp, anchored_block_size: 0, burn_view: None, + total_tenure_size: 0, } } @@ -453,6 +459,13 @@ impl FromRow for StacksHeaderInfo { return Err(db_error::ParseError); } + let total_tenure_size = { + match header_type { + HeaderTypeNames::Epoch2 => 0, + HeaderTypeNames::Nakamoto => u64::from_column(row, "total_tenure_size")?, + } + }; + Ok(StacksHeaderInfo { anchored_header: stacks_header, microblock_tail: None, @@ -464,6 +477,7 @@ impl FromRow for StacksHeaderInfo { burn_header_timestamp, anchored_block_size, burn_view, + total_tenure_size, }) } } @@ -651,7 +665,8 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &str = "11"; +pub const CHAINSTATE_VERSION: &str = "12"; +pub const CHAINSTATE_VERSION_NUMBER: u32 = 12; const CHAINSTATE_INITIAL_SCHEMA: &[&str] = &[ "PRAGMA foreign_keys = ON;", @@ -1144,6 +1159,14 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "11" => { + info!( + "Migrating chainstate schema from version 11 to 12: add total_tenure_size field" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_7.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: 
expected version = {}, got {}", @@ -2768,6 +2791,7 @@ impl StacksChainState { burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: anchor_block_size, burn_view: None, + total_tenure_size: 0, }; StacksChainState::insert_stacks_block_header( diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 32dcc496fd3..5aa663f72c5 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -51,6 +51,7 @@ use crate::chainstate::stacks::db::{ChainstateTx, ClarityTx, StacksChainState}; use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::{Error, StacksBlockHeader, StacksMicroblockHeader, *}; use crate::clarity_vm::clarity::{ClarityInstance, Error as clarity_error}; +use crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::core::mempool::*; use crate::core::*; use crate::monitoring::{ @@ -242,6 +243,7 @@ pub struct BlockBuilderSettings { /// Should the builder attempt to confirm any parent microblocks pub confirm_microblocks: bool, pub max_execution_time: Option, + pub max_tenure_bytes: u64, } impl BlockBuilderSettings { @@ -254,6 +256,7 @@ impl BlockBuilderSettings { miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), confirm_microblocks: true, max_execution_time: None, + max_tenure_bytes: u64::from(DEFAULT_MAX_TENURE_BYTES), } } @@ -266,6 +269,7 @@ impl BlockBuilderSettings { miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), confirm_microblocks: true, max_execution_time: None, + max_tenure_bytes: u64::from(DEFAULT_MAX_TENURE_BYTES), } } } @@ -1536,6 +1540,7 @@ impl StacksBlockBuilder { burn_header_height: genesis_burn_header_height, anchored_block_size: 0, burn_view: None, + total_tenure_size: 0, }; let mut builder = StacksBlockBuilder::from_parent_pubkey_hash( diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 7bf4d87c5df..a24218c0827 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -120,6 +120,7 @@ pub enum Error { /// This error indicates a Epoch2 block attempted to build off of a Nakamoto block. 
InvalidChildOfNakomotoBlock, NoRegisteredSigners(u64), + TenureTooBigError, } impl From for Error { @@ -222,6 +223,7 @@ impl fmt::Display for Error { Error::NotInSameFork => { write!(f, "The supplied block identifiers are not in the same fork") } + Error::TenureTooBigError => write!(f, "Too much data in tenure"), } } } @@ -268,6 +270,7 @@ impl error::Error for Error { Error::ExpectedTenureChange => None, Error::NoRegisteredSigners(_) => None, Error::NotInSameFork => None, + Error::TenureTooBigError => None, } } } @@ -314,6 +317,7 @@ impl Error { Error::ExpectedTenureChange => "ExpectedTenureChange", Error::NoRegisteredSigners(_) => "NoRegisteredSigners", Error::NotInSameFork => "NotInSameFork", + Error::TenureTooBigError => "TenureTooBigError", } } diff --git a/stackslib/src/clarity_vm/tests/ephemeral.rs b/stackslib/src/clarity_vm/tests/ephemeral.rs index 73b5897212c..4b048db6eef 100644 --- a/stackslib/src/clarity_vm/tests/ephemeral.rs +++ b/stackslib/src/clarity_vm/tests/ephemeral.rs @@ -43,6 +43,7 @@ use crate::chainstate::stacks::{ use crate::clarity::vm::database::ClarityBackingStore; use crate::clarity_vm::clarity::ClarityMarfStoreTransaction; use crate::clarity_vm::database::marf::MarfedKV; +use crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::net::test::TestEventObserver; use crate::net::tests::inv::nakamoto::make_nakamoto_peer_from_invs; use crate::net::tests::{NakamotoBootPlan, NakamotoBootStep, NakamotoBootTenure}; @@ -342,6 +343,7 @@ fn replay_block( original_block.header.pox_treatment.len(), None, Some(100), + u64::from(DEFAULT_MAX_TENURE_BYTES), ) .unwrap(); diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b7d1919752c..c2ab15dcd9f 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -123,11 +123,16 @@ const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; /// Default percentage of block budget that must be used before attempting a /// time-based tenure extend const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; +/// Default percentage of tenure size that must be used before attempting a +/// time-based tenure extend +const DEFAULT_TENURE_EXTEND_TENURE_SIZE_THRESHOLD: u64 = 50; /// Default number of milliseconds that the miner should sleep between mining /// attempts when the mempool is empty. const DEFAULT_EMPTY_MEMPOOL_SLEEP_MS: u64 = 2_500; /// Default number of seconds that a miner should wait before timing out an HTTP request to StackerDB. const DEFAULT_STACKERDB_TIMEOUT_SECS: u64 = 120; +/// Default maximum size for a tenure (note: the counter is reset on tenure extend). 
+pub const DEFAULT_MAX_TENURE_BYTES: u64 = 10 * 1024 * 1024; // 10 MB static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock<ConnectionOptions> = LazyLock::new(|| ConnectionOptions { @@ -1143,12 +1148,14 @@ impl Config { tenure_cost_limit_per_block_percentage: miner_config .tenure_cost_limit_per_block_percentage, contract_cost_limit_percentage: miner_config.contract_cost_limit_percentage, + log_skipped_transactions: miner_config.log_skipped_transactions, }, miner_status, confirm_microblocks: false, max_execution_time: miner_config .max_execution_time_secs .map(Duration::from_secs), + max_tenure_bytes: miner_config.max_tenure_bytes, } } @@ -1190,12 +1197,14 @@ impl Config { tenure_cost_limit_per_block_percentage: miner_config .tenure_cost_limit_per_block_percentage, contract_cost_limit_percentage: miner_config.contract_cost_limit_percentage, + log_skipped_transactions: miner_config.log_skipped_transactions, }, miner_status, confirm_microblocks: true, max_execution_time: miner_config .max_execution_time_secs .map(Duration::from_secs), + max_tenure_bytes: miner_config.max_tenure_bytes, } } @@ -3053,6 +3062,26 @@ pub struct MinerConfig { /// @default: [`DEFAULT_STACKERDB_TIMEOUT_SECS`] /// @units: seconds. pub stackerdb_timeout: Duration, + /// Defines the maximum number of bytes to allow in a tenure. + /// The miner will stop mining if the limit is reached. + pub max_tenure_bytes: u64, + /// Enable logging of skipped transactions (generally used for tests) + pub log_skipped_transactions: bool, + /// Percentage of total tenure size that must be used before attempting a time-based tenure extend. + /// + /// This sets a minimum threshold for the accumulated block size within a + /// tenure before a time-based tenure extension ([`MinerConfig::tenure_timeout`]) + /// can be initiated. The miner checks if the proportion of the total tenure + /// size so far exceeds this percentage. If the size usage is below + /// this threshold, a time-based extension will not be attempted, even if the + /// [`MinerConfig::tenure_timeout`] duration has elapsed. This prevents miners + /// from extending tenures very early if they have produced only small blocks. + /// --- + /// @default: [`DEFAULT_TENURE_EXTEND_TENURE_SIZE_THRESHOLD`] + /// @units: percent + /// @notes: + /// - Values: 0-100.
+ pub tenure_extend_tenure_size_threshold: u64, } impl Default for MinerConfig { @@ -3108,6 +3137,9 @@ impl Default for MinerConfig { max_execution_time_secs: None, replay_transactions: false, stackerdb_timeout: Duration::from_secs(DEFAULT_STACKERDB_TIMEOUT_SECS), + max_tenure_bytes: DEFAULT_MAX_TENURE_BYTES, + log_skipped_transactions: false, + tenure_extend_tenure_size_threshold: DEFAULT_TENURE_EXTEND_TENURE_SIZE_THRESHOLD, } } } @@ -4039,6 +4071,9 @@ pub struct MinerConfigFile { /// TODO: remove this config option once its no longer a testing feature pub replay_transactions: Option, pub stackerdb_timeout_secs: Option, + pub max_tenure_bytes: Option, + pub tenure_extend_tenure_size_threshold: Option, + pub log_skipped_transactions: Option, } impl MinerConfigFile { @@ -4231,6 +4266,9 @@ impl MinerConfigFile { max_execution_time_secs: self.max_execution_time_secs, replay_transactions: self.replay_transactions.unwrap_or_default(), stackerdb_timeout: self.stackerdb_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.stackerdb_timeout), + max_tenure_bytes: self.max_tenure_bytes.unwrap_or(miner_default_config.max_tenure_bytes), + log_skipped_transactions: self.log_skipped_transactions.unwrap_or(miner_default_config.log_skipped_transactions), + tenure_extend_tenure_size_threshold: self.tenure_extend_tenure_size_threshold.unwrap_or(miner_default_config.tenure_extend_tenure_size_threshold), }) } } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 099e24b7713..a2e2445e312 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -569,6 +569,8 @@ pub struct MemPoolWalkSettings { /// further non-boot contract calls and instead consider only boot contract calls /// and STX transfers. pub contract_cost_limit_percentage: Option, + /// Enable logging of skipped transactions (disabled by default, generally used for tests) + pub log_skipped_transactions: bool, } impl Default for MemPoolWalkSettings { @@ -583,6 +585,7 @@ impl Default for MemPoolWalkSettings { filter_origins: HashSet::new(), tenure_cost_limit_per_block_percentage: None, contract_cost_limit_percentage: None, + log_skipped_transactions: false, } } } @@ -598,6 +601,7 @@ impl MemPoolWalkSettings { filter_origins: HashSet::new(), tenure_cost_limit_per_block_percentage: None, contract_cost_limit_percentage: None, + log_skipped_transactions: false, } } } @@ -1905,7 +1909,10 @@ impl MemPoolDB { output_events.push(tx_event); } TransactionEvent::Skipped(_) => { - // don't push `Skipped` events to the observer + // don't push `Skipped` events to the observer by default + if settings.log_skipped_transactions { + output_events.push(tx_event); + } } _ => { output_events.push(tx_event); diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 3e0a5224c30..4977c2e85c3 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -123,6 +123,7 @@ pub fn make_block( burn_header_timestamp: 0, anchored_block_size: 1, burn_view: None, + total_tenure_size: 0, }; c_tx.commit_block(); diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 56e7269f365..7f169393d3c 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -37,6 +37,7 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE burn_header_timestamp: 2, anchored_block_size: 1, burn_view: None, + total_tenure_size: 0, }, tx_receipts, matured_rewards: vec![], diff --git 
a/stackslib/src/net/api/blockreplay.rs b/stackslib/src/net/api/blockreplay.rs index 2d3fe50e44c..5aa55f75819 100644 --- a/stackslib/src/net/api/blockreplay.rs +++ b/stackslib/src/net/api/blockreplay.rs @@ -30,6 +30,7 @@ use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; use crate::chainstate::stacks::{Error as ChainError, StacksTransaction, TransactionPayload}; +use crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::net::http::{ parse_json, Error, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, @@ -131,6 +132,7 @@ impl RPCNakamotoBlockReplayRequestHandler { block.header.pox_treatment.len(), None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), ) { Ok(builder) => builder, Err(e) => return Err(e), diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index a1170041b9f..f97b1dbb535 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -46,6 +46,7 @@ use crate::chainstate::stacks::{ Error as ChainError, StacksTransaction, TenureChangeCause, TransactionPayload, }; use crate::clarity_vm::clarity::Error as ClarityError; +use crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::core::mempool::ProposalCallbackReceiver; use crate::net::http::{ http_reason, parse_json, Error, HttpContentType, HttpRequest, HttpRequestContents, @@ -579,6 +580,7 @@ impl NakamotoBlockProposal { self.block.header.pox_treatment.len(), None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), )?; let mut miner_tenure_info = @@ -725,6 +727,7 @@ impl NakamotoBlockProposal { self.block.header.pox_treatment.len(), None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), )?; let (mut replay_chainstate, _) = StacksChainState::open(mainnet, chain_id, chainstate_path, None)?; diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index efca96863b6..241e5254907 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -37,6 +37,7 @@ use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction}; use crate::chainstate::stacks::test::make_codec_test_nakamoto_block; use crate::chainstate::stacks::{StacksMicroblock, StacksTransaction}; +use crate::config::DEFAULT_MAX_TENURE_BYTES; use crate::core::mempool::{MemPoolDropReason, MemPoolEventDispatcher, ProposalCallbackReceiver}; use crate::core::test_util::{ make_big_read_count_contract, make_contract_call, make_contract_publish, @@ -271,6 +272,7 @@ fn test_try_make_response() { 8, None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), ) .unwrap(); @@ -522,6 +524,7 @@ fn replay_validation_test( 8, None, None, + u64::from(DEFAULT_MAX_TENURE_BYTES), ) .unwrap();
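
For reference, a minimal self-contained sketch of the extend-eligibility logic added in `stacks-node/src/nakamoto_node/miner.rs` (a hypothetical free function, not the actual `BlockMinerThread` code, which reads the parent's `total_tenure_size` from the chainstate DB). It shows how the accumulated byte count is turned into a percentage of `max_tenure_bytes`, and that in the patch as written both the cost usage and the size usage are compared against `tenure_extend_cost_threshold`:

```rust
/// Hypothetical helper mirroring the miner-side check: a time-based tenure
/// extend is only attempted once either the execution-cost usage or the
/// accumulated tenure-size usage crosses the threshold percentage.
fn should_attempt_time_based_extend(
    cost_usage_pct: u64,    // tenure_budget.proportion_largest_dimension(&tenure_cost)
    total_tenure_size: u64, // bytes accumulated so far in this tenure (from the parent header)
    max_tenure_bytes: u64,  // miner.max_tenure_bytes
    threshold_pct: u64,     // miner.tenure_extend_cost_threshold
) -> bool {
    // Same guard as the patch: avoid division by zero for tiny or zero limits.
    let size_usage_pct = total_tenure_size / std::cmp::max(1, max_tenure_bytes / 100);
    cost_usage_pct >= threshold_pct || size_usage_pct >= threshold_pct
}
```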
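
The builder-side guard added in `stackslib/src/chainstate/nakamoto/miner.rs` can be summarized with the following sketch (simplified, hypothetical signature; the real check runs inside the `BlockBuilder` impl for `NakamotoBlockBuilder` and returns `TransactionResult::skipped_due_to_error(tx, Error::TenureTooBigError)` when it trips):

```rust
/// Simplified sketch of the per-transaction tenure-size guard
/// (hypothetical free function for illustration).
fn tx_would_exceed_tenure_limit(
    bytes_so_far: u64,             // bytes already committed to the block under construction
    tx_len: u64,                   // serialized size of the candidate transaction
    parent_total_tenure_size: u64, // running total carried on the parent header
    same_tenure_as_parent: bool,   // parent consensus hash == builder's consensus hash
    max_tenure_bytes: u64,
) -> bool {
    let mut total_tenure_size = bytes_so_far + tx_len;
    // The stored counter is reset whenever a new tenure (or tenure extend) is
    // processed, so the parent's total is only carried over within one tenure.
    if same_tenure_as_parent {
        total_tenure_size += parent_total_tenure_size;
    }
    // Crossing the limit maps to Error::TenureTooBigError ("Too much data in tenure").
    total_tenure_size >= max_tenure_bytes
}
```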
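
The integration tests' "3 + 2" expectation follows from simple arithmetic. Assuming each contract-publish transaction weighs roughly 524,504 bytes (inferred from the `deploy_fee` used in the tests; exact on-wire sizes may differ slightly), a 2 MiB block holds 3 deploys and a 3 MiB tenure holds 5 before the limit triggers:

```rust
fn main() {
    let tx_bytes: u64 = 524_504; // approximate size of one contract-publish tx (assumption)
    let block_limit: u64 = 2 * 1024 * 1024; // 2 MB block limit cited in the test comments
    let tenure_limit: u64 = 3 * 1024 * 1024; // miner.max_tenure_bytes in the tests

    let first_block = block_limit / tx_bytes; // 3 deploys fit under the block limit
    let second_block = (tenure_limit - first_block * tx_bytes) / tx_bytes; // 2 more fit under the tenure limit

    assert_eq!((first_block, second_block), (3, 2));
    // 10 deploys submitted, 5 mined => 5 skipped with "Too much data in tenure",
    // matching the expectations asserted in the tests.
    println!("{first_block} deploys in block 1, {second_block} in block 2");
}
```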
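
Finally, the mempool change only affects which transaction events reach the event observer. A rough sketch of the filtering behavior, using simplified stand-in types rather than the real `TransactionEvent` from `stackslib/src/core/mempool.rs`:

```rust
/// Stand-in event type for illustration only (not the real `TransactionEvent`).
#[derive(Debug)]
enum ObservedTxEvent {
    Success(String),
    Skipped(String),
}

/// Skipped events are suppressed by default and only forwarded to the observer
/// when the test-oriented `log_skipped_transactions` setting is enabled.
fn events_for_observer(
    events: Vec<ObservedTxEvent>,
    log_skipped_transactions: bool,
) -> Vec<ObservedTxEvent> {
    events
        .into_iter()
        .filter(|event| match event {
            ObservedTxEvent::Skipped(_) => log_skipped_transactions,
            _ => true,
        })
        .collect()
}
```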